Merge tag 'mmc-v6.0-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 28 Sep 2022 18:36:31 +0000 (11:36 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 28 Sep 2022 18:36:31 +0000 (11:36 -0700)
Pull MMC fixes from Ulf Hansson:
 "A couple of MMC fixes. This time there is also a fix for the ARM SCMI
  firmware driver, which has been acked by Sudeep Holla, the maintainer.

  MMC core:
   - Terminate infinite loop in SD-UHS voltage switch

  MMC host:
   - hsq: Fix kernel crash in the recovery path
   - moxart: Fix bus width configurations
   - sdhci: Fix kernel panic for cqe irq

  ARM_SCMI:
   - Fixup clock management by reverting 'firmware: arm_scmi: Add clock
     management to the SCMI power domain'"

* tag 'mmc-v6.0-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc:
  mmc: hsq: Fix data stomping during mmc recovery
  Revert "firmware: arm_scmi: Add clock management to the SCMI power domain"
  mmc: core: Terminate infinite loop in SD-UHS voltage switch
  mmc: moxart: fix 4-bit bus width and remove 8-bit bus width
  mmc: sdhci: Fix host->cmd is null

461 files changed:
.mailmap
Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml
Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml
Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
Documentation/devicetree/bindings/i2c/amlogic,meson6-i2c.yaml
Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml
Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml
Documentation/devicetree/bindings/media/amlogic,axg-ge2d.yaml
Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
Documentation/devicetree/bindings/memory-controllers/fsl/imx8m-ddrc.yaml
Documentation/devicetree/bindings/mfd/khadas,mcu.yaml
Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml
Documentation/devicetree/bindings/opp/opp-v2-qcom-level.yaml
Documentation/devicetree/bindings/phy/amlogic,axg-mipi-dphy.yaml
Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml
Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml
Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml
Documentation/devicetree/bindings/rng/amlogic,meson-rng.yaml
Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml
Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml
Documentation/devicetree/bindings/spi/amlogic,meson6-spifc.yaml
Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml
Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
Documentation/i2c/dev-interface.rst
Documentation/i2c/slave-interface.rst
Documentation/i2c/writing-clients.rst
Documentation/networking/mptcp-sysctl.rst
Documentation/networking/nf_conntrack-sysctl.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/am33xx-l4.dtsi
arch/arm/boot/dts/am5748.dtsi
arch/arm/boot/dts/integratorap-im-pd1.dts
arch/arm/boot/dts/integratorap.dts
arch/arm/boot/dts/lan966x.dtsi
arch/arm/boot/dts/moxart-uc7112lx.dts
arch/arm/boot/dts/moxart.dtsi
arch/arm/mach-sunplus/Kconfig
arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts
arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts
arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
arch/arm64/boot/dts/freescale/imx8mn.dtsi
arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
arch/arm64/boot/dts/freescale/imx8ulp.dtsi
arch/arm64/boot/dts/qcom/sc7280.dtsi
arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
arch/arm64/boot/dts/qcom/sm8150.dtsi
arch/arm64/boot/dts/qcom/sm8350.dtsi
arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi
arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
arch/arm64/configs/defconfig
arch/arm64/kernel/topology.c
arch/arm64/kvm/arm.c
arch/arm64/mm/mmu.c
arch/mips/lantiq/clk.c
arch/mips/loongson32/common/platform.c
arch/parisc/Kconfig
arch/powerpc/mm/book3s64/radix_pgtable.c
arch/riscv/Kconfig
arch/riscv/Kconfig.erratas
arch/riscv/errata/thead/errata.c
arch/riscv/include/asm/cacheflush.h
arch/riscv/kernel/setup.c
arch/riscv/kernel/signal.c
arch/riscv/mm/dma-noncoherent.c
arch/s390/kvm/gaccess.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/pci.c
arch/s390/kvm/pci.h
arch/um/Makefile
arch/um/kernel/sysrq.c
arch/um/kernel/um_arch.c
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/cpu/sgx/encl.c
arch/x86/kernel/cpu/sgx/main.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/emulate.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/x86.c
arch/x86/lib/usercopy.c
arch/x86/mm/Makefile
arch/x86/um/shared/sysdep/syscalls_32.h
arch/x86/um/tls_32.c
arch/x86/um/vdso/Makefile
block/blk-core.c
block/blk-lib.c
block/genhd.c
certs/Kconfig
drivers/acpi/processor_idle.c
drivers/base/core.c
drivers/counter/104-quad-8.c
drivers/dax/hmem/device.c
drivers/dma/ti/k3-udma-private.c
drivers/dma/xilinx/xilinx_dma.c
drivers/dma/xilinx/zynqmp_dma.c
drivers/firmware/arm_scmi/clock.c
drivers/firmware/arm_scmi/optee.c
drivers/firmware/arm_scmi/reset.c
drivers/firmware/arm_scmi/scmi_pm_domain.c
drivers/firmware/arm_scmi/sensors.c
drivers/firmware/efi/efibc.c
drivers/firmware/efi/libstub/secureboot.c
drivers/firmware/efi/libstub/x86-stub.c
drivers/fpga/intel-m10-bmc-sec-update.c
drivers/gpio/gpio-ftgpio010.c
drivers/gpio/gpio-ixp4xx.c
drivers/gpio/gpio-mockup.c
drivers/gpio/gpio-mpc8xxx.c
drivers/gpio/gpio-mt7621.c
drivers/gpio/gpio-rockchip.c
drivers/gpio/gpio-tqmx86.c
drivers/gpio/gpiolib-cdev.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/soc21.c
drivers/gpu/drm/amd/amdgpu/vega10_ih.c
drivers/gpu/drm/amd/amdgpu/vega20_ih.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
drivers/gpu/drm/amd/display/dc/inc/resource.h
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/gma500/cdv_device.c
drivers/gpu/drm/gma500/gem.c
drivers/gpu/drm/gma500/gma_display.c
drivers/gpu/drm/gma500/oaktrail_device.c
drivers/gpu/drm/gma500/power.c
drivers/gpu/drm/gma500/psb_drv.c
drivers/gpu/drm/gma500/psb_drv.h
drivers/gpu/drm/gma500/psb_irq.c
drivers/gpu/drm/gma500/psb_irq.h
drivers/gpu/drm/hisilicon/hibmc/Kconfig
drivers/gpu/drm/hyperv/hyperv_drm_drv.c
drivers/gpu/drm/i915/display/icl_dsi.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_vdsc.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/meson/meson_plane.c
drivers/gpu/drm/meson/meson_viu.c
drivers/gpu/drm/mgag200/mgag200_drv.c
drivers/gpu/drm/panel/panel-edp.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/rockchip/cdn-dp-core.c
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
drivers/hv/hv_fcopy.c
drivers/hv/vmbus_drv.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-mlxbf.c
drivers/i2c/i2c-mux.c
drivers/iommu/intel/dmar.c
drivers/iommu/intel/iommu.c
drivers/media/usb/b2c2/flexcop-usb.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/can/flexcan/flexcan-core.c
drivers/net/can/usb/gs_usb.c
drivers/net/dsa/microchip/lan937x_main.c
drivers/net/ethernet/aquantia/atlantic/aq_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
drivers/net/ethernet/freescale/enetc/Makefile
drivers/net/ethernet/freescale/enetc/enetc.c
drivers/net/ethernet/freescale/enetc/enetc.h
drivers/net/ethernet/freescale/enetc/enetc_pf.c
drivers/net/ethernet/freescale/enetc/enetc_qos.c
drivers/net/ethernet/freescale/enetc/enetc_vf.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/google/gve/gve_rx_dqo.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/iavf/iavf_txrx.c
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/marvell/prestera/prestera_main.c
drivers/net/ethernet/marvell/prestera/prestera_pci.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
drivers/net/ethernet/microsoft/mana/gdma_main.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/efx_channels.c
drivers/net/ethernet/sfc/siena/efx_channels.c
drivers/net/ethernet/sfc/siena/tx.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/sun/sunhme.c
drivers/net/ipa/ipa_qmi.c
drivers/net/ipa/ipa_qmi_msg.c
drivers/net/ipa/ipa_qmi_msg.h
drivers/net/ipa/ipa_table.c
drivers/net/ipa/ipa_table.h
drivers/net/ipvlan/ipvlan_core.c
drivers/net/mdio/of_mdio.c
drivers/net/netdevsim/hwstats.c
drivers/net/phy/aquantia_main.c
drivers/net/phy/micrel.c
drivers/net/team/team.c
drivers/net/wireguard/netlink.c
drivers/net/wireguard/selftest/ratelimiter.c
drivers/net/wireless/intel/iwlwifi/Kconfig
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
drivers/nvdimm/namespace_devs.c
drivers/nvdimm/pmem.c
drivers/of/fdt.c
drivers/opp/core.c
drivers/parisc/ccio-dma.c
drivers/parisc/iosapic.c
drivers/perf/arm-cmn.c
drivers/phy/marvell/phy-mvebu-a3700-comphy.c
drivers/pinctrl/pinctrl-ocelot.c
drivers/pinctrl/qcom/pinctrl-sc8180x.c
drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
drivers/reset/reset-imx7.c
drivers/reset/reset-microchip-sparx5.c
drivers/reset/reset-npcm.c
drivers/s390/block/dasd_alias.c
drivers/s390/crypto/vfio_ap_ops.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/qedf/qedf_main.c
drivers/scsi/qla2xxx/qla_target.c
drivers/soc/bcm/brcmstb/biuctrl.c
drivers/soc/sunxi/sunxi_sram.c
drivers/thunderbolt/icm.c
drivers/thunderbolt/nhi.h
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/serial-tegra.c
drivers/tty/serial/sifive.c
drivers/tty/serial/tegra-tcu.c
drivers/usb/core/hub.c
drivers/usb/dwc3/core.c
drivers/usb/serial/option.c
drivers/usb/typec/Kconfig
drivers/video/fbdev/hyperv_fb.c
drivers/xen/xenbus/xenbus_client.c
fs/btrfs/disk-io.c
fs/btrfs/zoned.c
fs/cifs/cifsfs.h
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/transport.c
fs/dax.c
fs/exec.c
fs/exfat/fatent.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/ialloc.c
fs/ext4/mballoc.c
fs/ext4/mballoc.h
fs/nfs/internal.h
fs/nfs/nfs42proc.c
fs/nfs/super.c
fs/nfs/write.c
fs/nfsd/vfs.c
fs/ntfs/super.c
fs/open.c
fs/xfs/xfs_notify_failure.c
include/asm-generic/vmlinux.lds.h
include/linux/cpumask.h
include/linux/dmar.h
include/linux/hp_sdc.h
include/linux/memcontrol.h
include/linux/memremap.h
include/linux/of_device.h
include/linux/pci_ids.h
include/linux/scmi_protocol.h
include/linux/serial_core.h
include/net/bluetooth/hci_sock.h
include/net/bond_3ad.h
include/net/bonding.h
include/net/ieee802154_netdev.h
include/trace/events/scmi.h
io_uring/io_uring.c
io_uring/msg_ring.c
io_uring/net.c
io_uring/opdef.c
io_uring/rw.c
kernel/cgroup/cgroup.c
kernel/fork.c
kernel/nsproxy.c
kernel/workqueue.c
lib/Kconfig.debug
mm/damon/dbgfs.c
mm/frontswap.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/khugepaged.c
mm/madvise.c
mm/memory-failure.c
mm/memory.c
mm/migrate_device.c
mm/page_alloc.c
mm/page_isolation.c
mm/secretmem.c
mm/slab_common.c
mm/slub.c
mm/swap_state.c
mm/vmscan.c
net/batman-adv/hard-interface.c
net/bridge/netfilter/ebtables.c
net/compat.c
net/core/flow_dissector.c
net/core/net_namespace.c
net/ieee802154/socket.c
net/ipv4/ipmr.c
net/ipv4/tcp.c
net/ipv4/udp.c
net/ipv6/af_inet6.c
net/ipv6/ip6mr.c
net/mptcp/protocol.c
net/netfilter/nf_conntrack_ftp.c
net/netfilter/nf_conntrack_irc.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_osf.c
net/sched/cls_api.c
net/sched/sch_taprio.c
net/smc/smc_core.c
net/sunrpc/clnt.c
net/sunrpc/xprt.c
scripts/Makefile.debug
scripts/clang-tools/run-clang-tools.py
scripts/kconfig/lkc.h
scripts/kconfig/menu.c
sound/core/init.c
sound/hda/intel-dsp-config.c
sound/pci/hda/hda_bind.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/nau8824.c
sound/soc/codecs/nau8824.h
sound/soc/codecs/rt5640.c
sound/soc/codecs/rt5640.h
sound/soc/codecs/tas2770.c
sound/soc/fsl/imx-card.c
sound/soc/intel/boards/sof_sdw.c
sound/usb/endpoint.c
sound/usb/endpoint.h
sound/usb/pcm.c
tools/arch/x86/include/asm/cpufeatures.h
tools/hv/hv_kvp_daemon.c
tools/include/linux/gfp.h
tools/include/linux/gfp_types.h [new file with mode: 0644]
tools/include/uapi/asm/errno.h
tools/lib/perf/evlist.c
tools/perf/builtin-record.c
tools/perf/tests/shell/stat_bpf_counters_cgrp.sh [new file with mode: 0755]
tools/perf/tests/wp.c
tools/perf/util/bpf_counter_cgroup.c
tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
tools/perf/util/genelf.c
tools/perf/util/genelf.h
tools/perf/util/symbol-elf.c
tools/perf/util/synthetic-events.c
tools/testing/nvdimm/test/ndtest.c
tools/testing/selftests/Makefile
tools/testing/selftests/drivers/net/bonding/Makefile
tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh [new file with mode: 0755]
tools/testing/selftests/drivers/net/bonding/config
tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh [new file with mode: 0755]
tools/testing/selftests/drivers/net/bonding/lag_lib.sh [new file with mode: 0644]
tools/testing/selftests/drivers/net/team/Makefile [new file with mode: 0644]
tools/testing/selftests/drivers/net/team/config [new file with mode: 0644]
tools/testing/selftests/drivers/net/team/dev_addr_lists.sh [new file with mode: 0755]
tools/testing/selftests/kvm/rseq_test.c
tools/testing/selftests/landlock/Makefile
tools/testing/selftests/lib.mk
tools/testing/selftests/net/forwarding/router_multicast.sh
tools/testing/selftests/net/forwarding/sch_red.sh
tools/testing/selftests/netfilter/nft_concat_range.sh
tools/testing/selftests/timens/Makefile
tools/testing/selftests/timens/vfork_exec.c [deleted file]
tools/testing/selftests/wireguard/qemu/Makefile

index 8ded2e7..1917781 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -71,6 +71,9 @@ Ben M Cahill <ben.m.cahill@intel.com>
 Ben Widawsky <bwidawsk@kernel.org> <ben@bwidawsk.net>
 Ben Widawsky <bwidawsk@kernel.org> <ben.widawsky@intel.com>
 Ben Widawsky <bwidawsk@kernel.org> <benjamin.widawsky@intel.com>
+Bjorn Andersson <andersson@kernel.org> <bjorn@kryo.se>
+Bjorn Andersson <andersson@kernel.org> <bjorn.andersson@linaro.org>
+Bjorn Andersson <andersson@kernel.org> <bjorn.andersson@sonymobile.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
 Björn Töpel <bjorn@kernel.org> <bjorn.topel@gmail.com>
 Björn Töpel <bjorn@kernel.org> <bjorn.topel@intel.com>
@@ -315,6 +318,7 @@ Morten Welinder <welinder@troll.com>
 Mythri P K <mythripk@ti.com>
 Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
 Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
+Neil Armstrong <neil.armstrong@linaro.org> <narmstrong@baylibre.com>
 Nguyen Anh Quynh <aquynh@gmail.com>
 Nicholas Piggin <npiggin@gmail.com> <npiggen@suse.de>
 Nicholas Piggin <npiggin@gmail.com> <npiggin@kernel.dk>
index 6cc7452..1748f16 100644 (file)
--- a/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml
+++ b/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson Firmware registers Interface
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 description: |
   The Meson SoCs have a register bank with status and data shared with the
index 2e208d2..7cdffdb 100644 (file)
--- a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic specific extensions to the Synopsys Designware HDMI Controller
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 allOf:
   - $ref: /schemas/sound/name-prefix.yaml#
index 047fd69..6655a93 100644 (file)
--- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson Display Controller
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 description: |
   The Amlogic Meson Display controller is composed of several components
index bce96b5..4a5e5d9 100644 (file)
--- a/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
@@ -8,7 +8,7 @@ title: Analogix ANX7814 SlimPort (Full-HD Transmitter)
 
 maintainers:
   - Andrzej Hajda <andrzej.hajda@intel.com>
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
   - Robert Foss <robert.foss@linaro.org>
 
 properties:
index c6e81f5..1b2185b 100644 (file)
--- a/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
@@ -8,7 +8,7 @@ title: ITE it66121 HDMI bridge Device Tree Bindings
 
 maintainers:
   - Phong LE <ple@baylibre.com>
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 description: |
   The IT66121 is a high-performance and low-power single channel HDMI
index 44e02de..2e75e37 100644 (file)
--- a/Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Solomon Goldentek Display GKTW70SDAE4SE 7" WVGA LVDS Display Panel
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
   - Thierry Reding <thierry.reding@gmail.com>
 
 allOf:
index 8a9f355..7e14e26 100644 (file)
--- a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -34,8 +34,8 @@ Example:
 Use specific request line passing from dma
 For example, MMC request line is 5
 
-       sdhci: sdhci@98e00000 {
-               compatible = "moxa,moxart-sdhci";
+       mmc: mmc@98e00000 {
+               compatible = "moxa,moxart-mmc";
                reg = <0x98e00000 0x5C>;
                interrupts = <5 0>;
                clocks = <&clk_apb>;
index 6ecb027..199a354 100644 (file)
--- a/Documentation/devicetree/bindings/i2c/amlogic,meson6-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/amlogic,meson6-i2c.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson I2C Controller
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
   - Beniamino Galvani <b.galvani@gmail.com>
 
 allOf:
index 09c8948..fa4f768 100644 (file)
--- a/Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml
+++ b/Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Generic i.MX bus frequency device
 
 maintainers:
-  - Leonard Crestez <leonard.crestez@nxp.com>
+  - Peng Fan <peng.fan@nxp.com>
 
 description: |
   The i.MX SoC family has multiple buses for which clock frequency (and
index 85c85b6..e18107e 100644 (file)
--- a/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
@@ -96,7 +96,7 @@ properties:
               Documentation/devicetree/bindings/arm/cpus.yaml).
 
         required:
-          - fiq-index
+          - apple,fiq-index
           - cpus
 
 required:
index ea06976..dfd26b9 100644 (file)
--- a/Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml
+++ b/Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson Message-Handling-Unit Controller
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 description: |
   The Amlogic's Meson SoCs Message-Handling-Unit (MHU) is a mailbox controller
index bee93bd..e551be5 100644 (file)
--- a/Documentation/devicetree/bindings/media/amlogic,axg-ge2d.yaml
+++ b/Documentation/devicetree/bindings/media/amlogic,axg-ge2d.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic GE2D Acceleration Unit
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 properties:
   compatible:
index 5044c4b..b827eda 100644 (file)
--- a/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
+++ b/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Video Decoder
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
   - Maxime Jourdan <mjourdan@baylibre.com>
 
 description: |
index d93aea6..8d844f4 100644 (file)
--- a/Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
+++ b/Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson AO-CEC Controller
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 description: |
   The Amlogic Meson AO-CEC module is present is Amlogic SoCs and its purpose is
index 445e46f..2b39fce 100644 (file)
--- a/Documentation/devicetree/bindings/memory-controllers/fsl/imx8m-ddrc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/fsl/imx8m-ddrc.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: i.MX8M DDR Controller
 
 maintainers:
-  - Leonard Crestez <leonard.crestez@nxp.com>
+  - Peng Fan <peng.fan@nxp.com>
 
 description:
   The DDRC block is integrated in i.MX8M for interfacing with DDR based
index a3b976f..5750cc0 100644 (file)
--- a/Documentation/devicetree/bindings/mfd/khadas,mcu.yaml
+++ b/Documentation/devicetree/bindings/mfd/khadas,mcu.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Khadas on-board Microcontroller Device Tree Bindings
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 description: |
   Khadas embeds a microcontroller on their VIM and Edge boards adding some
index 608e1d6..ddd5a07 100644 (file)
--- a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson DWMAC Ethernet controller
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
   - Martin Blumenstingl <martin.blumenstingl@googlemail.com>
 
 # We need a select here so we don't match all nodes with 'snps,dwmac'
index 59663e8..a202b6c 100644 (file)
--- a/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml
+++ b/Documentation/devicetree/bindings/opp/opp-v2-kryo-cpu.yaml
@@ -40,6 +40,7 @@ properties:
 patternProperties:
   '^opp-?[0-9]+$':
     type: object
+    additionalProperties: false
 
     properties:
       opp-hz: true
index 14a7a68..df8442f 100644 (file)
--- a/Documentation/devicetree/bindings/opp/opp-v2-qcom-level.yaml
+++ b/Documentation/devicetree/bindings/opp/opp-v2-qcom-level.yaml
@@ -19,6 +19,7 @@ properties:
 patternProperties:
   '^opp-?[0-9]+$':
     type: object
+    additionalProperties: false
 
     properties:
       opp-level: true
index be485f5..5eddaed 100644 (file)
--- a/Documentation/devicetree/bindings/phy/amlogic,axg-mipi-dphy.yaml
+++ b/Documentation/devicetree/bindings/phy/amlogic,axg-mipi-dphy.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic AXG MIPI D-PHY
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 properties:
   compatible:
index 399ebde..f3a5fba 100644 (file)
--- a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic G12A USB2 PHY
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 properties:
   compatible:
index 453c083..868b4e6 100644 (file)
--- a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic G12A USB3 + PCIE Combo PHY
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 properties:
   compatible:
index 33d1d37..624e14f 100644 (file)
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml
@@ -8,7 +8,6 @@ title: Qualcomm Technologies, Inc. Low Power Audio SubSystem (LPASS)
   Low Power Island (LPI) TLMM block
 
 maintainers:
-  - Srinivasa Rao Mandadapu <srivasam@codeaurora.org>
   - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 
 description: |
index 2d22816..2bd60c4 100644 (file)
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Technologies, Inc. SC7280 TLMM block
 
 maintainers:
-  - Rajendra Nayak <rnayak@codeaurora.org>
+  - Bjorn Andersson <andersson@kernel.org>
 
 description: |
   This binding describes the Top Level Mode Multiplexer block found in the
index 5390e98..43a9322 100644 (file)
--- a/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
+++ b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Amlogic Meson Everything-Else Power Domains
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 description: |+
   The Everything-Else Power Domains node should be the child of a syscon
index 0ccca49..3934a2b 100644 (file)
--- a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
+++ b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm RPM/RPMh Power domains
 
 maintainers:
-  - Rajendra Nayak <rnayak@codeaurora.org>
+  - Bjorn Andersson <andersson@kernel.org>
 
 description:
   For RPM/RPMh Power domains, we communicate a performance state to RPM/RPMh
index 494a454..98db2aa 100644 (file)
--- a/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml
+++ b/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson SoC Reset Controller
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 properties:
   compatible:
index 444be32..09c6c90 100644 (file)
--- a/Documentation/devicetree/bindings/rng/amlogic,meson-rng.yaml
+++ b/Documentation/devicetree/bindings/rng/amlogic,meson-rng.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson Random number generator
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 properties:
   compatible:
index 72e8868..7822705 100644 (file)
--- a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson SoC UART Serial Interface
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 description: |
   The Amlogic Meson SoC UART Serial Interface is present on a large range
index 17db87c..c3c5990 100644 (file)
--- a/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml
+++ b/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Canvas Video Lookup Table
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
   - Maxime Jourdan <mjourdan@baylibre.com>
 
 description: |
index 50de0da..0c10f76 100644 (file)
--- a/Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml
+++ b/Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson SPI Communication Controller
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 allOf:
   - $ref: "spi-controller.yaml#"
index 8a9d526..ac3b2ec 100644 (file)
--- a/Documentation/devicetree/bindings/spi/amlogic,meson6-spifc.yaml
+++ b/Documentation/devicetree/bindings/spi/amlogic,meson6-spifc.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson SPI Flash Controller
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 allOf:
   - $ref: "spi-controller.yaml#"
index e349fa5..daf2a85 100644 (file)
--- a/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml
+++ b/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Amlogic Meson G12A DWC3 USB SoC Controller Glue
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 description: |
   The Amlogic G12A embeds a DWC3 USB IP Core configured for USB2 and USB3
index c7459cf..497d604 100644 (file)
--- a/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: Meson GXBB SoCs Watchdog timer
 
 maintainers:
-  - Neil Armstrong <narmstrong@baylibre.com>
+  - Neil Armstrong <neil.armstrong@linaro.org>
 
 allOf:
   - $ref: watchdog.yaml#
index 73ad348..c277a8e 100644 (file)
--- a/Documentation/i2c/dev-interface.rst
+++ b/Documentation/i2c/dev-interface.rst
@@ -148,7 +148,7 @@ You can do plain I2C transactions by using read(2) and write(2) calls.
 You do not need to pass the address byte; instead, set it through
 ioctl I2C_SLAVE before you try to access the device.
 
-You can do SMBus level transactions (see documentation file smbus-protocol
+You can do SMBus level transactions (see documentation file smbus-protocol.rst
 for details) through the following functions::
 
   __s32 i2c_smbus_write_quick(int file, __u8 value);
index 82ea3e1..58fb143 100644 (file)
--- a/Documentation/i2c/slave-interface.rst
+++ b/Documentation/i2c/slave-interface.rst
@@ -32,9 +32,9 @@ User manual
 ===========
 
 I2C slave backends behave like standard I2C clients. So, you can instantiate
-them as described in the document 'instantiating-devices'. The only difference
-is that i2c slave backends have their own address space. So, you have to add
-0x1000 to the address you would originally request. An example for
+them as described in the document instantiating-devices.rst. The only
+difference is that i2c slave backends have their own address space. So, you
+have to add 0x1000 to the address you would originally request. An example for
 instantiating the slave-eeprom driver from userspace at the 7 bit address 0x64
 on bus 1::
 
index e3b126c..47f7cbf 100644 (file)
--- a/Documentation/i2c/writing-clients.rst
+++ b/Documentation/i2c/writing-clients.rst
@@ -364,7 +364,7 @@ stop condition is issued between transaction. The i2c_msg structure
 contains for each message the client address, the number of bytes of the
 message and the message data itself.
 
-You can read the file ``i2c-protocol`` for more information about the
+You can read the file i2c-protocol.rst for more information about the
 actual I2C protocol.
 
 
@@ -414,7 +414,7 @@ transactions return 0 on success; the 'read' transactions return the read
 value, except for block transactions, which return the number of values
 read. The block buffers need not be longer than 32 bytes.
 
-You can read the file ``smbus-protocol`` for more information about the
+You can read the file smbus-protocol.rst for more information about the
 actual SMBus protocol.
 
 
index e263dfc..2135106 100644 (file)
--- a/Documentation/networking/mptcp-sysctl.rst
+++ b/Documentation/networking/mptcp-sysctl.rst
@@ -47,7 +47,6 @@ allow_join_initial_addr_port - BOOLEAN
        Default: 1
 
 pm_type - INTEGER
-
        Set the default path manager type to use for each new MPTCP
        socket. In-kernel path management will control subflow
        connections and address advertisements according to
index 834945e..1120d71 100644 (file)
--- a/Documentation/networking/nf_conntrack-sysctl.rst
+++ b/Documentation/networking/nf_conntrack-sysctl.rst
@@ -70,15 +70,6 @@ nf_conntrack_generic_timeout - INTEGER (seconds)
        Default for generic timeout.  This refers to layer 4 unknown/unsupported
        protocols.
 
-nf_conntrack_helper - BOOLEAN
-       - 0 - disabled (default)
-       - not 0 - enabled
-
-       Enable automatic conntrack helper assignment.
-       If disabled it is required to set up iptables rules to assign
-       helpers to connections.  See the CT target description in the
-       iptables-extensions(8) man page for further information.
-
 nf_conntrack_icmp_timeout - INTEGER (seconds)
        default 30
 
index 936490d..56ff555 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -671,7 +671,8 @@ F:  fs/afs/
 F:     include/trace/events/afs.h
 
 AGPGART DRIVER
-M:     David Airlie <airlied@linux.ie>
+M:     David Airlie <airlied@redhat.com>
+L:     dri-devel@lists.freedesktop.org
 S:     Maintained
 T:     git git://anongit.freedesktop.org/drm/drm
 F:     drivers/char/agp/
@@ -1010,7 +1011,6 @@ F:        drivers/spi/spi-amd.c
 
 AMD MP2 I2C DRIVER
 M:     Elie Morisse <syniurge@gmail.com>
-M:     Nehal Shah <nehal-bakulchandra.shah@amd.com>
 M:     Shyam Sundar S K <shyam-sundar.s-k@amd.com>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
@@ -1803,7 +1803,7 @@ N:        sun[x456789]i
 N:     sun50i
 
 ARM/Amlogic Meson SoC CLOCK FRAMEWORK
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 M:     Jerome Brunet <jbrunet@baylibre.com>
 L:     linux-amlogic@lists.infradead.org
 S:     Maintained
@@ -1828,7 +1828,7 @@ F:        Documentation/devicetree/bindings/sound/amlogic*
 F:     sound/soc/meson/
 
 ARM/Amlogic Meson SoC support
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 M:     Kevin Hilman <khilman@baylibre.com>
 R:     Jerome Brunet <jbrunet@baylibre.com>
 R:     Martin Blumenstingl <martin.blumenstingl@googlemail.com>
@@ -2531,7 +2531,7 @@ W:        http://www.digriz.org.uk/ts78xx/kernel
 F:     arch/arm/mach-orion5x/ts78xx-*
 
 ARM/OXNAS platform support
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-oxnas@groups.io (moderated for non-subscribers)
 S:     Maintained
@@ -2579,7 +2579,7 @@ W:        http://www.armlinux.org.uk/
 
 ARM/QUALCOMM SUPPORT
 M:     Andy Gross <agross@kernel.org>
-M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+M:     Bjorn Andersson <andersson@kernel.org>
 R:     Konrad Dybcio <konrad.dybcio@somainline.org>
 L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
@@ -5245,6 +5245,7 @@ F:        block/blk-throttle.c
 F:     include/linux/blk-cgroup.h
 
 CONTROL GROUP - CPUSET
+M:     Waiman Long <longman@redhat.com>
 M:     Zefan Li <lizefan.x@bytedance.com>
 L:     cgroups@vger.kernel.org
 S:     Maintained
@@ -6753,7 +6754,7 @@ F:        Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml
 F:     drivers/gpu/drm/panel/panel-widechips-ws2401.c
 
 DRM DRIVERS
-M:     David Airlie <airlied@linux.ie>
+M:     David Airlie <airlied@gmail.com>
 M:     Daniel Vetter <daniel@ffwll.ch>
 L:     dri-devel@lists.freedesktop.org
 S:     Maintained
@@ -6792,7 +6793,7 @@ F:        Documentation/devicetree/bindings/display/allwinner*
 F:     drivers/gpu/drm/sun4i/
 
 DRM DRIVERS FOR AMLOGIC SOCS
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 L:     dri-devel@lists.freedesktop.org
 L:     linux-amlogic@lists.infradead.org
 S:     Supported
@@ -6814,7 +6815,7 @@ F:        drivers/gpu/drm/atmel-hlcdc/
 
 DRM DRIVERS FOR BRIDGE CHIPS
 M:     Andrzej Hajda <andrzej.hajda@intel.com>
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 M:     Robert Foss <robert.foss@linaro.org>
 R:     Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
 R:     Jonas Karlman <jonas@kwiboo.se>
@@ -8652,8 +8653,8 @@ F:        drivers/input/touchscreen/goodix*
 
 GOOGLE ETHERNET DRIVERS
 M:     Jeroen de Borst <jeroendb@google.com>
-R:     Catherine Sullivan <csully@google.com>
-R:     David Awogbemila <awogbemila@google.com>
+M:     Catherine Sullivan <csully@google.com>
+R:     Shailend Chand <shailend@google.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     Documentation/networking/device_drivers/ethernet/google/gve.rst
@@ -8943,7 +8944,7 @@ F:        include/linux/hw_random.h
 
 HARDWARE SPINLOCK CORE
 M:     Ohad Ben-Cohen <ohad@wizery.com>
-M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+M:     Bjorn Andersson <andersson@kernel.org>
 R:     Baolin Wang <baolin.wang7@gmail.com>
 L:     linux-remoteproc@vger.kernel.org
 S:     Maintained
@@ -9122,7 +9123,7 @@ S:        Maintained
 F:     drivers/dma/hisi_dma.c
 
 HISILICON GPIO DRIVER
-M:     Luo Jiaxing <luojiaxing@huawei.com>
+M:     Jay Fang <f.fangjian@huawei.com>
 L:     linux-gpio@vger.kernel.org
 S:     Maintained
 F:     drivers/gpio/gpio-hisi.c
@@ -10828,7 +10829,7 @@ F:      drivers/media/tuners/it913x*
 
 ITE IT66121 HDMI BRIDGE DRIVER
 M:     Phong LE <ple@baylibre.com>
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 S:     Maintained
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 F:     Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
@@ -11347,7 +11348,7 @@ F:      kernel/debug/
 F:     kernel/module/kdb.c
 
 KHADAS MCU MFD DRIVER
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 L:     linux-amlogic@lists.infradead.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/mfd/khadas,mcu.yaml
@@ -13218,7 +13219,7 @@ S:      Maintained
 F:     drivers/watchdog/menz69_wdt.c
 
 MESON AO CEC DRIVER FOR AMLOGIC SOCS
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 L:     linux-media@vger.kernel.org
 L:     linux-amlogic@lists.infradead.org
 S:     Supported
@@ -13229,7 +13230,7 @@ F:      drivers/media/cec/platform/meson/ao-cec-g12a.c
 F:     drivers/media/cec/platform/meson/ao-cec.c
 
 MESON GE2D DRIVER FOR AMLOGIC SOCS
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 L:     linux-media@vger.kernel.org
 L:     linux-amlogic@lists.infradead.org
 S:     Supported
@@ -13245,7 +13246,7 @@ F:      Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt
 F:     drivers/mtd/nand/raw/meson_*
 
 MESON VIDEO DECODER DRIVER FOR AMLOGIC SOCS
-M:     Neil Armstrong <narmstrong@baylibre.com>
+M:     Neil Armstrong <neil.armstrong@linaro.org>
 L:     linux-media@vger.kernel.org
 L:     linux-amlogic@lists.infradead.org
 S:     Supported
@@ -16125,7 +16126,7 @@ F:      drivers/gpio/gpio-sama5d2-piobu.c
 F:     drivers/pinctrl/pinctrl-at91*
 
 PIN CONTROLLER - QUALCOMM
-M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+M:     Bjorn Andersson <andersson@kernel.org>
 L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/pinctrl/qcom,*.txt
@@ -16818,7 +16819,7 @@ F:      Documentation/devicetree/bindings/media/*camss*
 F:     drivers/media/platform/qcom/camss/
 
 QUALCOMM CLOCK DRIVERS
-M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+M:     Bjorn Andersson <andersson@kernel.org>
 L:     linux-arm-msm@vger.kernel.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux.git
@@ -16857,6 +16858,7 @@ F:      drivers/net/ethernet/qualcomm/emac/
 
 QUALCOMM ETHQOS ETHERNET DRIVER
 M:     Vinod Koul <vkoul@kernel.org>
+R:     Bhupesh Sharma <bhupesh.sharma@linaro.org>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/qcom,ethqos.txt
@@ -17307,7 +17309,7 @@ S:      Supported
 F:     fs/reiserfs/
 
 REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
-M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+M:     Bjorn Andersson <andersson@kernel.org>
 M:     Mathieu Poirier <mathieu.poirier@linaro.org>
 L:     linux-remoteproc@vger.kernel.org
 S:     Maintained
@@ -17320,7 +17322,7 @@ F:      include/linux/remoteproc.h
 F:     include/linux/remoteproc/
 
 REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
-M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+M:     Bjorn Andersson <andersson@kernel.org>
 M:     Mathieu Poirier <mathieu.poirier@linaro.org>
 L:     linux-remoteproc@vger.kernel.org
 S:     Maintained
@@ -19959,6 +19961,7 @@ S:      Supported
 F:     drivers/net/team/
 F:     include/linux/if_team.h
 F:     include/uapi/linux/if_team.h
+F:     tools/testing/selftests/net/team/
 
 TECHNOLOGIC SYSTEMS TS-5500 PLATFORM SUPPORT
 M:     "Savoir-faire Linux Inc." <kernel@savoirfairelinux.com>
@@ -21565,7 +21568,7 @@ F:      drivers/gpio/gpio-virtio.c
 F:     include/uapi/linux/virtio_gpio.h
 
 VIRTIO GPU DRIVER
-M:     David Airlie <airlied@linux.ie>
+M:     David Airlie <airlied@redhat.com>
 M:     Gerd Hoffmann <kraxel@redhat.com>
 R:     Gurchetan Singh <gurchetansingh@chromium.org>
 R:     Chia-I Wu <olvaffe@gmail.com>
index a5e9d93..647a42a 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
index 7da42a5..7e50fe6 100644 (file)
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
                        mmc1: mmc@0 {
                                compatible = "ti,am335-sdhci";
                                ti,needs-special-reset;
-                               dmas = <&edma_xbar 24 0 0
-                                       &edma_xbar 25 0 0>;
+                               dmas = <&edma 24 0>, <&edma 25 0>;
                                dma-names = "tx", "rx";
                                interrupts = <64>;
                                reg = <0x0 0x1000>;
index c260aa1..a1f029e 100644 (file)
--- a/arch/arm/boot/dts/am5748.dtsi
+++ b/arch/arm/boot/dts/am5748.dtsi
        status = "disabled";
 };
 
+&usb4_tm {
+       status = "disabled";
+};
+
 &atl_tm {
        status = "disabled";
 };
index 4c22e44..cc514cf 100644 (file)
--- a/arch/arm/boot/dts/integratorap-im-pd1.dts
+++ b/arch/arm/boot/dts/integratorap-im-pd1.dts
                /* 640x480 16bpp @ 25.175MHz is 36827428 bytes/s */
                max-memory-bandwidth = <40000000>;
                memory-region = <&impd1_ram>;
+               dma-ranges;
 
                port@0 {
                        #address-cells = <1>;
index 9b652cc..9148287 100644 (file)
--- a/arch/arm/boot/dts/integratorap.dts
+++ b/arch/arm/boot/dts/integratorap.dts
 
        pci: pciv3@62000000 {
                compatible = "arm,integrator-ap-pci", "v3,v360epc-pci";
+               device_type = "pci";
                #interrupt-cells = <1>;
                #size-cells = <2>;
                #address-cells = <3>;
                lm0: bus@c0000000 {
                        compatible = "simple-bus";
                        ranges = <0x00000000 0xc0000000 0x10000000>;
-                       dma-ranges = <0x00000000 0x80000000 0x10000000>;
+                       dma-ranges = <0x00000000 0xc0000000 0x10000000>;
                        reg = <0xc0000000 0x10000000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                lm1: bus@d0000000 {
                        compatible = "simple-bus";
                        ranges = <0x00000000 0xd0000000 0x10000000>;
-                       dma-ranges = <0x00000000 0x80000000 0x10000000>;
+                       dma-ranges = <0x00000000 0xd0000000 0x10000000>;
                        reg = <0xd0000000 0x10000000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                lm2: bus@e0000000 {
                        compatible = "simple-bus";
                        ranges = <0x00000000 0xe0000000 0x10000000>;
-                       dma-ranges = <0x00000000 0x80000000 0x10000000>;
+                       dma-ranges = <0x00000000 0xe0000000 0x10000000>;
                        reg = <0xe0000000 0x10000000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                lm3: bus@f0000000 {
                        compatible = "simple-bus";
                        ranges = <0x00000000 0xf0000000 0x10000000>;
-                       dma-ranges = <0x00000000 0x80000000 0x10000000>;
+                       dma-ranges = <0x00000000 0xf0000000 0x10000000>;
                        reg = <0xf0000000 0x10000000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
index 894bf9d..0bf8187 100644 (file)
--- a/arch/arm/boot/dts/lan966x.dtsi
+++ b/arch/arm/boot/dts/lan966x.dtsi
 
                        phy0: ethernet-phy@1 {
                                reg = <1>;
-                               interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
                                status = "disabled";
                        };
 
                        phy1: ethernet-phy@2 {
                                reg = <2>;
-                               interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
                                status = "disabled";
                        };
                };
index eb5291b..e07b807 100644 (file)
--- a/arch/arm/boot/dts/moxart-uc7112lx.dts
+++ b/arch/arm/boot/dts/moxart-uc7112lx.dts
@@ -79,7 +79,7 @@
        clocks = <&ref12>;
 };
 
-&sdhci {
+&mmc {
        status = "okay";
 };
 
index f5f070a..764832d 100644 (file)
--- a/arch/arm/boot/dts/moxart.dtsi
+++ b/arch/arm/boot/dts/moxart.dtsi
@@ -93,8 +93,8 @@
                        clock-names = "PCLK";
                };
 
-               sdhci: sdhci@98e00000 {
-                       compatible = "moxa,moxart-sdhci";
+               mmc: mmc@98e00000 {
+                       compatible = "moxa,moxart-mmc";
                        reg = <0x98e00000 0x5C>;
                        interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&clk_apb>;
index 926cde5..d0c2416 100644 (file)
--- a/arch/arm/mach-sunplus/Kconfig
+++ b/arch/arm/mach-sunplus/Kconfig
@@ -18,8 +18,8 @@ config SOC_SP7021
        select ARM_PSCI
        select PINCTRL
        select PINCTRL_SPPCTL
-       select SERIAL_SUNPLUS
-       select SERIAL_SUNPLUS_CONSOLE
+       select SERIAL_SUNPLUS if TTY
+       select SERIAL_SUNPLUS_CONSOLE if TTY
        help
          Support for Sunplus SP7021 SoC. It is based on ARM 4-core
          Cortex-A7 with various peripherals (e.g.: I2C, SPI, SDIO,
index c97f4e0..32f6f2f 100644 (file)
--- a/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts
         * CPLD_reset is RESET_SOFT in schematic
         */
        gpio-line-names =
-               "CPLD_D[1]", "CPLD_int", "CPLD_reset", "",
-               "", "CPLD_D[0]", "", "",
-               "", "", "", "CPLD_D[2]",
-               "CPLD_D[3]", "CPLD_D[4]", "CPLD_D[5]", "CPLD_D[6]",
-               "CPLD_D[7]", "", "", "",
+               "CPLD_D[6]", "CPLD_int", "CPLD_reset", "",
+               "", "CPLD_D[7]", "", "",
+               "", "", "", "CPLD_D[5]",
+               "CPLD_D[4]", "CPLD_D[3]", "CPLD_D[2]", "CPLD_D[1]",
+               "CPLD_D[0]", "", "", "",
                "", "", "", "",
                "", "", "", "KBD_intK",
                "", "", "", "";
index 286d2df..7e0aeb2 100644 (file)
--- a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts
@@ -5,7 +5,6 @@
 
 /dts-v1/;
 
-#include <dt-bindings/phy/phy-imx8-pcie.h>
 #include "imx8mm-tqma8mqml.dtsi"
 #include "mba8mx.dtsi"
 
index 16ee9b5..f649dfa 100644 (file)
--- a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
@@ -3,6 +3,7 @@
  * Copyright 2020-2021 TQ-Systems GmbH
  */
 
+#include <dt-bindings/phy/phy-imx8-pcie.h>
 #include "imx8mm.dtsi"
 
 / {
index b379c46..3ec0c9a 100644 (file)
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
                                nxp,dvs-standby-voltage = <850000>;
                                regulator-always-on;
                                regulator-boot-on;
-                               regulator-max-microvolt = <950000>;
-                               regulator-min-microvolt = <850000>;
+                               regulator-max-microvolt = <1050000>;
+                               regulator-min-microvolt = <805000>;
                                regulator-name = "On-module +VDD_ARM (BUCK2)";
                                regulator-ramp-delay = <3125>;
                        };
                        reg_vdd_dram: BUCK3 {
                                regulator-always-on;
                                regulator-boot-on;
-                               regulator-max-microvolt = <950000>;
-                               regulator-min-microvolt = <850000>;
+                               regulator-max-microvolt = <1000000>;
+                               regulator-min-microvolt = <805000>;
                                regulator-name = "On-module +VDD_GPU_VPU_DDR (BUCK3)";
                        };
 
                        reg_vdd_snvs: LDO2 {
                                regulator-always-on;
                                regulator-boot-on;
-                               regulator-max-microvolt = <900000>;
+                               regulator-max-microvolt = <800000>;
                                regulator-min-microvolt = <800000>;
                                regulator-name = "On-module +V0.8_SNVS (LDO2)";
                        };
index 0c71b74..cb2836b 100644 (file)
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
                                                         <&clk IMX8MN_CLK_GPU_SHADER>,
                                                         <&clk IMX8MN_CLK_GPU_BUS_ROOT>,
                                                         <&clk IMX8MN_CLK_GPU_AHB>;
-                                               resets = <&src IMX8MQ_RESET_GPU_RESET>;
                                        };
 
                                        pgc_dispmix: power-domain@3 {
index d8ca529..0e237b2 100644 (file)
--- a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
                switch-1 {
                        label = "S12";
                        linux,code = <BTN_0>;
-                       gpios = <&gpio5 26 GPIO_ACTIVE_LOW>;
+                       gpios = <&gpio5 27 GPIO_ACTIVE_LOW>;
                };
 
                switch-2 {
                        label = "S13";
                        linux,code = <BTN_1>;
-                       gpios = <&gpio5 27 GPIO_ACTIVE_LOW>;
+                       gpios = <&gpio5 26 GPIO_ACTIVE_LOW>;
                };
        };
 
 
 &pcf85063 {
        /* RTC_EVENT# is connected on MBa8MPxL */
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_pcf85063>;
        interrupt-parent = <&gpio4>;
        interrupts = <28 IRQ_TYPE_EDGE_FALLING>;
 };
                fsl,pins = <MX8MP_IOMUXC_SAI5_RXC__GPIO3_IO20           0x10>; /* Power enable */
        };
 
+       pinctrl_pcf85063: pcf85063grp {
+               fsl,pins = <MX8MP_IOMUXC_SAI3_RXFS__GPIO4_IO28          0x80>;
+       };
+
        /* LVDS Backlight */
        pinctrl_pwm2: pwm2grp {
                fsl,pins = <MX8MP_IOMUXC_SAI5_RXD0__PWM2_OUT            0x14>;
index 6630ec5..211e6a1 100644 (file)
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_reg_can>;
                regulator-name = "can2_stby";
-               gpio = <&gpio3 19 GPIO_ACTIVE_HIGH>;
-               enable-active-high;
+               gpio = <&gpio3 19 GPIO_ACTIVE_LOW>;
                regulator-min-microvolt = <3300000>;
                regulator-max-microvolt = <3300000>;
        };
                        lan1: port@0 {
                                reg = <0>;
                                label = "lan1";
+                               phy-mode = "internal";
                                local-mac-address = [00 00 00 00 00 00];
                        };
 
                        lan2: port@1 {
                                reg = <1>;
                                label = "lan2";
+                               phy-mode = "internal";
                                local-mac-address = [00 00 00 00 00 00];
                        };
 
                        lan3: port@2 {
                                reg = <2>;
                                label = "lan3";
+                               phy-mode = "internal";
                                local-mac-address = [00 00 00 00 00 00];
                        };
 
                        lan4: port@3 {
                                reg = <3>;
                                label = "lan4";
+                               phy-mode = "internal";
                                local-mac-address = [00 00 00 00 00 00];
                        };
 
                        lan5: port@4 {
                                reg = <4>;
                                label = "lan5";
+                               phy-mode = "internal";
                                local-mac-address = [00 00 00 00 00 00];
                        };
 
-                       port@6 {
-                               reg = <6>;
+                       port@5 {
+                               reg = <5>;
                                label = "cpu";
                                ethernet = <&fec>;
                                phy-mode = "rgmii-id";
index 60c1b01..bb56390 100644 (file)
--- a/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
                                compatible = "fsl,imx8ulp-pcc3";
                                reg = <0x292d0000 0x10000>;
                                #clock-cells = <1>;
+                               #reset-cells = <1>;
                        };
 
                        tpm5: tpm@29340000 {
                                compatible = "fsl,imx8ulp-pcc4";
                                reg = <0x29800000 0x10000>;
                                #clock-cells = <1>;
+                               #reset-cells = <1>;
                        };
 
                        lpi2c6: i2c@29840000 {
                                compatible = "fsl,imx8ulp-pcc5";
                                reg = <0x2da70000 0x10000>;
                                #clock-cells = <1>;
+                               #reset-cells = <1>;
                        };
                };
 
index 13d7f26..dac3b69 100644 (file)
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
                                        <&gem_noc MASTER_APPSS_PROC 0 &cnoc2 SLAVE_USB3_0 0>;
                        interconnect-names = "usb-ddr", "apps-usb";
 
+                       wakeup-source;
+
                        usb_1_dwc3: usb@a600000 {
                                compatible = "snps,dwc3";
                                reg = <0 0x0a600000 0 0xe000>;
                                phys = <&usb_1_hsphy>, <&usb_1_ssphy>;
                                phy-names = "usb2-phy", "usb3-phy";
                                maximum-speed = "super-speed";
-                               wakeup-source;
                        };
                };
 
index 84dc92d..4c404e2 100644 (file)
--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
 };
 
 &remoteproc_adsp {
-       firmware-name = "qcom/sc8280xp/qcadsp8280.mbn";
+       firmware-name = "qcom/sc8280xp/LENOVO/21BX/qcadsp8280.mbn";
 
        status = "okay";
 };
 
 &remoteproc_nsp0 {
-       firmware-name = "qcom/sc8280xp/qccdsp8280.mbn";
+       firmware-name = "qcom/sc8280xp/LENOVO/21BX/qccdsp8280.mbn";
 
        status = "okay";
 };
index 7d509ec..916f12b 100644 (file)
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
                                        compute-cb@1 {
                                                compatible = "qcom,fastrpc-compute-cb";
                                                reg = <1>;
-                                               iommus = <&apps_smmu 0x1401 0x2040>,
-                                                        <&apps_smmu 0x1421 0x0>,
-                                                        <&apps_smmu 0x2001 0x420>,
-                                                        <&apps_smmu 0x2041 0x0>;
+                                               iommus = <&apps_smmu 0x1001 0x0460>;
                                        };
 
                                        compute-cb@2 {
                                                compatible = "qcom,fastrpc-compute-cb";
                                                reg = <2>;
-                                               iommus = <&apps_smmu 0x2 0x3440>,
-                                                        <&apps_smmu 0x22 0x3400>;
+                                               iommus = <&apps_smmu 0x1002 0x0460>;
                                        };
 
                                        compute-cb@3 {
                                                compatible = "qcom,fastrpc-compute-cb";
                                                reg = <3>;
-                                               iommus = <&apps_smmu 0x3 0x3440>,
-                                                        <&apps_smmu 0x1423 0x0>,
-                                                        <&apps_smmu 0x2023 0x0>;
+                                               iommus = <&apps_smmu 0x1003 0x0460>;
                                        };
 
                                        compute-cb@4 {
                                                compatible = "qcom,fastrpc-compute-cb";
                                                reg = <4>;
-                                               iommus = <&apps_smmu 0x4 0x3440>,
-                                                        <&apps_smmu 0x24 0x3400>;
+                                               iommus = <&apps_smmu 0x1004 0x0460>;
                                        };
 
                                        compute-cb@5 {
                                                compatible = "qcom,fastrpc-compute-cb";
                                                reg = <5>;
-                                               iommus = <&apps_smmu 0x5 0x3440>,
-                                                        <&apps_smmu 0x25 0x3400>;
+                                               iommus = <&apps_smmu 0x1005 0x0460>;
                                        };
 
                                        compute-cb@6 {
                                                compatible = "qcom,fastrpc-compute-cb";
                                                reg = <6>;
-                                               iommus = <&apps_smmu 0x6 0x3460>;
+                                               iommus = <&apps_smmu 0x1006 0x0460>;
                                        };
 
                                        compute-cb@7 {
                                                compatible = "qcom,fastrpc-compute-cb";
                                                reg = <7>;
-                                               iommus = <&apps_smmu 0x7 0x3460>;
+                                               iommus = <&apps_smmu 0x1007 0x0460>;
                                        };
 
                                        compute-cb@8 {
                                                compatible = "qcom,fastrpc-compute-cb";
                                                reg = <8>;
-                                               iommus = <&apps_smmu 0x8 0x3460>;
+                                               iommus = <&apps_smmu 0x1008 0x0460>;
                                        };
 
                                        /* note: secure cb9 in downstream */
index e72a044..d9b08df 100644 (file)
 
                ufs_mem_phy: phy@1d87000 {
                        compatible = "qcom,sm8350-qmp-ufs-phy";
-                       reg = <0 0x01d87000 0 0xe10>;
+                       reg = <0 0x01d87000 0 0x1c4>;
                        #address-cells = <2>;
                        #size-cells = <2>;
                        ranges;
index 7249871..5eecbef 100644 (file)
@@ -2,8 +2,8 @@
 /*
  * Copyright (c) 2020 Fuzhou Rockchip Electronics Co., Ltd
  * Copyright (c) 2020 Engicam srl
- * Copyright (c) 2020 Amarula Solutons
- * Copyright (c) 2020 Amarula Solutons(India)
+ * Copyright (c) 2020 Amarula Solutions
+ * Copyright (c) 2020 Amarula Solutions(India)
  */
 
 #include <dt-bindings/gpio/gpio.h>
index 31ebb4e..0f9cc04 100644 (file)
@@ -88,3 +88,8 @@
                };
        };
 };
+
+&wlan_host_wake_l {
+       /* Kevin has an external pull up, but Bob does not. */
+       rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
+};
index cd07464..ee6095b 100644 (file)
 &edp {
        status = "okay";
 
+       /*
+        * eDP PHY/clk don't sync reliably at anything other than 24 MHz. Only
+        * set this here, because rk3399-gru.dtsi ensures we can generate this
+        * off GPLL=600MHz, whereas some other RK3399 boards may not.
+        */
+       assigned-clocks = <&cru PCLK_EDP>;
+       assigned-clock-rates = <24000000>;
+
        ports {
                edp_out: port@1 {
                        reg = <1>;
@@ -578,6 +586,7 @@ ap_i2c_tp: &i2c5 {
        };
 
        wlan_host_wake_l: wlan-host-wake-l {
+               /* Kevin has an external pull up, but Bob does not */
                rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
        };
 };
index b1ac3a8..aa3e21b 100644 (file)
@@ -62,7 +62,6 @@
        vcc5v0_host: vcc5v0-host-regulator {
                compatible = "regulator-fixed";
                gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
-               enable-active-low;
                pinctrl-names = "default";
                pinctrl-0 = <&vcc5v0_host_en>;
                regulator-name = "vcc5v0_host";
index d943559..a05460b 100644 (file)
 
        vcc3v3_sd: vcc3v3_sd {
                compatible = "regulator-fixed";
-               enable-active-low;
                gpio = <&gpio0 RK_PA5 GPIO_ACTIVE_LOW>;
                pinctrl-names = "default";
                pinctrl-0 = <&vcc_sd_h>;
index 02d5f5a..528bb4e 100644 (file)
        disable-wp;
        pinctrl-names = "default";
        pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
-       sd-uhs-sdr104;
+       sd-uhs-sdr50;
        vmmc-supply = <&vcc3v3_sd>;
        vqmmc-supply = <&vccio_sd>;
        status = "okay";
index 5e34bd0..93d383b 100644 (file)
 };
 
 &usb_host0_xhci {
-       extcon = <&usb2phy0>;
+       dr_mode = "host";
        status = "okay";
 };
 
index 6ff89ff..6747925 100644 (file)
 };
 
 &usb2phy0_otg {
-       vbus-supply = <&vcc5v0_usb_otg>;
+       phy-supply = <&vcc5v0_usb_otg>;
        status = "okay";
 };
 
index 6b5093a..b2e040d 100644 (file)
 };
 
 &usb2phy0_otg {
-       vbus-supply = <&vcc5v0_usb_otg>;
+       phy-supply = <&vcc5v0_usb_otg>;
        status = "okay";
 };
 
index d5b2d2d..5b16764 100644 (file)
@@ -48,6 +48,7 @@ CONFIG_ARCH_KEEMBAY=y
 CONFIG_ARCH_MEDIATEK=y
 CONFIG_ARCH_MESON=y
 CONFIG_ARCH_MVEBU=y
+CONFIG_ARCH_NXP=y
 CONFIG_ARCH_MXC=y
 CONFIG_ARCH_NPCM=y
 CONFIG_ARCH_QCOM=y
index ad2bfc7..44ebf5b 100644 (file)
@@ -237,7 +237,7 @@ static void amu_fie_setup(const struct cpumask *cpus)
        for_each_cpu(cpu, cpus) {
                if (!freq_counters_valid(cpu) ||
                    freq_inv_set_max_ratio(cpu,
-                                          cpufreq_get_hw_max_freq(cpu) * 1000,
+                                          cpufreq_get_hw_max_freq(cpu) * 1000ULL,
                                           arch_timer_get_rate()))
                        return;
        }
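
A hedged, self-contained sketch of the overflow the ULL suffix above avoids (values are illustrative, not from the kernel): cpufreq keeps frequencies in kHz in an unsigned int, so a plain "* 1000" is evaluated in 32 bits and wraps for clocks above roughly 4.29 GHz, whereas the 1000ULL constant promotes the multiply to 64 bits.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical maximum frequency of 4,400,000 kHz (4.4 GHz). */
        unsigned int max_khz = 4400000U;

        /* 32-bit multiply: wraps before the result is widened. */
        uint64_t wrong = max_khz * 1000;
        /* 64-bit multiply: the ULL constant promotes the operation. */
        uint64_t right = max_khz * 1000ULL;

        printf("wrong=%llu right=%llu\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
    }
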
index 2ff0ef6..917086b 100644 (file)
@@ -2114,7 +2114,7 @@ static int finalize_hyp_mode(void)
         * at, which would end badly once inaccessible.
         */
        kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
-       kmemleak_free_part(__va(hyp_mem_base), hyp_mem_size);
+       kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size);
        return pkvm_drop_host_privileges();
 }
 
index e7ad445..eb48930 100644 (file)
@@ -331,12 +331,6 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
        }
        BUG_ON(p4d_bad(p4d));
 
-       /*
-        * No need for locking during early boot. And it doesn't work as
-        * expected with KASLR enabled.
-        */
-       if (system_state != SYSTEM_BOOTING)
-               mutex_lock(&fixmap_lock);
        pudp = pud_set_fixmap_offset(p4dp, addr);
        do {
                pud_t old_pud = READ_ONCE(*pudp);
@@ -368,15 +362,13 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
        } while (pudp++, addr = next, addr != end);
 
        pud_clear_fixmap();
-       if (system_state != SYSTEM_BOOTING)
-               mutex_unlock(&fixmap_lock);
 }
 
-static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
-                                unsigned long virt, phys_addr_t size,
-                                pgprot_t prot,
-                                phys_addr_t (*pgtable_alloc)(int),
-                                int flags)
+static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
+                                       unsigned long virt, phys_addr_t size,
+                                       pgprot_t prot,
+                                       phys_addr_t (*pgtable_alloc)(int),
+                                       int flags)
 {
        unsigned long addr, end, next;
        pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);
@@ -400,8 +392,20 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
        } while (pgdp++, addr = next, addr != end);
 }
 
+static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
+                                unsigned long virt, phys_addr_t size,
+                                pgprot_t prot,
+                                phys_addr_t (*pgtable_alloc)(int),
+                                int flags)
+{
+       mutex_lock(&fixmap_lock);
+       __create_pgd_mapping_locked(pgdir, phys, virt, size, prot,
+                                   pgtable_alloc, flags);
+       mutex_unlock(&fixmap_lock);
+}
+
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-extern __alias(__create_pgd_mapping)
+extern __alias(__create_pgd_mapping_locked)
 void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
                             phys_addr_t size, pgprot_t prot,
                             phys_addr_t (*pgtable_alloc)(int), int flags);
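
For readers unfamiliar with the pattern applied in this hunk: the work moves into a _locked helper, the normal entry point wraps it with the lock, and the alias lets the KPTI path, which provides its own exclusion, call the lock-free variant directly. A minimal userspace sketch of the same shape, with made-up names and a pthread mutex standing in for the kernel mutex:

    #include <pthread.h>

    static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Does the real work; the caller guarantees mutual exclusion. */
    static void create_mapping_locked(unsigned long virt, unsigned long size)
    {
        (void)virt;
        (void)size;
        /* ... populate page tables here ... */
    }

    /* Normal entry point: serialize callers around the worker. */
    static void create_mapping(unsigned long virt, unsigned long size)
    {
        pthread_mutex_lock(&map_lock);
        create_mapping_locked(virt, size);
        pthread_mutex_unlock(&map_lock);
    }

    int main(void)
    {
        create_mapping(0x1000, 0x2000);
        return 0;
    }
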
index 7a62368..2d5a0bc 100644 (file)
@@ -50,6 +50,7 @@ struct clk *clk_get_io(void)
 {
        return &cpu_clk_generic[2];
 }
+EXPORT_SYMBOL_GPL(clk_get_io);
 
 struct clk *clk_get_ppe(void)
 {
index 794c96c..311dc15 100644 (file)
@@ -98,7 +98,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
        if (plat_dat->bus_id) {
                __raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 |
                             GMAC1_USE_UART0, LS1X_MUX_CTRL0);
-               switch (plat_dat->interface) {
+               switch (plat_dat->phy_interface) {
                case PHY_INTERFACE_MODE_RGMII:
                        val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
                        break;
@@ -107,12 +107,12 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
                        break;
                default:
                        pr_err("unsupported mii mode %d\n",
-                              plat_dat->interface);
+                              plat_dat->phy_interface);
                        return -ENOTSUPP;
                }
                val &= ~GMAC1_SHUT;
        } else {
-               switch (plat_dat->interface) {
+               switch (plat_dat->phy_interface) {
                case PHY_INTERFACE_MODE_RGMII:
                        val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
                        break;
@@ -121,7 +121,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
                        break;
                default:
                        pr_err("unsupported mii mode %d\n",
-                              plat_dat->interface);
+                              plat_dat->phy_interface);
                        return -ENOTSUPP;
                }
                val &= ~GMAC0_SHUT;
@@ -131,7 +131,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
        plat_dat = dev_get_platdata(&pdev->dev);
 
        val &= ~PHY_INTF_SELI;
-       if (plat_dat->interface == PHY_INTERFACE_MODE_RMII)
+       if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII)
                val |= 0x4 << PHY_INTF_SELI_SHIFT;
        __raw_writel(val, LS1X_MUX_CTRL1);
 
@@ -146,9 +146,9 @@ static struct plat_stmmacenet_data ls1x_eth0_pdata = {
        .bus_id                 = 0,
        .phy_addr               = -1,
 #if defined(CONFIG_LOONGSON1_LS1B)
-       .interface              = PHY_INTERFACE_MODE_MII,
+       .phy_interface          = PHY_INTERFACE_MODE_MII,
 #elif defined(CONFIG_LOONGSON1_LS1C)
-       .interface              = PHY_INTERFACE_MODE_RMII,
+       .phy_interface          = PHY_INTERFACE_MODE_RMII,
 #endif
        .mdio_bus_data          = &ls1x_mdio_bus_data,
        .dma_cfg                = &ls1x_eth_dma_cfg,
@@ -186,7 +186,7 @@ struct platform_device ls1x_eth0_pdev = {
 static struct plat_stmmacenet_data ls1x_eth1_pdata = {
        .bus_id                 = 1,
        .phy_addr               = -1,
-       .interface              = PHY_INTERFACE_MODE_MII,
+       .phy_interface          = PHY_INTERFACE_MODE_MII,
        .mdio_bus_data          = &ls1x_mdio_bus_data,
        .dma_cfg                = &ls1x_eth_dma_cfg,
        .has_gmac               = 1,
index 9aede24..a98940e 100644 (file)
@@ -224,8 +224,18 @@ config MLONGCALLS
          Enabling this option will probably slow down your kernel.
 
 config 64BIT
-       def_bool "$(ARCH)" = "parisc64"
+       def_bool y if "$(ARCH)" = "parisc64"
+       bool "64-bit kernel" if "$(ARCH)" = "parisc"
        depends on PA8X00
+       help
+         Enable this if you want to support a 64-bit kernel on PA-RISC.
+
+         At the moment, only people willing to use more than 2GB of RAM,
+         or who have a 64-bit-only capable PA-RISC machine, should say Y here.
+
+         Since there is no 64-bit userland on PA-RISC, there is no point in
+         enabling this option otherwise. The 64-bit kernel is significantly
+         bigger and slower than the 32-bit one.
 
 choice
        prompt "Kernel page size"
index 6982741..e712f80 100644 (file)
@@ -937,15 +937,6 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
        pmd = *pmdp;
        pmd_clear(pmdp);
 
-       /*
-        * pmdp collapse_flush need to ensure that there are no parallel gup
-        * walk after this call. This is needed so that we can have stable
-        * page ref count when collapsing a page. We don't allow a collapse page
-        * if we have gup taken on the page. We can ensure that by sending IPI
-        * because gup walk happens with IRQ disabled.
-        */
-       serialize_against_pte_lookup(vma->vm_mm);
-
        radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
 
        return pmd;
index ed66c31..59d1888 100644 (file)
@@ -386,6 +386,7 @@ config RISCV_ISA_C
 config RISCV_ISA_SVPBMT
        bool "SVPBMT extension support"
        depends on 64BIT && MMU
+       depends on !XIP_KERNEL
        select RISCV_ALTERNATIVE
        default y
        help
index 6850e93..f3623df 100644 (file)
@@ -46,7 +46,7 @@ config ERRATA_THEAD
 
 config ERRATA_THEAD_PBMT
        bool "Apply T-Head memory type errata"
-       depends on ERRATA_THEAD && 64BIT
+       depends on ERRATA_THEAD && 64BIT && MMU
        select RISCV_ALTERNATIVE_EARLY
        default y
        help
@@ -57,7 +57,7 @@ config ERRATA_THEAD_PBMT
 
 config ERRATA_THEAD_CMO
        bool "Apply T-Head cache management errata"
-       depends on ERRATA_THEAD
+       depends on ERRATA_THEAD && MMU
        select RISCV_DMA_NONCOHERENT
        default y
        help
index 202c83f..96648c1 100644 (file)
@@ -37,6 +37,7 @@ static bool errata_probe_cmo(unsigned int stage,
        if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
                return false;
 
+       riscv_cbom_block_size = L1_CACHE_BYTES;
        riscv_noncoherent_supported();
        return true;
 #else
index a60acae..273ece6 100644 (file)
@@ -42,6 +42,11 @@ void flush_icache_mm(struct mm_struct *mm, bool local);
 
 #endif /* CONFIG_SMP */
 
+/*
+ * The T-Head CMO errata internally probe the CBOM block size, but otherwise
+ * don't depend on Zicbom.
+ */
+extern unsigned int riscv_cbom_block_size;
 #ifdef CONFIG_RISCV_ISA_ZICBOM
 void riscv_init_cbom_blocksize(void);
 #else
index 95ef6e2..2dfc463 100644 (file)
@@ -296,8 +296,8 @@ void __init setup_arch(char **cmdline_p)
        setup_smp();
 #endif
 
-       riscv_fill_hwcap();
        riscv_init_cbom_blocksize();
+       riscv_fill_hwcap();
        apply_boot_alternatives();
 }
 
index 5a2de6b..5c59112 100644 (file)
@@ -124,6 +124,8 @@ SYSCALL_DEFINE0(rt_sigreturn)
        if (restore_altstack(&frame->uc.uc_stack))
                goto badframe;
 
+       regs->cause = -1UL;
+
        return regs->a0;
 
 badframe:
index cd22253..e3f9bdf 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/of_device.h>
 #include <asm/cacheflush.h>
 
-static unsigned int riscv_cbom_block_size = L1_CACHE_BYTES;
+unsigned int riscv_cbom_block_size;
 static bool noncoherent_supported;
 
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
@@ -79,38 +79,41 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void riscv_init_cbom_blocksize(void)
 {
        struct device_node *node;
+       unsigned long cbom_hartid;
+       u32 val, probed_block_size;
        int ret;
-       u32 val;
 
+       probed_block_size = 0;
        for_each_of_cpu_node(node) {
                unsigned long hartid;
-               int cbom_hartid;
 
                ret = riscv_of_processor_hartid(node, &hartid);
                if (ret)
                        continue;
 
-               if (hartid < 0)
-                       continue;
-
                /* set block-size for cbom extension if available */
                ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
                if (ret)
                        continue;
 
-               if (!riscv_cbom_block_size) {
-                       riscv_cbom_block_size = val;
+               if (!probed_block_size) {
+                       probed_block_size = val;
                        cbom_hartid = hartid;
                } else {
-                       if (riscv_cbom_block_size != val)
-                               pr_warn("cbom-block-size mismatched between harts %d and %lu\n",
+                       if (probed_block_size != val)
+                               pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
                                        cbom_hartid, hartid);
                }
        }
+
+       if (probed_block_size)
+               riscv_cbom_block_size = probed_block_size;
 }
 #endif
 
 void riscv_noncoherent_supported(void)
 {
+       WARN(!riscv_cbom_block_size,
+            "Non-coherent DMA support enabled without a block size\n");
        noncoherent_supported = true;
 }
index 082ec5f..0243b6e 100644 (file)
@@ -489,6 +489,8 @@ enum prot_type {
        PROT_TYPE_ALC  = 2,
        PROT_TYPE_DAT  = 3,
        PROT_TYPE_IEP  = 4,
+       /* Dummy value for passing an initialized value when code != PGM_PROTECTION */
+       PROT_NONE,
 };
 
 static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
@@ -504,6 +506,10 @@ static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva,
        switch (code) {
        case PGM_PROTECTION:
                switch (prot) {
+               case PROT_NONE:
+                       /* We should never get here, acts like termination */
+                       WARN_ON_ONCE(1);
+                       break;
                case PROT_TYPE_IEP:
                        tec->b61 = 1;
                        fallthrough;
@@ -968,8 +974,10 @@ static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
                                return rc;
                } else {
                        gpa = kvm_s390_real_to_abs(vcpu, ga);
-                       if (kvm_is_error_gpa(vcpu->kvm, gpa))
+                       if (kvm_is_error_gpa(vcpu->kvm, gpa)) {
                                rc = PGM_ADDRESSING;
+                               prot = PROT_NONE;
+                       }
                }
                if (rc)
                        return trans_exc(vcpu, rc, ga, ar, mode, prot);
@@ -1112,8 +1120,6 @@ int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
                if (rc == PGM_PROTECTION && try_storage_prot_override)
                        rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
                                                        data, fragment_len, PAGE_SPO_ACC);
-               if (rc == PGM_PROTECTION)
-                       prot = PROT_TYPE_KEYC;
                if (rc)
                        break;
                len -= fragment_len;
@@ -1123,6 +1129,10 @@ int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
        if (rc > 0) {
                bool terminate = (mode == GACC_STORE) && (idx > 0);
 
+               if (rc == PGM_PROTECTION)
+                       prot = PROT_TYPE_KEYC;
+               else
+                       prot = PROT_NONE;
                rc = trans_exc_ending(vcpu, rc, ga, ar, mode, prot, terminate);
        }
 out_unlock:
index b9c944b..ab569fa 100644 (file)
@@ -3324,7 +3324,7 @@ static void aen_host_forward(unsigned long si)
        if (gaite->count == 0)
                return;
        if (gaite->aisb != 0)
-               set_bit_inv(gaite->aisbo, (unsigned long *)gaite->aisb);
+               set_bit_inv(gaite->aisbo, phys_to_virt(gaite->aisb));
 
        kvm = kvm_s390_pci_si_to_kvm(aift, si);
        if (!kvm)
index edfd4bb..b7ef0b7 100644 (file)
@@ -505,7 +505,7 @@ int kvm_arch_init(void *opaque)
                goto out;
        }
 
-       if (kvm_s390_pci_interp_allowed()) {
+       if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
                rc = kvm_s390_pci_init();
                if (rc) {
                        pr_err("Unable to allocate AIFT for PCI\n");
@@ -527,7 +527,7 @@ out:
 void kvm_arch_exit(void)
 {
        kvm_s390_gib_destroy();
-       if (kvm_s390_pci_interp_allowed())
+       if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
                kvm_s390_pci_exit();
        debug_unregister(kvm_s390_dbf);
        debug_unregister(kvm_s390_dbf_uv);
index bb8c335..c50c164 100644 (file)
@@ -58,7 +58,7 @@ static int zpci_setup_aipb(u8 nisc)
        if (!zpci_aipb)
                return -ENOMEM;
 
-       aift->sbv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC, 0);
+       aift->sbv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC, NULL);
        if (!aift->sbv) {
                rc = -ENOMEM;
                goto free_aipb;
@@ -71,7 +71,7 @@ static int zpci_setup_aipb(u8 nisc)
                rc = -ENOMEM;
                goto free_sbv;
        }
-       aift->gait = (struct zpci_gaite *)page_to_phys(page);
+       aift->gait = (struct zpci_gaite *)page_to_virt(page);
 
        zpci_aipb->aipb.faisb = virt_to_phys(aift->sbv->vector);
        zpci_aipb->aipb.gait = virt_to_phys(aift->gait);
@@ -373,7 +373,7 @@ static int kvm_s390_pci_aif_disable(struct zpci_dev *zdev, bool force)
                gaite->gisc = 0;
                gaite->aisbo = 0;
                gaite->gisa = 0;
-               aift->kzdev[zdev->aisb] = 0;
+               aift->kzdev[zdev->aisb] = NULL;
                /* Clear zdev info */
                airq_iv_free_bit(aift->sbv, zdev->aisb);
                airq_iv_release(zdev->aibv);
@@ -672,23 +672,31 @@ out:
 
 int kvm_s390_pci_init(void)
 {
+       zpci_kvm_hook.kvm_register = kvm_s390_pci_register_kvm;
+       zpci_kvm_hook.kvm_unregister = kvm_s390_pci_unregister_kvm;
+
+       if (!kvm_s390_pci_interp_allowed())
+               return 0;
+
        aift = kzalloc(sizeof(struct zpci_aift), GFP_KERNEL);
        if (!aift)
                return -ENOMEM;
 
        spin_lock_init(&aift->gait_lock);
        mutex_init(&aift->aift_lock);
-       zpci_kvm_hook.kvm_register = kvm_s390_pci_register_kvm;
-       zpci_kvm_hook.kvm_unregister = kvm_s390_pci_unregister_kvm;
 
        return 0;
 }
 
 void kvm_s390_pci_exit(void)
 {
-       mutex_destroy(&aift->aift_lock);
        zpci_kvm_hook.kvm_register = NULL;
        zpci_kvm_hook.kvm_unregister = NULL;
 
+       if (!kvm_s390_pci_interp_allowed())
+               return;
+
+       mutex_destroy(&aift->aift_lock);
+
        kfree(aift);
 }
index 3a3606c..486d06e 100644 (file)
@@ -46,9 +46,9 @@ extern struct zpci_aift *aift;
 static inline struct kvm *kvm_s390_pci_si_to_kvm(struct zpci_aift *aift,
                                                 unsigned long si)
 {
-       if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM) || aift->kzdev == 0 ||
-           aift->kzdev[si] == 0)
-               return 0;
+       if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM) || !aift->kzdev ||
+           !aift->kzdev[si])
+               return NULL;
        return aift->kzdev[si]->kvm;
 };
 
index f2fe63b..f1d4d67 100644 (file)
@@ -132,10 +132,18 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT)
 # The wrappers will select whether using "malloc" or the kernel allocator.
 LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
 
+# Avoid binutils 2.39+ warnings by marking the stack non-executable and
+# ignoring warnings for the kallsyms sections.
+LDFLAGS_EXECSTACK = -z noexecstack
+ifeq ($(CONFIG_LD_IS_BFD),y)
+LDFLAGS_EXECSTACK += $(call ld-option,--no-warn-rwx-segments)
+endif
+
 LD_FLAGS_CMDLINE = $(foreach opt,$(KBUILD_LDFLAGS),-Wl,$(opt))
 
 # Used by link-vmlinux.sh which has special support for um link
 export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
+export LDFLAGS_vmlinux := $(LDFLAGS_EXECSTACK)
 
 # When cleaning we don't include .config, so we don't include
 # TT or skas makefiles and don't clean skas_ptregs.h.
index 7452f70..7467153 100644 (file)
@@ -48,7 +48,8 @@ void show_stack(struct task_struct *task, unsigned long *stack,
                        break;
                if (i && ((i % STACKSLOTS_PER_LINE) == 0))
                        pr_cont("\n");
-               pr_cont(" %08lx", *stack++);
+               pr_cont(" %08lx", READ_ONCE_NOCHECK(*stack));
+               stack++;
        }
 
        printk("%sCall Trace:\n", loglvl);
index e0de60e..d9e023c 100644 (file)
@@ -33,7 +33,7 @@
 #include "um_arch.h"
 
 #define DEFAULT_COMMAND_LINE_ROOT "root=98:0"
-#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty"
+#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty0"
 
 /* Changed in add_arg and setup_arch, which run before SMP is started */
 static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 };
index aeb3802..5d75fe2 100644 (file)
 #define INTEL_FAM6_RAPTORLAKE_P                0xBA
 #define INTEL_FAM6_RAPTORLAKE_S                0xBF
 
+#define INTEL_FAM6_METEORLAKE          0xAC
+#define INTEL_FAM6_METEORLAKE_L                0xAA
+
 /* "Small Core" Processors (Atom) */
 
 #define INTEL_FAM6_ATOM_BONNELL                0x1C /* Diamondville, Pineview */
index 2c96c43..aa381ab 100644 (file)
@@ -729,6 +729,7 @@ struct kvm_vcpu_arch {
        struct fpu_guest guest_fpu;
 
        u64 xcr0;
+       u64 guest_supported_xcr0;
 
        struct kvm_pio_request pio;
        void *pio_data;
index 24c1bb8..8bdeae2 100644 (file)
@@ -344,8 +344,11 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
        }
 
        va_page = sgx_encl_grow(encl, false);
-       if (IS_ERR(va_page))
+       if (IS_ERR(va_page)) {
+               if (PTR_ERR(va_page) == -EBUSY)
+                       vmret = VM_FAULT_NOPAGE;
                goto err_out_epc;
+       }
 
        if (va_page)
                list_add(&va_page->list, &encl->va_pages);
index 515e2a5..0aad028 100644 (file)
@@ -49,9 +49,13 @@ static LIST_HEAD(sgx_dirty_page_list);
  * Reset post-kexec EPC pages to the uninitialized state. The pages are removed
  * from the input list, and made available for the page allocator. SECS pages
  * prepending their children in the input list are left intact.
+ *
+ * Return 0 when sanitization was successful or the kthread was stopped, and
+ * the number of unsanitized pages otherwise.
  */
-static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
+static unsigned long __sgx_sanitize_pages(struct list_head *dirty_page_list)
 {
+       unsigned long left_dirty = 0;
        struct sgx_epc_page *page;
        LIST_HEAD(dirty);
        int ret;
@@ -59,7 +63,7 @@ static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
        /* dirty_page_list is thread-local, no need for a lock: */
        while (!list_empty(dirty_page_list)) {
                if (kthread_should_stop())
-                       return;
+                       return 0;
 
                page = list_first_entry(dirty_page_list, struct sgx_epc_page, list);
 
@@ -92,12 +96,14 @@ static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
                } else {
                        /* The page is not yet clean - move to the dirty list. */
                        list_move_tail(&page->list, &dirty);
+                       left_dirty++;
                }
 
                cond_resched();
        }
 
        list_splice(&dirty, dirty_page_list);
+       return left_dirty;
 }
 
 static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page)
@@ -395,10 +401,7 @@ static int ksgxd(void *p)
         * required for SECS pages, whose child pages blocked EREMOVE.
         */
        __sgx_sanitize_pages(&sgx_dirty_page_list);
-       __sgx_sanitize_pages(&sgx_dirty_page_list);
-
-       /* sanity check: */
-       WARN_ON(!list_empty(&sgx_dirty_page_list));
+       WARN_ON(__sgx_sanitize_pages(&sgx_dirty_page_list));
 
        while (!kthread_should_stop()) {
                if (try_to_freeze())
index 75dcf7a..4c1c2c0 100644 (file)
@@ -315,7 +315,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
        struct kvm_cpuid_entry2 *best;
-       u64 guest_supported_xcr0;
 
        best = kvm_find_cpuid_entry(vcpu, 1);
        if (best && apic) {
@@ -327,10 +326,16 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
                kvm_apic_set_version(vcpu);
        }
 
-       guest_supported_xcr0 =
+       vcpu->arch.guest_supported_xcr0 =
                cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
 
-       vcpu->arch.guest_fpu.fpstate->user_xfeatures = guest_supported_xcr0;
+       /*
+        * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
+        * XSAVE/XCR0 are not exposed to the guest, and even if XSAVE isn't
+        * supported by the host.
+        */
+       vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 |
+                                                      XFEATURE_MASK_FPSSE;
 
        kvm_update_pv_runtime(vcpu);
 
index d5ec3a2..aacb28c 100644 (file)
@@ -4132,6 +4132,9 @@ static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
 {
        u32 eax, ecx, edx;
 
+       if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
+               return emulate_ud(ctxt);
+
        eax = reg_read(ctxt, VCPU_REGS_RAX);
        edx = reg_read(ctxt, VCPU_REGS_RDX);
        ecx = reg_read(ctxt, VCPU_REGS_RCX);
index e418ef3..3552e6a 100644 (file)
@@ -1596,6 +1596,8 @@ static void __rmap_add(struct kvm *kvm,
        rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
        rmap_count = pte_list_add(cache, spte, rmap_head);
 
+       if (rmap_count > kvm->stat.max_mmu_rmap_size)
+               kvm->stat.max_mmu_rmap_size = rmap_count;
        if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
                kvm_zap_all_rmap_sptes(kvm, rmap_head);
                kvm_flush_remote_tlbs_with_address(
index 43a6a7e..b0c47b4 100644 (file)
@@ -1011,15 +1011,10 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
 
-static inline u64 kvm_guest_supported_xcr0(struct kvm_vcpu *vcpu)
-{
-       return vcpu->arch.guest_fpu.fpstate->user_xfeatures;
-}
-
 #ifdef CONFIG_X86_64
 static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
 {
-       return kvm_guest_supported_xcr0(vcpu) & XFEATURE_MASK_USER_DYNAMIC;
+       return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC;
 }
 #endif
 
@@ -1042,7 +1037,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
         * saving.  However, xcr0 bit 0 is always set, even if the
         * emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
         */
-       valid_bits = kvm_guest_supported_xcr0(vcpu) | XFEATURE_MASK_FP;
+       valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
        if (xcr0 & ~valid_bits)
                return 1;
 
@@ -1070,6 +1065,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 
 int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
 {
+       /* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
        if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
            __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
                kvm_inject_gp(vcpu, 0);
index ad0139d..f1bb186 100644 (file)
@@ -44,7 +44,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
         * called from other contexts.
         */
        pagefault_disable();
-       ret = __copy_from_user_inatomic(to, from, n);
+       ret = raw_copy_from_user(to, from, n);
        pagefault_enable();
 
        return ret;
index f8220fd..829c140 100644 (file)
@@ -4,10 +4,12 @@ KCOV_INSTRUMENT_tlb.o                 := n
 KCOV_INSTRUMENT_mem_encrypt.o          := n
 KCOV_INSTRUMENT_mem_encrypt_amd.o      := n
 KCOV_INSTRUMENT_mem_encrypt_identity.o := n
+KCOV_INSTRUMENT_pgprot.o               := n
 
 KASAN_SANITIZE_mem_encrypt.o           := n
 KASAN_SANITIZE_mem_encrypt_amd.o       := n
 KASAN_SANITIZE_mem_encrypt_identity.o  := n
+KASAN_SANITIZE_pgprot.o                := n
 
 # Disable KCSAN entirely, because otherwise we get warnings that some functions
 # reference __initdata sections.
@@ -17,6 +19,7 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_mem_encrypt.o            = -pg
 CFLAGS_REMOVE_mem_encrypt_amd.o                = -pg
 CFLAGS_REMOVE_mem_encrypt_identity.o   = -pg
+CFLAGS_REMOVE_pgprot.o                 = -pg
 endif
 
 obj-y                          :=  init.o init_$(BITS).o fault.o ioremap.o extable.o mmap.o \
index 68fd2cf..f6e9f84 100644 (file)
@@ -6,10 +6,9 @@
 #include <asm/unistd.h>
 #include <sysdep/ptrace.h>
 
-typedef long syscall_handler_t(struct pt_regs);
+typedef long syscall_handler_t(struct syscall_args);
 
 extern syscall_handler_t *sys_call_table[];
 
 #define EXECUTE_SYSCALL(syscall, regs) \
-       ((long (*)(struct syscall_args)) \
-        (*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
+       ((*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
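
The change above retypes the table so the dispatch no longer goes through a cast. A small standalone sketch of the difference, with hypothetical handler names: once the table's element type matches the handlers' real signature, the macro can call through it directly.

    #include <stdio.h>

    struct syscall_args { long a0, a1; };

    typedef long syscall_handler_t(struct syscall_args);

    static long sys_add(struct syscall_args args)
    {
        return args.a0 + args.a1;
    }

    /* Table typed with the handlers' real signature: no cast at the call site. */
    static syscall_handler_t *sys_call_table[] = { sys_add };

    int main(void)
    {
        struct syscall_args args = { 2, 3 };

        printf("%ld\n", (*sys_call_table[0])(args));
        return 0;
    }
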
index ac8eee0..66162ea 100644 (file)
@@ -65,9 +65,6 @@ static int get_free_idx(struct task_struct* task)
        struct thread_struct *t = &task->thread;
        int idx;
 
-       if (!t->arch.tls_array)
-               return GDT_ENTRY_TLS_MIN;
-
        for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
                if (!t->arch.tls_array[idx].present)
                        return idx + GDT_ENTRY_TLS_MIN;
@@ -240,9 +237,6 @@ static int get_tls_entry(struct task_struct *task, struct user_desc *info,
 {
        struct thread_struct *t = &task->thread;
 
-       if (!t->arch.tls_array)
-               goto clear;
-
        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;
 
index 8c0396f..6fbe97c 100644 (file)
@@ -65,7 +65,7 @@ quiet_cmd_vdso = VDSO    $@
                       -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
                 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
-VDSO_LDFLAGS = -fPIC -shared -Wl,--hash-style=sysv
+VDSO_LDFLAGS = -fPIC -shared -Wl,--hash-style=sysv -z noexecstack
 GCOV_PROFILE := n
 
 #
index a0d1104..651057c 100644 (file)
@@ -295,7 +295,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 
        while (!blk_try_enter_queue(q, pm)) {
                if (flags & BLK_MQ_REQ_NOWAIT)
-                       return -EBUSY;
+                       return -EAGAIN;
 
                /*
                 * read pair of barrier in blk_freeze_queue_start(), we need to
@@ -325,7 +325,7 @@ int __bio_queue_enter(struct request_queue *q, struct bio *bio)
                        if (test_bit(GD_DEAD, &disk->state))
                                goto dead;
                        bio_wouldblock_error(bio);
-                       return -EBUSY;
+                       return -EAGAIN;
                }
 
                /*
index 67e6dbc..e59c306 100644 (file)
@@ -309,6 +309,11 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
        struct blk_plug plug;
        int ret = 0;
 
+       /* make sure that "len << SECTOR_SHIFT" doesn't overflow */
+       if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
+               max_sectors = UINT_MAX >> SECTOR_SHIFT;
+       max_sectors &= ~bs_mask;
+
        if (max_sectors == 0)
                return -EOPNOTSUPP;
        if ((sector | nr_sects) & bs_mask)
@@ -322,10 +327,10 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
 
                bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
                bio->bi_iter.bi_sector = sector;
-               bio->bi_iter.bi_size = len;
+               bio->bi_iter.bi_size = len << SECTOR_SHIFT;
 
-               sector += len << SECTOR_SHIFT;
-               nr_sects -= len << SECTOR_SHIFT;
+               sector += len;
+               nr_sects -= len;
                if (!nr_sects) {
                        ret = submit_bio_wait(bio);
                        bio_put(bio);
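
A rough illustration of the unit handling this hunk fixes (values are made up): bi_size is a 32-bit byte count while the cursor variables are in 512-byte sectors, so the per-bio length is clamped so that len << SECTOR_SHIFT still fits in 32 bits, shifted when stored as bytes, and subtracted unshifted when advancing in sectors.

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9

    int main(void)
    {
        uint64_t nr_sects = 10ULL << 20;  /* 10 Mi sectors = 5 GiB */
        /* Clamp so the byte count below cannot overflow 32 bits. */
        uint64_t max_sectors = UINT32_MAX >> SECTOR_SHIFT;

        while (nr_sects) {
            uint64_t len = nr_sects < max_sectors ? nr_sects : max_sectors;
            /* The bio carries bytes ... */
            uint32_t bytes = (uint32_t)(len << SECTOR_SHIFT);
            /* ... but the cursor advances in sectors. */
            nr_sects -= len;

            printf("bio of %u bytes, %llu sectors left\n",
                   (unsigned)bytes, (unsigned long long)nr_sects);
        }
        return 0;
    }
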
index d36fabf..988ba52 100644 (file)
@@ -602,7 +602,6 @@ void del_gendisk(struct gendisk *disk)
         * Prevent new I/O from crossing bio_queue_enter().
         */
        blk_queue_start_drain(q);
-       blk_mq_freeze_queue_wait(q);
 
        if (!(disk->flags & GENHD_FL_HIDDEN)) {
                sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
@@ -626,6 +625,8 @@ void del_gendisk(struct gendisk *disk)
        pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
        device_del(disk_to_dev(disk));
 
+       blk_mq_freeze_queue_wait(q);
+
        blk_throtl_cancel_bios(disk->queue);
 
        blk_sync_queue(q);
index bf9b511..1f109b0 100644 (file)
@@ -43,7 +43,7 @@ config SYSTEM_TRUSTED_KEYRING
        bool "Provide system-wide ring of trusted keys"
        depends on KEYS
        depends on ASYMMETRIC_KEY_TYPE
-       depends on X509_CERTIFICATE_PARSER
+       depends on X509_CERTIFICATE_PARSER = y
        help
          Provide a system keyring to which trusted keys can be added.  Keys in
          the keyring are considered to be trusted.  Keys may be added at will
index 16a1663..9f40917 100644 (file)
@@ -531,10 +531,27 @@ static void wait_for_freeze(void)
        /* No delay is needed if we are in guest */
        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return;
+       /*
+        * Modern (>=Nehalem) Intel systems use ACPI via intel_idle,
+        * not this code.  Assume that any Intel systems using this
+        * are ancient and may need the dummy wait.  This also assumes
+        * that the motivating chipset issue was Intel-only.
+        */
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return;
 #endif
-       /* Dummy wait op - must do something useless after P_LVL2 read
-          because chipsets cannot guarantee that STPCLK# signal
-          gets asserted in time to freeze execution properly. */
+       /*
+        * Dummy wait op - must do something useless after P_LVL2 read
+        * because chipsets cannot guarantee that STPCLK# signal gets
+        * asserted in time to freeze execution properly
+        *
+        * This workaround has been in place since the original ACPI
+        * implementation was merged, circa 2002.
+        *
+        * If a profile is pointing to this instruction, please first
+        * consider moving your system to a more modern idle
+        * mechanism.
+        */
        inl(acpi_gbl_FADT.xpm_timer_block.address);
 }
 
index 753e7cc..5fb4bc5 100644 (file)
@@ -1625,7 +1625,7 @@ static int __init fw_devlink_setup(char *arg)
 }
 early_param("fw_devlink", fw_devlink_setup);
 
-static bool fw_devlink_strict = true;
+static bool fw_devlink_strict;
 static int __init fw_devlink_strict_setup(char *arg)
 {
        return strtobool(arg, &fw_devlink_strict);
index 62c2b7a..4407203 100644 (file)
@@ -449,6 +449,9 @@ static int quad8_events_configure(struct counter_device *counter)
                        return -EINVAL;
                }
 
+               /* Enable IRQ line */
+               irq_enabled |= BIT(event_node->channel);
+
                /* Skip configuration if it is the same as previously set */
                if (priv->irq_trigger[event_node->channel] == next_irq_trigger)
                        continue;
@@ -462,9 +465,6 @@ static int quad8_events_configure(struct counter_device *counter)
                          priv->irq_trigger[event_node->channel] << 3;
                iowrite8(QUAD8_CTR_IOR | ior_cfg,
                         &priv->reg->channel[event_node->channel].control);
-
-               /* Enable IRQ line */
-               irq_enabled |= BIT(event_node->channel);
        }
 
        iowrite8(irq_enabled, &priv->reg->index_interrupt);
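
The hunk above moves the mask accumulation ahead of the early continue, so channels whose trigger configuration is unchanged still end up in the final enable write. A toy sketch of that loop shape, with invented channel data:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const unsigned int channels[] = { 0, 2 };
        const int config_unchanged[] = { 0, 1 };
        uint8_t irq_enabled = 0;

        for (unsigned int i = 0; i < 2; i++) {
            /* Accumulate before any continue, or unchanged channels get dropped. */
            irq_enabled |= 1u << channels[i];

            if (config_unchanged[i])
                continue;    /* skip reprogramming only */

            /* ... reprogram the trigger mode for channels[i] ... */
        }

        printf("enable mask = 0x%02x\n", irq_enabled);  /* prints 0x05 */
        return 0;
    }
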
index cb6401c..acf31cc 100644 (file)
@@ -15,6 +15,7 @@ void hmem_register_device(int target_nid, struct resource *r)
                .start = r->start,
                .end = r->end,
                .flags = IORESOURCE_MEM,
+               .desc = IORES_DESC_SOFT_RESERVED,
        };
        struct platform_device *pdev;
        struct memregion_info info;
index d4f1e4e..85e0070 100644 (file)
@@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
        }
 
        pdev = of_find_device_by_node(udma_node);
+       if (np != udma_node)
+               of_node_put(udma_node);
+
        if (!pdev) {
                pr_debug("UDMA device not found\n");
                return ERR_PTR(-EPROBE_DEFER);
        }
 
-       if (np != udma_node)
-               of_node_put(udma_node);
-
        ud = platform_get_drvdata(pdev);
        if (!ud) {
                pr_debug("UDMA has not been probed\n");
index 6276934..8cd4e69 100644 (file)
@@ -3040,9 +3040,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 
        /* Request and map I/O memory */
        xdev->regs = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(xdev->regs))
-               return PTR_ERR(xdev->regs);
-
+       if (IS_ERR(xdev->regs)) {
+               err = PTR_ERR(xdev->regs);
+               goto disable_clks;
+       }
        /* Retrieve the DMA engine properties from the device tree */
        xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
        xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
@@ -3070,7 +3071,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
                if (err < 0) {
                        dev_err(xdev->dev,
                                "missing xlnx,num-fstores property\n");
-                       return err;
+                       goto disable_clks;
                }
 
                err = of_property_read_u32(node, "xlnx,flush-fsync",
@@ -3090,7 +3091,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
                xdev->ext_addr = false;
 
        /* Set the dma mask bits */
-       dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+       err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+       if (err < 0) {
+               dev_err(xdev->dev, "DMA mask error %d\n", err);
+               goto disable_clks;
+       }
 
        /* Initialize the DMA engine */
        xdev->common.dev = &pdev->dev;
@@ -3137,7 +3142,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
        for_each_child_of_node(node, child) {
                err = xilinx_dma_child_probe(xdev, child);
                if (err < 0)
-                       goto disable_clks;
+                       goto error;
        }
 
        if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -3172,12 +3177,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 
        return 0;
 
-disable_clks:
-       xdma_disable_allclks(xdev);
 error:
        for (i = 0; i < xdev->dma_config->max_channels; i++)
                if (xdev->chan[i])
                        xilinx_dma_chan_remove(xdev->chan[i]);
+disable_clks:
+       xdma_disable_allclks(xdev);
 
        return err;
 }
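
The reordering above follows the usual goto-ladder rule: cleanup labels undo acquisitions in reverse order, and each failure jumps to the label that releases exactly what has been set up so far. A toy standalone version of that shape, with placeholder resources:

    #include <stdlib.h>

    static int setup(void)
    {
        void *regs, *chan;
        int err;

        regs = malloc(16);
        if (!regs)
            return -1;          /* nothing to unwind yet */

        chan = malloc(16);
        if (!chan) {
            err = -1;
            goto free_regs;     /* release only what exists so far */
        }

        /* ... use regs and chan ... */
        free(chan);
        free(regs);
        return 0;

    free_regs:
        free(regs);
        return err;
    }

    int main(void)
    {
        return setup() ? 1 : 0;
    }
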
index dc299ab..3f4ee39 100644 (file)
@@ -849,7 +849,7 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
 
        zynqmp_dma_desc_config_eod(chan, desc);
        async_tx_ack(&first->async_tx);
-       first->async_tx.flags = flags;
+       first->async_tx.flags = (enum dma_ctrl_flags)flags;
        return &first->async_tx;
 }
 
index 3ed7ae0..96060bf 100644 (file)
@@ -450,9 +450,13 @@ static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
 static const struct scmi_clock_info *
 scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
 {
+       struct scmi_clock_info *clk;
        struct clock_info *ci = ph->get_priv(ph);
-       struct scmi_clock_info *clk = ci->clk + clk_id;
 
+       if (clk_id >= ci->num_clocks)
+               return NULL;
+
+       clk = ci->clk + clk_id;
        if (!clk->name[0])
                return NULL;
 
index 8abace5..f42dad9 100644 (file)
@@ -106,6 +106,7 @@ enum scmi_optee_pta_cmd {
  * @channel_id: OP-TEE channel ID used for this transport
  * @tee_session: TEE session identifier
  * @caps: OP-TEE SCMI channel capabilities
+ * @rx_len: Response size
  * @mu: Mutex protection on channel access
  * @cinfo: SCMI channel information
  * @shmem: Virtual base address of the shared memory
index 673f3eb..e9afa8c 100644 (file)
@@ -166,9 +166,13 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
        struct scmi_xfer *t;
        struct scmi_msg_reset_domain_reset *dom;
        struct scmi_reset_info *pi = ph->get_priv(ph);
-       struct reset_dom_info *rdom = pi->dom_info + domain;
+       struct reset_dom_info *rdom;
 
-       if (rdom->async_reset)
+       if (domain >= pi->num_domains)
+               return -EINVAL;
+
+       rdom = pi->dom_info + domain;
+       if (rdom->async_reset && flags & AUTONOMOUS_RESET)
                flags |= ASYNCHRONOUS_RESET;
 
        ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t);
@@ -180,7 +184,7 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
        dom->flags = cpu_to_le32(flags);
        dom->reset_state = cpu_to_le32(state);
 
-       if (rdom->async_reset)
+       if (flags & ASYNCHRONOUS_RESET)
                ret = ph->xops->do_xfer_with_response(ph, t);
        else
                ret = ph->xops->do_xfer(ph, t);
index d5dee62..0e05a79 100644 (file)
@@ -112,9 +112,28 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
        scmi_pd_data->domains = domains;
        scmi_pd_data->num_domains = num_domains;
 
+       dev_set_drvdata(dev, scmi_pd_data);
+
        return of_genpd_add_provider_onecell(np, scmi_pd_data);
 }
 
+static void scmi_pm_domain_remove(struct scmi_device *sdev)
+{
+       int i;
+       struct genpd_onecell_data *scmi_pd_data;
+       struct device *dev = &sdev->dev;
+       struct device_node *np = dev->of_node;
+
+       of_genpd_del_provider(np);
+
+       scmi_pd_data = dev_get_drvdata(dev);
+       for (i = 0; i < scmi_pd_data->num_domains; i++) {
+               if (!scmi_pd_data->domains[i])
+                       continue;
+               pm_genpd_remove(scmi_pd_data->domains[i]);
+       }
+}
+
 static const struct scmi_device_id scmi_id_table[] = {
        { SCMI_PROTOCOL_POWER, "genpd" },
        { },
@@ -124,6 +143,7 @@ MODULE_DEVICE_TABLE(scmi, scmi_id_table);
 static struct scmi_driver scmi_power_domain_driver = {
        .name = "scmi-power-domain",
        .probe = scmi_pm_domain_probe,
+       .remove = scmi_pm_domain_remove,
        .id_table = scmi_id_table,
 };
 module_scmi_driver(scmi_power_domain_driver);
index 7288c61..0b5853f 100644 (file)
@@ -762,6 +762,10 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
 {
        int ret;
        struct scmi_xfer *t;
+       struct sensors_info *si = ph->get_priv(ph);
+
+       if (sensor_id >= si->num_sensors)
+               return -EINVAL;
 
        ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_GET,
                                      sizeof(__le32), sizeof(__le32), &t);
@@ -771,7 +775,6 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
        put_unaligned_le32(sensor_id, t->tx.buf);
        ret = ph->xops->do_xfer(ph, t);
        if (!ret) {
-               struct sensors_info *si = ph->get_priv(ph);
                struct scmi_sensor_info *s = si->sensors + sensor_id;
 
                *sensor_config = get_unaligned_le64(t->rx.buf);
@@ -788,6 +791,10 @@ static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph,
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_sensor_config_set *msg;
+       struct sensors_info *si = ph->get_priv(ph);
+
+       if (sensor_id >= si->num_sensors)
+               return -EINVAL;
 
        ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_SET,
                                      sizeof(*msg), 0, &t);
@@ -800,7 +807,6 @@ static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph,
 
        ret = ph->xops->do_xfer(ph, t);
        if (!ret) {
-               struct sensors_info *si = ph->get_priv(ph);
                struct scmi_sensor_info *s = si->sensors + sensor_id;
 
                s->sensor_config = sensor_config;
@@ -831,8 +837,11 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_sensor_reading_get *sensor;
+       struct scmi_sensor_info *s;
        struct sensors_info *si = ph->get_priv(ph);
-       struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+       if (sensor_id >= si->num_sensors)
+               return -EINVAL;
 
        ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET,
                                      sizeof(*sensor), 0, &t);
@@ -841,6 +850,7 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
 
        sensor = t->tx.buf;
        sensor->id = cpu_to_le32(sensor_id);
+       s = si->sensors + sensor_id;
        if (s->async) {
                sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
                ret = ph->xops->do_xfer_with_response(ph, t);
@@ -895,9 +905,13 @@ scmi_sensor_reading_get_timestamped(const struct scmi_protocol_handle *ph,
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_sensor_reading_get *sensor;
+       struct scmi_sensor_info *s;
        struct sensors_info *si = ph->get_priv(ph);
-       struct scmi_sensor_info *s = si->sensors + sensor_id;
 
+       if (sensor_id >= si->num_sensors)
+               return -EINVAL;
+
+       s = si->sensors + sensor_id;
        if (!count || !readings ||
            (!s->num_axis && count > 1) || (s->num_axis && count > s->num_axis))
                return -EINVAL;
@@ -948,6 +962,9 @@ scmi_sensor_info_get(const struct scmi_protocol_handle *ph, u32 sensor_id)
 {
        struct sensors_info *si = ph->get_priv(ph);
 
+       if (sensor_id >= si->num_sensors)
+               return NULL;
+
        return si->sensors + sensor_id;
 }
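
Several of the SCMI hunks in this pull apply the same guard: validate a caller-supplied index against the table size before forming a pointer into the table. A minimal sketch, with the structures reduced to hypothetical stand-ins:

    #include <stddef.h>

    struct scmi_sensor_info { unsigned int id; };

    struct sensors_info {
        unsigned int num_sensors;
        struct scmi_sensor_info *sensors;
    };

    /* Reject out-of-range ids instead of dereferencing past the array. */
    static struct scmi_sensor_info *
    sensor_info_get(struct sensors_info *si, unsigned int sensor_id)
    {
        if (sensor_id >= si->num_sensors)
            return NULL;

        return si->sensors + sensor_id;
    }

    int main(void)
    {
        struct scmi_sensor_info table[2] = { { 0 }, { 1 } };
        struct sensors_info si = { 2, table };

        return sensor_info_get(&si, 5) == NULL ? 0 : 1;
    }
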
 
index 8ced7af..4f9fb08 100644 (file)
@@ -48,6 +48,9 @@ static int efibc_reboot_notifier_call(struct notifier_block *notifier,
                return NOTIFY_DONE;
 
        wdata = kmalloc(MAX_DATA_LEN * sizeof(efi_char16_t), GFP_KERNEL);
+       if (!wdata)
+               return NOTIFY_DONE;
+
        for (l = 0; l < MAX_DATA_LEN - 1 && str[l] != '\0'; l++)
                wdata[l] = str[l];
        wdata[l] = L'\0';
index 8a18930..516f4f0 100644 (file)
@@ -14,7 +14,7 @@
 
 /* SHIM variables */
 static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
-static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
+static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT";
 
 static efi_status_t get_var(efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
                            unsigned long *data_size, void *data)
@@ -43,8 +43,8 @@ enum efi_secureboot_mode efi_get_secureboot(void)
 
        /*
         * See if a user has put the shim into insecure mode. If so, and if the
-        * variable doesn't have the runtime attribute set, we might as well
-        * honor that.
+        * variable doesn't have the non-volatile attribute set, we might as
+        * well honor that.
         */
        size = sizeof(moksbstate);
        status = get_efi_var(shim_MokSBState_name, &shim_guid,
@@ -53,7 +53,7 @@ enum efi_secureboot_mode efi_get_secureboot(void)
        /* If it fails, we don't care why. Default to secure */
        if (status != EFI_SUCCESS)
                goto secure_boot_enabled;
-       if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1)
+       if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1)
                return efi_secureboot_mode_disabled;
 
 secure_boot_enabled:
index 43ca665..7a7abc8 100644 (file)
@@ -516,6 +516,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
        hdr->ramdisk_image = 0;
        hdr->ramdisk_size = 0;
 
+       /*
+        * Disregard any setup data that was provided by the bootloader:
+        * setup_data could be pointing anywhere, and we have no way of
+        * authenticating or validating the payload.
+        */
+       hdr->setup_data = 0;
+
        efi_stub_entry(handle, sys_table_arg, boot_params);
        /* not reached */
 
index 72c677c..133e511 100644 (file)
@@ -148,10 +148,6 @@ static ssize_t flash_count_show(struct device *dev,
        stride = regmap_get_reg_stride(sec->m10bmc->regmap);
        num_bits = FLASH_COUNT_SIZE * 8;
 
-       flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
-       if (!flash_buf)
-               return -ENOMEM;
-
        if (FLASH_COUNT_SIZE % stride) {
                dev_err(sec->dev,
                        "FLASH_COUNT_SIZE (0x%x) not aligned to stride (0x%x)\n",
@@ -160,6 +156,10 @@ static ssize_t flash_count_show(struct device *dev,
                return -EINVAL;
        }
 
+       flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
+       if (!flash_buf)
+               return -ENOMEM;
+
        ret = regmap_bulk_read(sec->m10bmc->regmap, STAGING_FLASH_COUNT,
                               flash_buf, FLASH_COUNT_SIZE / stride);
        if (ret) {
index f422c3e..f77a965 100644 (file)
  * struct ftgpio_gpio - Gemini GPIO state container
  * @dev: containing device for this instance
  * @gc: gpiochip for this instance
- * @irq: irqchip for this instance
  * @base: remapped I/O-memory base
  * @clk: silicon clock
  */
 struct ftgpio_gpio {
        struct device *dev;
        struct gpio_chip gc;
-       struct irq_chip irq;
        void __iomem *base;
        struct clk *clk;
 };
@@ -70,6 +68,7 @@ static void ftgpio_gpio_mask_irq(struct irq_data *d)
        val = readl(g->base + GPIO_INT_EN);
        val &= ~BIT(irqd_to_hwirq(d));
        writel(val, g->base + GPIO_INT_EN);
+       gpiochip_disable_irq(gc, irqd_to_hwirq(d));
 }
 
 static void ftgpio_gpio_unmask_irq(struct irq_data *d)
@@ -78,6 +77,7 @@ static void ftgpio_gpio_unmask_irq(struct irq_data *d)
        struct ftgpio_gpio *g = gpiochip_get_data(gc);
        u32 val;
 
+       gpiochip_enable_irq(gc, irqd_to_hwirq(d));
        val = readl(g->base + GPIO_INT_EN);
        val |= BIT(irqd_to_hwirq(d));
        writel(val, g->base + GPIO_INT_EN);
@@ -221,6 +221,16 @@ static int ftgpio_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
        return 0;
 }
 
+static const struct irq_chip ftgpio_irq_chip = {
+       .name = "FTGPIO010",
+       .irq_ack = ftgpio_gpio_ack_irq,
+       .irq_mask = ftgpio_gpio_mask_irq,
+       .irq_unmask = ftgpio_gpio_unmask_irq,
+       .irq_set_type = ftgpio_gpio_set_irq_type,
+       .flags = IRQCHIP_IMMUTABLE,
+        GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
 static int ftgpio_gpio_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -277,14 +287,8 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
        if (!IS_ERR(g->clk))
                g->gc.set_config = ftgpio_gpio_set_config;
 
-       g->irq.name = "FTGPIO010";
-       g->irq.irq_ack = ftgpio_gpio_ack_irq;
-       g->irq.irq_mask = ftgpio_gpio_mask_irq;
-       g->irq.irq_unmask = ftgpio_gpio_unmask_irq;
-       g->irq.irq_set_type = ftgpio_gpio_set_irq_type;
-
        girq = &g->gc.irq;
-       girq->chip = &g->irq;
+       gpio_irq_chip_set_chip(girq, &ftgpio_irq_chip);
        girq->parent_handler = ftgpio_gpio_irq_handler;
        girq->num_parents = 1;
        girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
index 312309b..56656fb 100644 (file)
@@ -63,6 +63,14 @@ static void ixp4xx_gpio_irq_ack(struct irq_data *d)
        __raw_writel(BIT(d->hwirq), g->base + IXP4XX_REG_GPIS);
 }
 
+static void ixp4xx_gpio_mask_irq(struct irq_data *d)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+       irq_chip_mask_parent(d);
+       gpiochip_disable_irq(gc, d->hwirq);
+}
+
 static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -72,6 +80,7 @@ static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
        if (!(g->irq_edge & BIT(d->hwirq)))
                ixp4xx_gpio_irq_ack(d);
 
+       gpiochip_enable_irq(gc, d->hwirq);
        irq_chip_unmask_parent(d);
 }
 
@@ -149,12 +158,14 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
        return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
 }
 
-static struct irq_chip ixp4xx_gpio_irqchip = {
+static const struct irq_chip ixp4xx_gpio_irqchip = {
        .name = "IXP4GPIO",
        .irq_ack = ixp4xx_gpio_irq_ack,
-       .irq_mask = irq_chip_mask_parent,
+       .irq_mask = ixp4xx_gpio_mask_irq,
        .irq_unmask = ixp4xx_gpio_irq_unmask,
        .irq_set_type = ixp4xx_gpio_irq_set_type,
+       .flags = IRQCHIP_IMMUTABLE,
+       GPIOCHIP_IRQ_RESOURCE_HELPERS,
 };
 
 static int ixp4xx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
@@ -263,7 +274,7 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
        g->gc.owner = THIS_MODULE;
 
        girq = &g->gc.irq;
-       girq->chip = &ixp4xx_gpio_irqchip;
+       gpio_irq_chip_set_chip(girq, &ixp4xx_gpio_irqchip);
        girq->fwnode = g->fwnode;
        girq->parent_domain = parent;
        girq->child_to_parent_hwirq = ixp4xx_gpio_child_to_parent_hwirq;
index a2e505a..523dfd1 100644 (file)
@@ -533,8 +533,10 @@ static int __init gpio_mockup_register_chip(int idx)
        }
 
        fwnode = fwnode_create_software_node(properties, NULL);
-       if (IS_ERR(fwnode))
+       if (IS_ERR(fwnode)) {
+               kfree_strarray(line_names, ngpio);
                return PTR_ERR(fwnode);
+       }
 
        pdevinfo.name = "gpio-mockup";
        pdevinfo.id = idx;
@@ -597,9 +599,9 @@ static int __init gpio_mockup_init(void)
 
 static void __exit gpio_mockup_exit(void)
 {
+       gpio_mockup_unregister_pdevs();
        debugfs_remove_recursive(gpio_mockup_dbg_dir);
        platform_driver_unregister(&gpio_mockup_driver);
-       gpio_mockup_unregister_pdevs();
 }
 
 module_init(gpio_mockup_init);
index 1504982..3eb08cd 100644 (file)
@@ -169,6 +169,7 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
 
        switch (flow_type) {
        case IRQ_TYPE_EDGE_FALLING:
+       case IRQ_TYPE_LEVEL_LOW:
                raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
                gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
                        gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
index d8a26e5..f163f5c 100644 (file)
@@ -112,6 +112,8 @@ mediatek_gpio_irq_unmask(struct irq_data *d)
        unsigned long flags;
        u32 rise, fall, high, low;
 
+       gpiochip_enable_irq(gc, d->hwirq);
+
        spin_lock_irqsave(&rg->lock, flags);
        rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
        fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
@@ -143,6 +145,8 @@ mediatek_gpio_irq_mask(struct irq_data *d)
        mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
        mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
        spin_unlock_irqrestore(&rg->lock, flags);
+
+       gpiochip_disable_irq(gc, d->hwirq);
 }
 
 static int
@@ -204,6 +208,16 @@ mediatek_gpio_xlate(struct gpio_chip *chip,
        return gpio % MTK_BANK_WIDTH;
 }
 
+static const struct irq_chip mt7621_irq_chip = {
+       .name           = "mt7621-gpio",
+       .irq_mask_ack   = mediatek_gpio_irq_mask,
+       .irq_mask       = mediatek_gpio_irq_mask,
+       .irq_unmask     = mediatek_gpio_irq_unmask,
+       .irq_set_type   = mediatek_gpio_irq_type,
+       .flags          = IRQCHIP_IMMUTABLE,
+       GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
 static int
 mediatek_gpio_bank_probe(struct device *dev, int bank)
 {
@@ -238,11 +252,6 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
                return -ENOMEM;
 
        rg->chip.offset = bank * MTK_BANK_WIDTH;
-       rg->irq_chip.name = dev_name(dev);
-       rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
-       rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
-       rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
-       rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
 
        if (mtk->gpio_irq) {
                struct gpio_irq_chip *girq;
@@ -262,7 +271,7 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
                }
 
                girq = &rg->chip.irq;
-               girq->chip = &rg->irq_chip;
+               gpio_irq_chip_set_chip(girq, &mt7621_irq_chip);
                /* This will let us handle the parent IRQ in the driver */
                girq->parent_handler = NULL;
                girq->num_parents = 0;
index f91e876..bb50335 100644 (file)
@@ -419,11 +419,11 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
                        goto out;
                } else {
                        bank->toggle_edge_mode |= mask;
-                       level |= mask;
+                       level &= ~mask;
 
                        /*
                         * Determine gpio state. If 1 next interrupt should be
-                        * falling otherwise rising.
+                        * low otherwise high.
                         */
                        data = readl(bank->reg_base + bank->gpio_regs->ext_port);
                        if (data & mask)
index fa4bc74..e739dce 100644 (file)
@@ -307,6 +307,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
                girq->default_type = IRQ_TYPE_NONE;
                girq->handler = handle_simple_irq;
                girq->init_valid_mask = tqmx86_init_irq_valid_mask;
+
+               irq_domain_set_pm_device(girq->domain, dev);
        }
 
        ret = devm_gpiochip_add_data(dev, chip, gpio);
@@ -315,8 +317,6 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
                goto out_pm_dis;
        }
 
-       irq_domain_set_pm_device(girq->domain, dev);
-
        dev_info(dev, "GPIO functionality initialized with %d pins\n",
                 chip->ngpio);
 
index f8041d4..92f1855 100644 (file)
@@ -1986,7 +1986,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
                ret = -ENODEV;
                goto out_free_le;
        }
-       le->irq = irq;
 
        if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
                irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
@@ -2000,7 +1999,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
        init_waitqueue_head(&le->wait);
 
        /* Request a thread to read the events */
-       ret = request_threaded_irq(le->irq,
+       ret = request_threaded_irq(irq,
                                   lineevent_irq_handler,
                                   lineevent_irq_thread,
                                   irqflags,
@@ -2009,6 +2008,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
        if (ret)
                goto out_free_le;
 
+       le->irq = irq;
+
        fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
        if (fd < 0) {
                ret = fd;
index 1400abe..be7aff2 100644 (file)
@@ -2365,8 +2365,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
                }
                adev->ip_blocks[i].status.sw = true;
 
-               /* need to do gmc hw init early so we can allocate gpu mem */
-               if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+               if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
+                       /* need to do common hw init early so everything is set up for gmc */
+                       r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
+                       if (r) {
+                               DRM_ERROR("hw_init %d failed %d\n", i, r);
+                               goto init_failed;
+                       }
+                       adev->ip_blocks[i].status.hw = true;
+               } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+                       /* need to do gmc hw init early so we can allocate gpu mem */
                        /* Try to reserve bad pages early */
                        if (amdgpu_sriov_vf(adev))
                                amdgpu_virt_exchange_data(adev);
@@ -3052,8 +3060,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
        int i, r;
 
        static enum amd_ip_block_type ip_order[] = {
-               AMD_IP_BLOCK_TYPE_GMC,
                AMD_IP_BLOCK_TYPE_COMMON,
+               AMD_IP_BLOCK_TYPE_GMC,
                AMD_IP_BLOCK_TYPE_PSP,
                AMD_IP_BLOCK_TYPE_IH,
        };
index c20922a..23998f7 100644 (file)
@@ -38,6 +38,8 @@
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fb_helper.h>
@@ -498,6 +500,12 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
        .create_handle = drm_gem_fb_create_handle,
 };
 
+static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
+       .destroy = drm_gem_fb_destroy,
+       .create_handle = drm_gem_fb_create_handle,
+       .dirty = drm_atomic_helper_dirtyfb,
+};
+
 uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
                                          uint64_t bo_flags)
 {
@@ -1100,7 +1108,10 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
        if (ret)
                goto err;
 
-       ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+       if (drm_drv_uses_atomic_modeset(dev))
+               ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
+       else
+               ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
        if (ret)
                goto err;
 
index fe82b8b..0c54624 100644 (file)
@@ -181,6 +181,9 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
        for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
                if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
                        adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
+               /* zero sdma_hqd_mask for non-existent engine */
+               else if (adev->sdma.num_instances == 1)
+                       adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
                else
                        adev->mes.sdma_hqd_mask[i] = 0xfc;
        }
index 35738db..c9dec24 100644 (file)
@@ -756,7 +756,7 @@ static int psp_tmr_init(struct psp_context *psp)
        }
 
        pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
-       ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE(psp->adev),
+       ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
 
index c32b74b..e593e8c 100644 (file)
@@ -36,6 +36,7 @@
 #define PSP_CMD_BUFFER_SIZE    0x1000
 #define PSP_1_MEG              0x100000
 #define PSP_TMR_SIZE(adev)     ((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000)
+#define PSP_TMR_ALIGNMENT      0x100000
 #define PSP_FW_NAME_LEN                0x24
 
 enum psp_shared_mem_size {
index ff5361f..12c6f97 100644 (file)
@@ -1811,7 +1811,8 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
                amdgpu_ras_query_error_status(adev, &info);
 
                if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
-                   adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+                   adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
+                   adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
                        if (amdgpu_ras_reset_error_status(adev, info.head.block))
                                dev_warn(adev->dev, "Failed to reset error counter and error status");
                }
index 59cac34..690fd4f 100644 (file)
@@ -2484,8 +2484,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
                /* Intentionally setting invalid PTE flag
                 * combination to force a no-retry-fault
                 */
-               flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
-                       AMDGPU_PTE_TF;
+               flags = AMDGPU_PTE_SNOOPED | AMDGPU_PTE_PRT;
                value = 0;
        } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
                /* Redirect the access to the dummy page */
index 4603653..67ca16a 100644 (file)
@@ -1103,10 +1103,13 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
                        *flags |= AMDGPU_PDE_BFS(0x9);
 
        } else if (level == AMDGPU_VM_PDB0) {
-               if (*flags & AMDGPU_PDE_PTE)
+               if (*flags & AMDGPU_PDE_PTE) {
                        *flags &= ~AMDGPU_PDE_PTE;
-               else
+                       if (!(*flags & AMDGPU_PTE_VALID))
+                               *addr |= 1 << PAGE_SHIFT;
+               } else {
                        *flags |= AMDGPU_PTE_TF;
+               }
        }
 }
 
index b465baa..aa761ff 100644 (file)
@@ -380,6 +380,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
                WREG32_PCIE(smnPCIE_LC_CNTL, data);
 }
 
+#ifdef CONFIG_PCIEASPM
 static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
 {
        uint32_t def, data;
@@ -401,9 +402,11 @@ static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
        if (def != data)
                WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
 }
+#endif
 
 static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
 {
+#ifdef CONFIG_PCIEASPM
        uint32_t def, data;
 
        def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
@@ -459,7 +462,10 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
        if (def != data)
                WREG32_PCIE(smnPCIE_LC_CNTL6, data);
 
-       nbio_v2_3_program_ltr(adev);
+       /* Don't bother about LTR if LTR is not enabled
+        * in the path */
+       if (adev->pdev->ltr_path)
+               nbio_v2_3_program_ltr(adev);
 
        def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
        data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -483,6 +489,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
        data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
        if (def != data)
                WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
 }
 
 static void nbio_v2_3_apply_lc_spc_mode_wa(struct amdgpu_device *adev)
index f7f6dde..37615a7 100644 (file)
@@ -282,6 +282,7 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
                        mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
 }
 
+#ifdef CONFIG_PCIEASPM
 static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
 {
        uint32_t def, data;
@@ -303,9 +304,11 @@ static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
        if (def != data)
                WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
 }
+#endif
 
 static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
 {
+#ifdef CONFIG_PCIEASPM
        uint32_t def, data;
 
        def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
@@ -361,7 +364,10 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
        if (def != data)
                WREG32_PCIE(smnPCIE_LC_CNTL6, data);
 
-       nbio_v6_1_program_ltr(adev);
+       /* Don't bother about LTR if LTR is not enabled
+        * in the path */
+       if (adev->pdev->ltr_path)
+               nbio_v6_1_program_ltr(adev);
 
        def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
        data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -385,6 +391,7 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
        data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
        if (def != data)
                WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
 }
 
 const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
index 11848d1..19455a7 100644 (file)
@@ -673,6 +673,7 @@ struct amdgpu_nbio_ras nbio_v7_4_ras = {
 };
 
 
+#ifdef CONFIG_PCIEASPM
 static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
 {
        uint32_t def, data;
@@ -694,9 +695,11 @@ static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
        if (def != data)
                WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
 }
+#endif
 
 static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
 {
+#ifdef CONFIG_PCIEASPM
        uint32_t def, data;
 
        if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4))
@@ -755,7 +758,10 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
        if (def != data)
                WREG32_PCIE(smnPCIE_LC_CNTL6, data);
 
-       nbio_v7_4_program_ltr(adev);
+       /* Don't bother about LTR if LTR is not enabled
+        * in the path */
+       if (adev->pdev->ltr_path)
+               nbio_v7_4_program_ltr(adev);
 
        def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
        data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -779,6 +785,7 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
        data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
        if (def != data)
                WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
 }
 
 const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
index f30bc82..def8937 100644 (file)
 #include "nbio/nbio_7_7_0_sh_mask.h"
 #include <uapi/linux/kfd_ioctl.h>
 
+static void nbio_v7_7_remap_hdp_registers(struct amdgpu_device *adev)
+{
+       WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
+                    adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+       WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
+                    adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
+
 static u32 nbio_v7_7_get_rev_id(struct amdgpu_device *adev)
 {
        u32 tmp;
@@ -336,4 +344,5 @@ const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
        .get_clockgating_state = nbio_v7_7_get_clockgating_state,
        .ih_control = nbio_v7_7_ih_control,
        .init_registers = nbio_v7_7_init_registers,
+       .remap_hdp_registers = nbio_v7_7_remap_hdp_registers,
 };
index 65181ef..56424f7 100644 (file)
@@ -1504,6 +1504,11 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
                WREG32_SDMA(i, mmSDMA0_CNTL, temp);
 
                if (!amdgpu_sriov_vf(adev)) {
+                       ring = &adev->sdma.instance[i].ring;
+                       adev->nbio.funcs->sdma_doorbell_range(adev, i,
+                               ring->use_doorbell, ring->doorbell_index,
+                               adev->doorbell_index.sdma_doorbell_range);
+
                        /* unhalt engine */
                        temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
                        temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
index fde6154..183024d 100644 (file)
@@ -1211,25 +1211,6 @@ static int soc15_common_sw_fini(void *handle)
        return 0;
 }
 
-static void soc15_doorbell_range_init(struct amdgpu_device *adev)
-{
-       int i;
-       struct amdgpu_ring *ring;
-
-       /* sdma/ih doorbell range are programed by hypervisor */
-       if (!amdgpu_sriov_vf(adev)) {
-               for (i = 0; i < adev->sdma.num_instances; i++) {
-                       ring = &adev->sdma.instance[i].ring;
-                       adev->nbio.funcs->sdma_doorbell_range(adev, i,
-                               ring->use_doorbell, ring->doorbell_index,
-                               adev->doorbell_index.sdma_doorbell_range);
-               }
-
-               adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
-                                               adev->irq.ih.doorbell_index);
-       }
-}
-
 static int soc15_common_hw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1249,12 +1230,6 @@ static int soc15_common_hw_init(void *handle)
 
        /* enable the doorbell aperture */
        soc15_enable_doorbell_aperture(adev, true);
-       /* HW doorbell routing policy: doorbell writing not
-        * in SDMA/IH/MM/ACV range will be routed to CP. So
-        * we need to init SDMA/IH/MM/ACV doorbell range prior
-        * to CP ip block init and ring test.
-        */
-       soc15_doorbell_range_init(adev);
 
        return 0;
 }
index 55284b2..2e50db3 100644 (file)
@@ -421,6 +421,7 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
 {
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(11, 0, 0):
+               return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
        case IP_VERSION(11, 0, 2):
                return false;
        default:
index 03b7066..1e83db0 100644 (file)
@@ -289,6 +289,10 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
                }
        }
 
+       if (!amdgpu_sriov_vf(adev))
+               adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+                                                   adev->irq.ih.doorbell_index);
+
        pci_set_master(adev->pdev);
 
        /* enable interrupts */
index 2022ffb..59dfca0 100644 (file)
@@ -340,6 +340,10 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
                }
        }
 
+       if (!amdgpu_sriov_vf(adev))
+               adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+                                                   adev->irq.ih.doorbell_index);
+
        pci_set_master(adev->pdev);
 
        /* enable interrupts */
index 5140d9c..1efe7fa 100644 (file)
@@ -4759,7 +4759,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
        plane_info->visible = true;
        plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
 
-       plane_info->layer_index = 0;
+       plane_info->layer_index = plane_state->normalized_zpos;
 
        ret = fill_plane_color_attributes(plane_state, plane_info->format,
                                          &plane_info->color_space);
@@ -4827,7 +4827,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
        dc_plane_state->global_alpha = plane_info.global_alpha;
        dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
        dc_plane_state->dcc = plane_info.dcc;
-       dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
+       dc_plane_state->layer_index = plane_info.layer_index;
        dc_plane_state->flip_int_enabled = true;
 
        /*
@@ -9485,6 +9485,14 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                }
        }
 
+       /*
+        * DC consults the zpos (layer_index in DC terminology) to determine the
+        * hw plane on which to enable the hw cursor (see
+        * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
+        * atomic state, so call drm helper to normalize zpos.
+        */
+       drm_atomic_normalize_zpos(dev, state);
+
        /* Remove exiting planes if they are modified */
        for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
                ret = dm_update_plane_state(dc, state, plane,
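
The new comment explains that DC picks the cursor's hw plane from the zpos, so the atomic check now normalizes zpos before planes are rebuilt. As a rough illustration of what normalization produces, assuming nothing about the DRM internals, here is a standalone sketch that maps arbitrary zpos values to the dense 0..N-1 ordering that ends up in normalized_zpos (it is not the drm_atomic_normalize_zpos() code):

/* Illustrative only: user space may request arbitrary zpos values
 * (e.g. 7, 0, 4); normalization assigns consecutive indices in
 * stacking order. */
#include <stdio.h>
#include <stdlib.h>

struct fake_plane { int id; int zpos; int normalized_zpos; };

static int cmp_zpos(const void *a, const void *b)
{
        return ((const struct fake_plane *)a)->zpos -
               ((const struct fake_plane *)b)->zpos;
}

int main(void)
{
        struct fake_plane planes[] = { {0, 7, 0}, {1, 0, 0}, {2, 4, 0} };
        int i, n = 3;

        qsort(planes, n, sizeof(planes[0]), cmp_zpos);
        for (i = 0; i < n; i++) {
                planes[i].normalized_zpos = i;
                printf("plane %d: zpos %d -> normalized %d\n",
                       planes[i].id, planes[i].zpos, planes[i].normalized_zpos);
        }
        return 0;
}
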
index c09be3f..23a299c 100644 (file)
@@ -99,7 +99,7 @@ static int dcn31_get_active_display_cnt_wa(
        return display_count;
 }
 
-static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
 {
        struct dc *dc = clk_mgr_base->ctx->dc;
        int i;
@@ -110,9 +110,10 @@ static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
                if (pipe->top_pipe || pipe->prev_odm_pipe)
                        continue;
                if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
-                       if (disable)
+                       if (disable) {
                                pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
-                       else
+                               reset_sync_context_for_pipe(dc, context, i);
+                       } else
                                pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
                }
        }
@@ -211,11 +212,11 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
        }
 
        if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
-               dcn31_disable_otg_wa(clk_mgr_base, true);
+               dcn31_disable_otg_wa(clk_mgr_base, context, true);
 
                clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
                dcn31_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
-               dcn31_disable_otg_wa(clk_mgr_base, false);
+               dcn31_disable_otg_wa(clk_mgr_base, context, false);
 
                update_dispclk = true;
        }
index beb025c..8559dcd 100644 (file)
@@ -119,7 +119,7 @@ static int dcn314_get_active_display_cnt_wa(
        return display_count;
 }
 
-static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
 {
        struct dc *dc = clk_mgr_base->ctx->dc;
        int i;
@@ -129,11 +129,11 @@ static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
 
                if (pipe->top_pipe || pipe->prev_odm_pipe)
                        continue;
-               if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
-                                    dc_is_virtual_signal(pipe->stream->signal))) {
-                       if (disable)
+               if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+                       if (disable) {
                                pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
-                       else
+                               reset_sync_context_for_pipe(dc, context, i);
+                       } else
                                pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
                }
        }
@@ -233,11 +233,11 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
        }
 
        if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
-               dcn314_disable_otg_wa(clk_mgr_base, true);
+               dcn314_disable_otg_wa(clk_mgr_base, context, true);
 
                clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
                dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
-               dcn314_disable_otg_wa(clk_mgr_base, false);
+               dcn314_disable_otg_wa(clk_mgr_base, context, false);
 
                update_dispclk = true;
        }
@@ -670,6 +670,8 @@ static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *cl
        }
        ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
        bw_params->vram_type = bios_info->memory_type;
+
+       bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
        bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
 
        for (i = 0; i < WM_SET_COUNT; i++) {
index cc07662..98ad8e0 100644 (file)
@@ -46,6 +46,9 @@
 #define TO_CLK_MGR_DCN315(clk_mgr)\
        container_of(clk_mgr, struct clk_mgr_dcn315, base)
 
+#define UNSUPPORTED_DCFCLK 10000000
+#define MIN_DPP_DISP_CLK     100000
+
 static int dcn315_get_active_display_cnt_wa(
                struct dc *dc,
                struct dc_state *context)
@@ -79,7 +82,7 @@ static int dcn315_get_active_display_cnt_wa(
        return display_count;
 }
 
-static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
 {
        struct dc *dc = clk_mgr_base->ctx->dc;
        int i;
@@ -91,9 +94,10 @@ static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
                        continue;
                if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
                                     dc_is_virtual_signal(pipe->stream->signal))) {
-                       if (disable)
+                       if (disable) {
                                pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
-                       else
+                               reset_sync_context_for_pipe(dc, context, i);
+                       } else
                                pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
                }
        }
@@ -146,6 +150,9 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
                }
        }
 
+       /* Lock pstate by requesting unsupported dcfclk if change is unsupported */
+       if (!new_clocks->p_state_change_support)
+               new_clocks->dcfclk_khz = UNSUPPORTED_DCFCLK;
        if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
                clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
                dcn315_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
@@ -159,10 +166,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
 
        // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
        if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
-               if (new_clocks->dppclk_khz < 100000)
-                       new_clocks->dppclk_khz = 100000;
-               if (new_clocks->dispclk_khz < 100000)
-                       new_clocks->dispclk_khz = 100000;
+               if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK)
+                       new_clocks->dppclk_khz = MIN_DPP_DISP_CLK;
+               if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK)
+                       new_clocks->dispclk_khz = MIN_DPP_DISP_CLK;
        }
 
        if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
@@ -175,12 +182,12 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
        if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
                /* No need to apply the w/a if we haven't taken over from bios yet */
                if (clk_mgr_base->clks.dispclk_khz)
-                       dcn315_disable_otg_wa(clk_mgr_base, true);
+                       dcn315_disable_otg_wa(clk_mgr_base, context, true);
 
                clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
                dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
                if (clk_mgr_base->clks.dispclk_khz)
-                       dcn315_disable_otg_wa(clk_mgr_base, false);
+                       dcn315_disable_otg_wa(clk_mgr_base, context, false);
 
                update_dispclk = true;
        }
@@ -275,7 +282,7 @@ static struct wm_table ddr5_wm_table = {
                {
                        .wm_inst = WM_A,
                        .wm_type = WM_TYPE_PSTATE_CHG,
-                       .pstate_latency_us = 64.0,
+                       .pstate_latency_us = 129.0,
                        .sr_exit_time_us = 11.5,
                        .sr_enter_plus_exit_time_us = 14.5,
                        .valid = true,
@@ -283,7 +290,7 @@ static struct wm_table ddr5_wm_table = {
                {
                        .wm_inst = WM_B,
                        .wm_type = WM_TYPE_PSTATE_CHG,
-                       .pstate_latency_us = 64.0,
+                       .pstate_latency_us = 129.0,
                        .sr_exit_time_us = 11.5,
                        .sr_enter_plus_exit_time_us = 14.5,
                        .valid = true,
@@ -291,7 +298,7 @@ static struct wm_table ddr5_wm_table = {
                {
                        .wm_inst = WM_C,
                        .wm_type = WM_TYPE_PSTATE_CHG,
-                       .pstate_latency_us = 64.0,
+                       .pstate_latency_us = 129.0,
                        .sr_exit_time_us = 11.5,
                        .sr_enter_plus_exit_time_us = 14.5,
                        .valid = true,
@@ -299,7 +306,7 @@ static struct wm_table ddr5_wm_table = {
                {
                        .wm_inst = WM_D,
                        .wm_type = WM_TYPE_PSTATE_CHG,
-                       .pstate_latency_us = 64.0,
+                       .pstate_latency_us = 129.0,
                        .sr_exit_time_us = 11.5,
                        .sr_enter_plus_exit_time_us = 14.5,
                        .valid = true,
@@ -556,8 +563,7 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
        ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
        bw_params->vram_type = bios_info->memory_type;
        bw_params->num_channels = bios_info->ma_channel_number;
-       if (!bw_params->num_channels)
-               bw_params->num_channels = 2;
+       bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
 
        for (i = 0; i < WM_SET_COUNT; i++) {
                bw_params->wm_table.entries[i].wm_inst = i;
index 0cd3d2e..187f5b2 100644 (file)
@@ -112,7 +112,7 @@ static int dcn316_get_active_display_cnt_wa(
        return display_count;
 }
 
-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
 {
        struct dc *dc = clk_mgr_base->ctx->dc;
        int i;
@@ -124,9 +124,10 @@ static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
                        continue;
                if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
                                     dc_is_virtual_signal(pipe->stream->signal))) {
-                       if (disable)
+                       if (disable) {
                                pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
-                       else
+                               reset_sync_context_for_pipe(dc, context, i);
+                       } else
                                pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
                }
        }
@@ -221,11 +222,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
        }
 
        if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
-               dcn316_disable_otg_wa(clk_mgr_base, true);
+               dcn316_disable_otg_wa(clk_mgr_base, context, true);
 
                clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
                dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
-               dcn316_disable_otg_wa(clk_mgr_base, false);
+               dcn316_disable_otg_wa(clk_mgr_base, context, false);
 
                update_dispclk = true;
        }
index 48dad09..780f7f4 100644 (file)
@@ -2758,8 +2758,14 @@ bool perform_link_training_with_retries(
                                                skip_video_pattern);
 
                                /* Transmit idle pattern once training successful. */
-                               if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
+                               if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) {
                                        dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+                                       /* Update verified link settings to current one
+                                        * Because DPIA LT might fallback to lower link setting.
+                                        */
+                                       link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
+                                       link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
+                               }
                        } else {
                                status = dc_link_dp_perform_link_training(link,
                                                &pipe_ctx->link_res,
@@ -5121,6 +5127,14 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
                                lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
                                                                DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
 
+               /* If this chip cap is set, at least one retimer must exist in the chain
+                * Override count to 1 if we receive a known bad count (0 or an invalid value) */
+               if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
+                               (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
+                       ASSERT(0);
+                       link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
+               }
+
                /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
                is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
                                link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
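
The 0x80 written into phy_repeater_cnt above is, per the DPCD LTTPR encoding, the one-hot value for a single repeater (0x80 = 1 repeater down to 0x01 = 8). A small sketch of that decode, not the driver's dp_convert_to_count() implementation:

/* Illustrative only: decode the one-hot PHY_REPEATER_CNT byte. */
#include <stdio.h>

static int lttpr_count(unsigned char cnt)
{
        int bit;

        if (cnt == 0)
                return 0;                       /* no repeaters reported */
        for (bit = 7; bit >= 0; bit--)
                if (cnt == (1u << bit))
                        return 8 - bit;         /* single-bit values are valid */
        return -1;                              /* anything else is malformed */
}

int main(void)
{
        printf("%d %d %d\n", lttpr_count(0x80), lttpr_count(0x01), lttpr_count(0x22));
        /* prints: 1 8 -1 */
        return 0;
}
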
index 7dbab15..ccf7bd3 100644 (file)
@@ -3584,6 +3584,23 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
        }
 }
 
+void reset_sync_context_for_pipe(const struct dc *dc,
+       struct dc_state *context,
+       uint8_t pipe_idx)
+{
+       int i;
+       struct pipe_ctx *pipe_ctx_reset;
+
+       /* reset the otg sync context for the pipe and its slave pipes if any */
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               pipe_ctx_reset = &context->res_ctx.pipe_ctx[i];
+
+               if (((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_reset) == pipe_idx) &&
+                       IS_PIPE_SYNCD_VALID(pipe_ctx_reset)) || (i == pipe_idx))
+                       SET_PIPE_SYNCD_TO_PIPE(pipe_ctx_reset, i);
+       }
+}
+
 uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter)
 {
        /* TODO - get transmitter to phy idx mapping from DMUB */
index f62d509..0c85ab5 100644 (file)
@@ -329,7 +329,7 @@ bool dc_stream_set_cursor_attributes(
 
        dc = stream->ctx->dc;
 
-       if (attributes->height * attributes->width * 4 > 16384)
+       if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384)
                if (stream->mall_stream_config.type == SUBVP_MAIN)
                        return false;
 
index 5908b60..dbf8158 100644 (file)
@@ -745,6 +745,7 @@ struct dc_debug_options {
        bool disable_fixed_vs_aux_timeout_wa;
        bool force_disable_subvp;
        bool force_subvp_mclk_switch;
+       bool allow_sw_cursor_fallback;
        bool force_usr_allow;
        /* uses value at boot and disables switch */
        bool disable_dtb_ref_clk_switch;
index 09b3045..52a61b3 100644 (file)
@@ -417,44 +417,42 @@ static void populate_subvp_cmd_drr_info(struct dc *dc,
        struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
        struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
        struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
-       int16_t drr_frame_us = 0;
-       int16_t min_drr_supported_us = 0;
-       int16_t max_drr_supported_us = 0;
-       int16_t max_drr_vblank_us = 0;
-       int16_t max_drr_mallregion_us = 0;
-       int16_t mall_region_us = 0;
-       int16_t prefetch_us = 0;
-       int16_t subvp_active_us = 0;
-       int16_t drr_active_us = 0;
-       int16_t min_vtotal_supported = 0;
-       int16_t max_vtotal_supported = 0;
+       uint16_t drr_frame_us = 0;
+       uint16_t min_drr_supported_us = 0;
+       uint16_t max_drr_supported_us = 0;
+       uint16_t max_drr_vblank_us = 0;
+       uint16_t max_drr_mallregion_us = 0;
+       uint16_t mall_region_us = 0;
+       uint16_t prefetch_us = 0;
+       uint16_t subvp_active_us = 0;
+       uint16_t drr_active_us = 0;
+       uint16_t min_vtotal_supported = 0;
+       uint16_t max_vtotal_supported = 0;
 
        pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
        pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
        pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now
 
-       drr_frame_us = div64_s64(drr_timing->v_total * drr_timing->h_total,
-                                (int64_t)(drr_timing->pix_clk_100hz * 100) * 1000000);
+       drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000),
+                       (((uint64_t)drr_timing->pix_clk_100hz * 100)));
        // P-State allow width and FW delays already included phantom_timing->v_addressable
-       mall_region_us = div64_s64(phantom_timing->v_addressable * phantom_timing->h_total,
-                                  (int64_t)(phantom_timing->pix_clk_100hz * 100) * 1000000);
+       mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000),
+                       (((uint64_t)phantom_timing->pix_clk_100hz * 100)));
        min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
-       min_vtotal_supported = div64_s64(drr_timing->pix_clk_100hz * 100 *
-                                        (div64_s64((int64_t)min_drr_supported_us, 1000000)),
-                                        (int64_t)drr_timing->h_total);
-
-       prefetch_us = div64_s64((phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total,
-                               (int64_t)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
-                               dc->caps.subvp_prefetch_end_to_mall_start_us);
-       subvp_active_us = div64_s64(main_timing->v_addressable * main_timing->h_total,
-                                   (int64_t)(main_timing->pix_clk_100hz * 100) * 1000000);
-       drr_active_us = div64_s64(drr_timing->v_addressable * drr_timing->h_total,
-                                 (int64_t)(drr_timing->pix_clk_100hz * 100) * 1000000);
-       max_drr_vblank_us = div64_s64((int64_t)(subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us;
+       min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us),
+                       (((uint64_t)drr_timing->h_total * 1000000)));
+
+       prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000),
+                       (((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
+       subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000),
+                       (((uint64_t)main_timing->pix_clk_100hz * 100)));
+       drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
+                       (((uint64_t)drr_timing->pix_clk_100hz * 100)));
+       max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us;
        max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us;
        max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
-       max_vtotal_supported = div64_s64(drr_timing->pix_clk_100hz * 100 * (div64_s64((int64_t)max_drr_supported_us, 1000000)),
-                                        (int64_t)drr_timing->h_total);
+       max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
+                       (((uint64_t)drr_timing->h_total * 1000000)));
 
        pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
        pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
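
The rewritten conversions above keep every factor in 64 bits and apply the multiply-by-1,000,000 in the numerator before the single divide, so the unsigned 16-bit results are no longer derived from truncated or misordered intermediates. A standalone sketch of the same ordering with a worked 1080p60 number (the timing values are illustrative, not taken from the patch):

/* Illustrative only: frame time in microseconds from a CRTC timing,
 * computed entirely in 64-bit before the final narrow store.
 * us = v_total * h_total * 1e6 / (pix_clk_100hz * 100) */
#include <stdint.h>
#include <stdio.h>

static uint16_t frame_time_us(uint32_t v_total, uint32_t h_total,
                              uint32_t pix_clk_100hz)
{
        return (uint16_t)(((uint64_t)v_total * h_total * 1000000ULL) /
                          ((uint64_t)pix_clk_100hz * 100ULL));
}

int main(void)
{
        /* 1080p60: 1125 x 2200 total, 148.5 MHz pixel clock */
        printf("%u us per frame\n",
               (unsigned)frame_time_us(1125, 2200, 1485000));
        /* prints: 16666 us per frame */
        return 0;
}
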
@@ -548,10 +546,12 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
        struct dc_crtc_timing *phantom_timing1 = &subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing;
        struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;
 
-       subvp0_prefetch_us = div64_s64((phantom_timing0->v_total - phantom_timing0->v_front_porch) * phantom_timing0->h_total,
-                                      (int64_t)(phantom_timing0->pix_clk_100hz * 100) * 1000000 + dc->caps.subvp_prefetch_end_to_mall_start_us);
-       subvp1_prefetch_us = div64_s64((phantom_timing1->v_total - phantom_timing1->v_front_porch) * phantom_timing1->h_total,
-                                      (int64_t)(phantom_timing1->pix_clk_100hz * 100) * 1000000 + dc->caps.subvp_prefetch_end_to_mall_start_us);
+       subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
+                       (uint64_t)phantom_timing0->h_total * 1000000),
+                       (((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
+       subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) *
+                       (uint64_t)phantom_timing1->h_total * 1000000),
+                       (((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
 
        // Whichever SubVP PIPE has the smaller prefetch (including the prefetch end to mall start time)
        // should increase it's prefetch time to match the other
@@ -559,16 +559,17 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
                pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
                prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
                pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
-                       div64_s64(((div64_s64((int64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us), 1000000)) *
-                                  (phantom_timing1->pix_clk_100hz * 100) + phantom_timing1->h_total - 1),
-                                 (int64_t)phantom_timing1->h_total);
+                               div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
+                                       ((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)),
+                                       ((uint64_t)phantom_timing1->h_total * 1000000));
+
        } else if (subvp1_prefetch_us >  subvp0_prefetch_us) {
                pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0];
                prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us;
                pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
-                       div64_s64(((div64_s64((int64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us), 1000000)) *
-                                  (phantom_timing0->pix_clk_100hz * 100) + phantom_timing0->h_total - 1),
-                                 (int64_t)phantom_timing0->h_total);
+                               div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
+                                       ((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)),
+                                       ((uint64_t)phantom_timing0->h_total * 1000000));
        }
 }
 
@@ -630,13 +631,11 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
 
        // Round up
        pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
-               div64_s64(((div64_s64((int64_t)dc->caps.subvp_prefetch_end_to_mall_start_us, 1000000)) *
-                          (phantom_timing->pix_clk_100hz * 100) + phantom_timing->h_total - 1),
-                         (int64_t)phantom_timing->h_total);
+                       div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
+                                       ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
        pipe_data->pipe_config.subvp_data.processing_delay_lines =
-               div64_s64(((div64_s64((int64_t)dc->caps.subvp_fw_processing_delay_us, 1000000)) *
-                          (phantom_timing->pix_clk_100hz * 100) + phantom_timing->h_total - 1),
-                         (int64_t)phantom_timing->h_total);
+                       div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
+                                       ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
        // Find phantom pipe index based on phantom stream
        for (j = 0; j < dc->res_pool->pipe_count; j++) {
                struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
index 38a6705..aea4933 100644 (file)
@@ -2164,7 +2164,8 @@ static void dce110_setup_audio_dto(
                        continue;
                if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
                        continue;
-               if (pipe_ctx->stream_res.audio != NULL) {
+               if (pipe_ctx->stream_res.audio != NULL &&
+                       pipe_ctx->stream_res.audio->enabled == false) {
                        struct audio_output audio_output;
 
                        build_audio_output(context, pipe_ctx, &audio_output);
@@ -2204,7 +2205,8 @@ static void dce110_setup_audio_dto(
                        if (!dc_is_dp_signal(pipe_ctx->stream->signal))
                                continue;
 
-                       if (pipe_ctx->stream_res.audio != NULL) {
+                       if (pipe_ctx->stream_res.audio != NULL &&
+                               pipe_ctx->stream_res.audio->enabled == false) {
                                struct audio_output audio_output;
 
                                build_audio_output(context, pipe_ctx, &audio_output);
index cd26711..7ce64a3 100644 (file)
        type DSCRM_DSC_FORWARD_EN; \
        type DSCRM_DSC_OPP_PIPE_SOURCE
 
-#define DSC_REG_LIST_DCN314(id) \
-       SRI(DSC_TOP_CONTROL, DSC_TOP, id),\
-       SRI(DSC_DEBUG_CONTROL, DSC_TOP, id),\
-       SRI(DSCC_CONFIG0, DSCC, id),\
-       SRI(DSCC_CONFIG1, DSCC, id),\
-       SRI(DSCC_STATUS, DSCC, id),\
-       SRI(DSCC_INTERRUPT_CONTROL_STATUS, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG0, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG1, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG2, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG3, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG4, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG5, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG6, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG7, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG8, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG9, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG10, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG11, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG12, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG13, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG14, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG15, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG16, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG17, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG18, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG19, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG20, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG21, DSCC, id),\
-       SRI(DSCC_PPS_CONFIG22, DSCC, id),\
-       SRI(DSCC_MEM_POWER_CONTROL, DSCC, id),\
-       SRI(DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC, id),\
-       SRI(DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC, id),\
-       SRI(DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC, id),\
-       SRI(DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC, id),\
-       SRI(DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC, id),\
-       SRI(DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC, id),\
-       SRI(DSCC_MAX_ABS_ERROR0, DSCC, id),\
-       SRI(DSCC_MAX_ABS_ERROR1, DSCC, id),\
-       SRI(DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\
-       SRI(DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
-       SRI(DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
-       SRI(DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
-       SRI(DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\
-       SRI(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
-       SRI(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
-       SRI(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
-       SRI(DSCCIF_CONFIG0, DSCCIF, id),\
-       SRI(DSCCIF_CONFIG1, DSCCIF, id),\
-       SRI(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id)
-
-#define DSC_REG_LIST_SH_MASK_DCN314(mask_sh)\
-       DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_CLOCK_EN, mask_sh), \
-       DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DISPCLK_R_GATE_DIS, mask_sh), \
-       DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DSCCLK_R_GATE_DIS, mask_sh), \
-       DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_DBG_EN, mask_sh), \
-       DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_TEST_CLOCK_MUX_SEL, mask_sh), \
-       DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_PER_LINE, mask_sh), \
-       DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, mask_sh), \
-       DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, mask_sh), \
-       DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, mask_sh), \
-       /*DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_DISABLE_ICH, mask_sh),*/ \
-       DSC_SF(DSCC0_DSCC_STATUS, DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
-       DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MINOR, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MAJOR, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG0, PPS_IDENTIFIER, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG0, LINEBUF_DEPTH, mask_sh), \
-       DSC2_SF(DSCC0, DSCC_PPS_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BITS_PER_PIXEL, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG1, VBR_ENABLE, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG1, SIMPLE_422, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CONVERT_RGB, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BLOCK_PRED_ENABLE, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_422, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_420, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CHUNK_SIZE, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_WIDTH, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_HEIGHT, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_WIDTH, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_HEIGHT, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_XMIT_DELAY, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_DEC_DELAY, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG5, INITIAL_SCALE_VALUE, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG5, SCALE_INCREMENT_INTERVAL, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SCALE_DECREMENT_INTERVAL, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG6, FIRST_LINE_BPG_OFFSET, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SECOND_LINE_BPG_OFFSET, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG7, NFL_BPG_OFFSET, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG8, NSL_BPG_OFFSET, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG8, SECOND_LINE_OFFSET_ADJ, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG9, INITIAL_OFFSET, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG9, FINAL_OFFSET, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MIN_QP, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MAX_QP, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG10, RC_MODEL_SIZE, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_EDGE_FACTOR, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT0, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT1, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_LO, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_HI, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH0, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH1, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH2, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH3, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH4, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH5, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH6, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH7, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH8, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH9, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH10, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH11, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH12, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH13, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MIN_QP0, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MAX_QP0, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_BPG_OFFSET0, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP1, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP1, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET1, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP2, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP2, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET2, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP3, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP3, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET3, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP4, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP4, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET4, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP5, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP5, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET5, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP6, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP6, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET6, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP7, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP7, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET7, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP8, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP8, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET8, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP9, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP9, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET9, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP10, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP10, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET10, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP11, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP11, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET11, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP12, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP12, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET12, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP13, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP13, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET13, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP14, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP14, mask_sh), \
-       DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET14, mask_sh), \
-       DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_DEFAULT_MEM_LOW_POWER_STATE, mask_sh), \
-       DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_FORCE, mask_sh), \
-       DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_DIS, mask_sh), \
-       DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_STATE, mask_sh), \
-       DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_FORCE, mask_sh), \
-       DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_DIS, mask_sh), \
-       DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_STATE, mask_sh), \
-       DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC_R_Y_SQUARED_ERROR_LOWER, mask_sh), \
-       DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC_R_Y_SQUARED_ERROR_UPPER, mask_sh), \
-       DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC_G_CB_SQUARED_ERROR_LOWER, mask_sh), \
-       DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC_G_CB_SQUARED_ERROR_UPPER, mask_sh), \
-       DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC_B_CR_SQUARED_ERROR_LOWER, mask_sh), \
-       DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC_B_CR_SQUARED_ERROR_UPPER, mask_sh), \
-       DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_R_Y_MAX_ABS_ERROR, mask_sh), \
-       DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_G_CB_MAX_ABS_ERROR, mask_sh), \
-       DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR1, DSCC_B_CR_MAX_ABS_ERROR, mask_sh), \
-       DSC_SF(DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \
-       DSC_SF(DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
-       DSC_SF(DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
-       DSC_SF(DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
-       DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \
-       DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
-       DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
-       DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
-       DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, mask_sh), \
-       DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
-       DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, mask_sh), \
-       DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_PIXEL_FORMAT, mask_sh), \
-       DSC2_SF(DSCCIF0, DSCCIF_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
-       DSC_SF(DSCCIF0_DSCCIF_CONFIG0, DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
-       DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_WIDTH, mask_sh), \
-       DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_HEIGHT, mask_sh), \
-       DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, mask_sh), \
-       DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_OPP_PIPE_SOURCE, mask_sh)
-
-
 struct dcn20_dsc_registers {
        uint32_t DSC_TOP_CONTROL;
        uint32_t DSC_DEBUG_CONTROL;
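For reference, the DSC_REG_LIST_SH_MASK_DCN314 list removed above (and the DCN20 list the DCN3.1.4 resource code falls back to in a later hunk) follows the driver's usual shift/mask list-macro pattern: one macro body is expanded twice, once with a __SHIFT suffix and once with a _MASK suffix, to populate two parallel structs. A minimal, self-contained sketch of that pattern; the register and field names below are hypothetical, not the driver's:

#include <stdint.h>

/* Hypothetical field offset/mask pair, named the way ASIC headers name them. */
#define DSC_TOP_CONTROL__DSC_CLOCK_EN__SHIFT 0x0
#define DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK   0x00000001L

struct dsc_shift { uint8_t  DSC_CLOCK_EN; };
struct dsc_mask  { uint32_t DSC_CLOCK_EN; };

/* One field-list macro, instantiated once per suffix via token pasting. */
#define DSC_FIELD(reg, field, post)  .field = reg ## __ ## field ## post
#define DSC_FIELD_LIST(post) \
	DSC_FIELD(DSC_TOP_CONTROL, DSC_CLOCK_EN, post)

static const struct dsc_shift dsc_shift = { DSC_FIELD_LIST(__SHIFT) };
static const struct dsc_mask  dsc_mask  = { DSC_FIELD_LIST(_MASK)  };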
index 884fa06..598ce87 100644
@@ -1565,6 +1565,7 @@ static void dcn20_update_dchubp_dpp(
        /* Any updates are handled in dc interface, just need
         * to apply existing for plane enable / opp change */
        if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
+                       || pipe_ctx->update_flags.bits.plane_changed
                        || pipe_ctx->stream->update_flags.bits.gamut_remap
                        || pipe_ctx->stream->update_flags.bits.out_csc) {
                /* dpp/cm gamut remap*/
index e3351dd..06d8638 100644
@@ -67,8 +67,7 @@ static void enc314_disable_fifo(struct stream_encoder *enc)
 {
        struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 
-       REG_UPDATE_2(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0,
-                    DIG_FIFO_READ_START_LEVEL, 0);
+       REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0);
 }
 
 static void enc314_dp_set_odm_combine(
index 39931d4..f4d1b83 100644
@@ -343,7 +343,6 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
 {
        struct dc_stream_state *stream = pipe_ctx->stream;
        unsigned int odm_combine_factor = 0;
-       struct dc *dc = pipe_ctx->stream->ctx->dc;
        bool two_pix_per_container = false;
 
        two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
@@ -364,7 +363,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
                } else {
                        *k1_div = PIXEL_RATE_DIV_BY_1;
                        *k2_div = PIXEL_RATE_DIV_BY_4;
-                       if ((odm_combine_factor == 2) || dc->debug.enable_dp_dig_pixel_rate_div_policy)
+                       if (odm_combine_factor == 2)
                                *k2_div = PIXEL_RATE_DIV_BY_2;
                }
        }
@@ -384,21 +383,10 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
                return;
 
        odm_combine_factor = get_odm_config(pipe_ctx, NULL);
-       if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1
-               || dcn314_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
+       if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1)
                pix_per_cycle = 2;
 
        if (pipe_ctx->stream_res.stream_enc->funcs->set_input_mode)
                pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc,
                                pix_per_cycle);
 }
-
-bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
-{
-       struct dc *dc = pipe_ctx->stream->ctx->dc;
-
-       if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
-               dc->debug.enable_dp_dig_pixel_rate_div_policy)
-               return true;
-       return false;
-}
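With the dcn314_is_dp_dig_pixel_rate_div_policy hook removed, the pixels-per-cycle choice in dcn314_set_pixels_per_cycle again depends only on the stream timing and the ODM combine factor, not on the enable_dp_dig_pixel_rate_div_policy debug flag. A reduced, stand-alone sketch of the resulting selection, assuming the default of one pixel per cycle (the helper below is illustrative, not driver code):

#include <stdbool.h>
#include <stdio.h>

/* Sketch: 2 pixels/cycle when the timing packs two pixels per container
 * or when ODM combine is in use; otherwise 1 pixel/cycle. */
static int pixels_per_cycle(bool two_pix_per_container, int odm_combine_factor)
{
	return (two_pix_per_container || odm_combine_factor > 1) ? 2 : 1;
}

int main(void)
{
	printf("%d\n", pixels_per_cycle(false, 2)); /* ODM 2:1 -> 2 */
	printf("%d\n", pixels_per_cycle(false, 0)); /* single pipe -> 1 */
	return 0;
}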
index d014580..2442802 100644
@@ -41,6 +41,4 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
 
 void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
 
-bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
-
 #endif /* __DC_HWSS_DCN314_H__ */
index fcf67eb..72a563a 100644
@@ -146,7 +146,6 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
        .setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
        .calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values,
        .set_pixels_per_cycle = dcn314_set_pixels_per_cycle,
-       .is_dp_dig_pixel_rate_div_policy = dcn314_is_dp_dig_pixel_rate_div_policy,
 };
 
 void dcn314_hw_sequencer_construct(struct dc *dc)
index 2a2a4a9..44ac1c2 100644
@@ -87,6 +87,9 @@
 #define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT                0x10
 #define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK          0x01FF0000L
 
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT                   0x0
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK                     0x0000000FL
+
 #include "reg_helper.h"
 #include "dce/dmub_abm.h"
 #include "dce/dmub_psr.h"
@@ -579,7 +582,7 @@ static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
 
 #define dsc_regsDCN314(id)\
 [id] = {\
-       DSC_REG_LIST_DCN314(id)\
+       DSC_REG_LIST_DCN20(id)\
 }
 
 static const struct dcn20_dsc_registers dsc_regs[] = {
@@ -590,11 +593,11 @@ static const struct dcn20_dsc_registers dsc_regs[] = {
 };
 
 static const struct dcn20_dsc_shift dsc_shift = {
-       DSC_REG_LIST_SH_MASK_DCN314(__SHIFT)
+       DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
 };
 
 static const struct dcn20_dsc_mask dsc_mask = {
-       DSC_REG_LIST_SH_MASK_DCN314(_MASK)
+       DSC_REG_LIST_SH_MASK_DCN20(_MASK)
 };
 
 static const struct dcn30_mpc_registers mpc_regs = {
@@ -844,7 +847,7 @@ static const struct resource_caps res_cap_dcn314 = {
        .num_ddc = 5,
        .num_vmid = 16,
        .num_mpc_3dlut = 2,
-       .num_dsc = 4,
+       .num_dsc = 3,
 };
 
 static const struct dc_plane_cap plane_cap = {
index 6ec1c52..2038cbd 100644
@@ -103,6 +103,11 @@ void hubp32_cursor_set_attributes(
        enum cursor_lines_per_chunk lpc = hubp2_get_lines_per_chunk(
                        attr->width, attr->color_format);
 
+       //Round cursor width up to next multiple of 64
+       uint32_t cursor_width = ((attr->width + 63) / 64) * 64;
+       uint32_t cursor_height = attr->height;
+       uint32_t cursor_size = cursor_width * cursor_height;
+
        hubp->curs_attr = *attr;
 
        REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
@@ -126,7 +131,24 @@ void hubp32_cursor_set_attributes(
                         /* used to shift the cursor chunk request deadline */
                        CURSOR0_CHUNK_HDL_ADJUST, 3);
 
-       if (attr->width * attr->height * 4 > 16384)
+       switch (attr->color_format) {
+       case CURSOR_MODE_MONO:
+               cursor_size /= 2;
+               break;
+       case CURSOR_MODE_COLOR_1BIT_AND:
+       case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+       case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+               cursor_size *= 4;
+               break;
+
+       case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+       case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+       default:
+               cursor_size *= 8;
+               break;
+       }
+
+       if (cursor_size > 16384)
                REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, true);
        else
                REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false);
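The same cursor-footprint computation reappears in dcn32_update_mall_sel in the next hunk. A stand-alone sketch of the arithmetic both hunks use: width is rounded up to the next multiple of 64, the pixel count is scaled per color format, and the result is compared against the 16 KiB threshold. The enum values are a subset of those in the hunk; everything else here is illustrative:

#include <stdint.h>
#include <stdio.h>

enum cursor_color_format {            /* subset, mirroring the hunk above */
	CURSOR_MODE_MONO,
	CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA,
	CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED,
};

static uint32_t cursor_size_bytes(uint32_t width, uint32_t height,
				  enum cursor_color_format fmt)
{
	/* Round width up to the next multiple of 64, as in the hunk. */
	uint32_t size = ((width + 63) / 64) * 64 * height;

	switch (fmt) {
	case CURSOR_MODE_MONO:
		size /= 2;
		break;
	case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
		size *= 4;                      /* 32-bit ARGB */
		break;
	default:
		size *= 8;                      /* 64-bit FP formats */
		break;
	}
	return size;
}

int main(void)
{
	/* 64x64 ARGB is exactly 16384 bytes: not above the threshold, so
	 * USE_MALL_FOR_CURSOR stays false in the hunk's comparison. */
	printf("%u\n", cursor_size_bytes(64, 64, CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA));
	return 0;
}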
index 8d9d96c..344fe75 100644
@@ -741,7 +741,29 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
                struct hubp *hubp = pipe->plane_res.hubp;
 
                if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
-                       if (hubp->curs_attr.width * hubp->curs_attr.height * 4 > 16384)
+                       //Round cursor width up to next multiple of 64
+                       int cursor_width = ((hubp->curs_attr.width + 63) / 64) * 64;
+                       int cursor_height = hubp->curs_attr.height;
+                       int cursor_size = cursor_width * cursor_height;
+
+                       switch (hubp->curs_attr.color_format) {
+                       case CURSOR_MODE_MONO:
+                               cursor_size /= 2;
+                               break;
+                       case CURSOR_MODE_COLOR_1BIT_AND:
+                       case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+                       case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+                               cursor_size *= 4;
+                               break;
+
+                       case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+                       case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+                       default:
+                               cursor_size *= 8;
+                               break;
+                       }
+
+                       if (cursor_size > 16384)
                                cache_cursor = true;
 
                        if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
index 8b887b5..c3b783c 100644
@@ -871,6 +871,7 @@ static const struct dc_debug_options debug_defaults_drv = {
        .exit_idle_opt_for_cursor_updates = true,
        .enable_single_display_2to1_odm_policy = true,
        .enable_dp_dig_pixel_rate_div_policy = 1,
+       .allow_sw_cursor_fallback = false,
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
@@ -2039,7 +2040,8 @@ static bool dcn32_resource_construct(
        dc->caps.max_downscale_ratio = 600;
        dc->caps.i2c_speed_in_khz = 100;
        dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
-       dc->caps.max_cursor_size = 256;
+       /* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed*/
+       dc->caps.max_cursor_size = 64;
        dc->caps.min_horizontal_blanking_period = 80;
        dc->caps.dmdata_alloc_size = 2048;
        dc->caps.mall_size_per_mem_channel = 0;
index 1e7e620..cf15d0e 100644
@@ -30,6 +30,9 @@
 
 #define DCN3_2_DET_SEG_SIZE 64
 #define DCN3_2_MALL_MBLK_SIZE_BYTES 65536 // 64 * 1024
+#define DCN3_2_MBLK_WIDTH 128
+#define DCN3_2_MBLK_HEIGHT_4BPE 128
+#define DCN3_2_MBLK_HEIGHT_8BPE 64
 
 #define TO_DCN32_RES_POOL(pool)\
        container_of(pool, struct dcn32_resource_pool, base)
index ab918fe..1f195c5 100644
@@ -46,7 +46,6 @@
 uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_state *context)
 {
        uint32_t num_ways = 0;
-       uint32_t mall_region_pixels = 0;
        uint32_t bytes_per_pixel = 0;
        uint32_t cache_lines_used = 0;
        uint32_t lines_per_way = 0;
@@ -54,20 +53,64 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
        uint32_t bytes_in_mall = 0;
        uint32_t num_mblks = 0;
        uint32_t cache_lines_per_plane = 0;
-       uint32_t i = 0;
+       uint32_t i = 0, j = 0;
+       uint32_t mblk_width = 0;
+       uint32_t mblk_height = 0;
+       uint32_t full_vp_width_blk_aligned = 0;
+       uint32_t full_vp_height_blk_aligned = 0;
+       uint32_t mall_alloc_width_blk_aligned = 0;
+       uint32_t mall_alloc_height_blk_aligned = 0;
+       uint32_t full_vp_height = 0;
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
                // Find the phantom pipes
-               if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
+               if (pipe->stream && pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe &&
                                pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
-                       bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
-                       mall_region_pixels = pipe->plane_state->plane_size.surface_pitch * pipe->stream->timing.v_addressable;
+                       struct pipe_ctx *main_pipe = NULL;
+
+                       /* Get full viewport height from main pipe (required for MBLK calculation) */
+                       for (j = 0; j < dc->res_pool->pipe_count; j++) {
+                               main_pipe = &context->res_ctx.pipe_ctx[j];
+                               if (main_pipe->stream == pipe->stream->mall_stream_config.paired_stream) {
+                                       full_vp_height = main_pipe->plane_res.scl_data.viewport.height;
+                                       break;
+                               }
+                       }
 
-                       // For bytes required in MALL, calculate based on number of MBlks required
-                       num_mblks = (mall_region_pixels * bytes_per_pixel +
-                                       DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / DCN3_2_MALL_MBLK_SIZE_BYTES;
+                       bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
+                       mblk_width = DCN3_2_MBLK_WIDTH;
+                       mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
+
+                       /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
+                        * FLOOR(vp_x_start, blk_width)
+                        */
+                       full_vp_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
+                                       pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) +
+                                       (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
+
+                       /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) -
+                        * FLOOR(vp_y_start, blk_height)
+                        */
+                       full_vp_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y +
+                                       full_vp_height + mblk_height - 1) / mblk_height * mblk_height) +
+                                       (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
+
+                       /* mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c */
+                       mall_alloc_width_blk_aligned = full_vp_width_blk_aligned;
+
+                       /* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */
+                       mall_alloc_height_blk_aligned = (pipe->stream->timing.v_addressable - 1 + mblk_height - 1) /
+                                       mblk_height * mblk_height + mblk_height;
+
+                       /* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c;
+                        * full_mblk_height_ub_l/c = mall_alloc_height_blk_aligned_l/c;
+                        * num_mblk_l/c = (full_mblk_width_ub_l/c / mblk_width_l/c) * (full_mblk_height_ub_l/c / mblk_height_l/c);
+                        * (Should be divisible, but round up if not)
+                        */
+                       num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
+                                       ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
                        bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
                        // cache lines used is total bytes / cache_line size. Add +2 for worst case alignment
                        // (MALL is 64-byte aligned)
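A reduced sketch of the MALL sizing arithmetic this hunk switches to: once the allocation width and height are block-aligned, the MBLK count is the product of the per-axis block counts, and the byte total is converted to cache lines with the +2 worst-case alignment pad noted in the comment above. The DCN3_2 constants follow the header additions earlier in the diff; the 64-byte cache-line size and the helper itself are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define DCN3_2_MALL_MBLK_SIZE_BYTES 65536   /* 64 KiB per MBLK, from the header hunk */
#define DCN3_2_MBLK_WIDTH           128
#define DCN3_2_MBLK_HEIGHT_4BPE     128
#define DCN3_2_MBLK_HEIGHT_8BPE     64

/* Given a block-aligned MALL allocation in pixels, count MBLKs and
 * convert the resulting bytes to 64-byte cache lines (illustrative). */
static uint32_t mall_cache_lines(uint32_t alloc_w, uint32_t alloc_h,
				 uint32_t bytes_per_pixel)
{
	uint32_t mblk_w = DCN3_2_MBLK_WIDTH;
	uint32_t mblk_h = (bytes_per_pixel == 4) ? DCN3_2_MBLK_HEIGHT_4BPE
						 : DCN3_2_MBLK_HEIGHT_8BPE;
	uint32_t num_mblks = ((alloc_w + mblk_w - 1) / mblk_w) *
			     ((alloc_h + mblk_h - 1) / mblk_h);
	uint32_t bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;

	/* +2 lines for worst-case 64-byte alignment, per the driver comment. */
	return bytes_in_mall / 64 + 2;
}

int main(void)
{
	printf("%u cache lines\n", mall_cache_lines(3840, 2160, 4));
	return 0;
}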
index c8b7d6f..7309eed 100644
@@ -872,6 +872,7 @@ static const struct dc_debug_options debug_defaults_drv = {
        .exit_idle_opt_for_cursor_updates = true,
        .enable_single_display_2to1_odm_policy = true,
        .enable_dp_dig_pixel_rate_div_policy = 1,
+       .allow_sw_cursor_fallback = false,
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
@@ -1651,7 +1652,8 @@ static bool dcn321_resource_construct(
        dc->caps.max_downscale_ratio = 600;
        dc->caps.i2c_speed_in_khz = 100;
        dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
-       dc->caps.max_cursor_size = 256;
+       /* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/
+       dc->caps.max_cursor_size = 64;
        dc->caps.min_horizontal_blanking_period = 80;
        dc->caps.dmdata_alloc_size = 2048;
        dc->caps.mall_size_per_mem_channel = 0;
index 86a3b5b..cb81ed2 100644
@@ -70,6 +70,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/display_mode_vba_314.o := $(dml_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/display_rq_dlg_calc_314.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/dcn314_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags)
@@ -123,6 +125,7 @@ DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
 DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
 DML += dcn30/dcn30_fpu.o dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
 DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
+DML += dcn314/display_mode_vba_314.o dcn314/display_rq_dlg_calc_314.o
 DML += dcn32/display_mode_vba_32.o dcn32/display_rq_dlg_calc_32.o dcn32/display_mode_vba_util_32.o
 DML += dcn31/dcn31_fpu.o
 DML += dcn32/dcn32_fpu.o
index 876b321..1cb858d 100644
@@ -6610,8 +6610,7 @@ static double CalculateUrgentLatency(
        return ret;
 }
 
-
-static void UseMinimumDCFCLK(
+static noinline_for_stack void UseMinimumDCFCLK(
                struct display_mode_lib *mode_lib,
                int MaxInterDCNTileRepeaters,
                int MaxPrefetchMode,
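Marking this large DML helper noinline_for_stack keeps it from being inlined into its caller, so its sizeable locals stay in their own stack frame; that is the usual way kernel code avoids -Wframe-larger-than warnings in functions like these. Outside the kernel headers the attribute can be sketched as follows (the helper names are hypothetical):

/* In the kernel, noinline_for_stack is defined as noinline; the point is to
 * keep a helper with large locals in its own frame instead of merging it
 * into an already-deep caller. Minimal sketch: */
#define noinline_for_stack __attribute__((__noinline__))

static noinline_for_stack double heavy_helper(double x)
{
	double scratch[256];          /* large frame stays local to the helper */

	scratch[0] = x;
	return scratch[0] * 2.0;
}

double caller(double x)
{
	return heavy_helper(x);       /* heavy_helper's frame is not merged here */
}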
index 149a1b1..fa7b029 100644
@@ -291,6 +291,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
        .do_urgent_latency_adjustment = false,
        .urgent_latency_adjustment_fabric_clock_component_us = 0,
        .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+       .num_chans = 4,
 };
 
 struct _vcs_dpi_ip_params_st dcn3_16_ip = {
@@ -680,7 +681,11 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
 
        dcn3_15_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
        dcn3_15_ip.max_num_dpp = dc->res_pool->pipe_count;
-       dcn3_15_soc.num_chans = bw_params->num_channels;
+
+       if (bw_params->num_channels > 0)
+               dcn3_15_soc.num_chans = bw_params->num_channels;
+       if (bw_params->dram_channel_width_bytes > 0)
+               dcn3_15_soc.dram_channel_width_bytes = bw_params->dram_channel_width_bytes;
 
        ASSERT(clk_table->num_entries);
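The guard above keeps the dcn3_15 SoC bounding box at its defaults (including the newly added .num_chans = 4) whenever the reported bw_params carry a zero channel count or zero DRAM channel width. A minimal sketch of the same "only override defaults with nonzero values" pattern, with illustrative struct and field names:

#include <stdio.h>

struct soc_bb  { int num_chans; int dram_channel_width_bytes; };
struct bw_prms { int num_channels; int dram_channel_width_bytes; };

/* Defaults stand unless the reported values are usable (nonzero). */
static void update_bb(struct soc_bb *bb, const struct bw_prms *bw)
{
	if (bw->num_channels > 0)
		bb->num_chans = bw->num_channels;
	if (bw->dram_channel_width_bytes > 0)
		bb->dram_channel_width_bytes = bw->dram_channel_width_bytes;
}

int main(void)
{
	struct soc_bb bb = { .num_chans = 4, .dram_channel_width_bytes = 4 };
	struct bw_prms bw = { 0, 0 };       /* nothing reported yet */

	update_bb(&bb, &bw);
	printf("%d %d\n", bb.num_chans, bb.dram_channel_width_bytes); /* 4 4 */
	return 0;
}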
 
index d63b420..8ca66f1 100644
@@ -251,33 +251,13 @@ static void CalculateRowBandwidth(
 
 static void CalculateFlipSchedule(
                struct display_mode_lib *mode_lib,
+               unsigned int k,
                double HostVMInefficiencyFactor,
                double UrgentExtraLatency,
                double UrgentLatency,
-               unsigned int GPUVMMaxPageTableLevels,
-               bool HostVMEnable,
-               unsigned int HostVMMaxNonCachedPageTableLevels,
-               bool GPUVMEnable,
-               double HostVMMinPageSize,
                double PDEAndMetaPTEBytesPerFrame,
                double MetaRowBytes,
-               double DPTEBytesPerRow,
-               double BandwidthAvailableForImmediateFlip,
-               unsigned int TotImmediateFlipBytes,
-               enum source_format_class SourcePixelFormat,
-               double LineTime,
-               double VRatio,
-               double VRatioChroma,
-               double Tno_bw,
-               bool DCCEnable,
-               unsigned int dpte_row_height,
-               unsigned int meta_row_height,
-               unsigned int dpte_row_height_chroma,
-               unsigned int meta_row_height_chroma,
-               double *DestinationLinesToRequestVMInImmediateFlip,
-               double *DestinationLinesToRequestRowInImmediateFlip,
-               double *final_flip_bw,
-               bool *ImmediateFlipSupportedForPipe);
+               double DPTEBytesPerRow);
 static double CalculateWriteBackDelay(
                enum source_format_class WritebackPixelFormat,
                double WritebackHRatio,
@@ -311,64 +291,28 @@ static void CalculateVupdateAndDynamicMetadataParameters(
 static void CalculateWatermarksAndDRAMSpeedChangeSupport(
                struct display_mode_lib *mode_lib,
                unsigned int PrefetchMode,
-               unsigned int NumberOfActivePlanes,
-               unsigned int MaxLineBufferLines,
-               unsigned int LineBufferSize,
-               unsigned int WritebackInterfaceBufferSize,
                double DCFCLK,
                double ReturnBW,
-               bool SynchronizedVBlank,
-               unsigned int dpte_group_bytes[],
-               unsigned int MetaChunkSize,
                double UrgentLatency,
                double ExtraLatency,
-               double WritebackLatency,
-               double WritebackChunkSize,
                double SOCCLK,
-               double DRAMClockChangeLatency,
-               double SRExitTime,
-               double SREnterPlusExitTime,
-               double SRExitZ8Time,
-               double SREnterPlusExitZ8Time,
                double DCFCLKDeepSleep,
                unsigned int DETBufferSizeY[],
                unsigned int DETBufferSizeC[],
                unsigned int SwathHeightY[],
                unsigned int SwathHeightC[],
-               unsigned int LBBitPerPixel[],
                double SwathWidthY[],
                double SwathWidthC[],
-               double HRatio[],
-               double HRatioChroma[],
-               unsigned int vtaps[],
-               unsigned int VTAPsChroma[],
-               double VRatio[],
-               double VRatioChroma[],
-               unsigned int HTotal[],
-               double PixelClock[],
-               unsigned int BlendingAndTiming[],
                unsigned int DPPPerPlane[],
                double BytePerPixelDETY[],
                double BytePerPixelDETC[],
-               double DSTXAfterScaler[],
-               double DSTYAfterScaler[],
-               bool WritebackEnable[],
-               enum source_format_class WritebackPixelFormat[],
-               double WritebackDestinationWidth[],
-               double WritebackDestinationHeight[],
-               double WritebackSourceHeight[],
                bool UnboundedRequestEnabled,
                int unsigned CompressedBufferSizeInkByte,
                enum clock_change_support *DRAMClockChangeSupport,
-               double *UrgentWatermark,
-               double *WritebackUrgentWatermark,
-               double *DRAMClockChangeWatermark,
-               double *WritebackDRAMClockChangeWatermark,
                double *StutterExitWatermark,
                double *StutterEnterPlusExitWatermark,
                double *Z8StutterExitWatermark,
-               double *Z8StutterEnterPlusExitWatermark,
-               double *MinActiveDRAMClockChangeLatencySupported);
+               double *Z8StutterEnterPlusExitWatermark);
 
 static void CalculateDCFCLKDeepSleep(
                struct display_mode_lib *mode_lib,
@@ -2904,33 +2848,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
                        for (k = 0; k < v->NumberOfActivePlanes; ++k) {
                                CalculateFlipSchedule(
                                                mode_lib,
+                                               k,
                                                HostVMInefficiencyFactor,
                                                v->UrgentExtraLatency,
                                                v->UrgentLatency,
-                                               v->GPUVMMaxPageTableLevels,
-                                               v->HostVMEnable,
-                                               v->HostVMMaxNonCachedPageTableLevels,
-                                               v->GPUVMEnable,
-                                               v->HostVMMinPageSize,
                                                v->PDEAndMetaPTEBytesFrame[k],
                                                v->MetaRowByte[k],
-                                               v->PixelPTEBytesPerRow[k],
-                                               v->BandwidthAvailableForImmediateFlip,
-                                               v->TotImmediateFlipBytes,
-                                               v->SourcePixelFormat[k],
-                                               v->HTotal[k] / v->PixelClock[k],
-                                               v->VRatio[k],
-                                               v->VRatioChroma[k],
-                                               v->Tno_bw[k],
-                                               v->DCCEnable[k],
-                                               v->dpte_row_height[k],
-                                               v->meta_row_height[k],
-                                               v->dpte_row_height_chroma[k],
-                                               v->meta_row_height_chroma[k],
-                                               &v->DestinationLinesToRequestVMInImmediateFlip[k],
-                                               &v->DestinationLinesToRequestRowInImmediateFlip[k],
-                                               &v->final_flip_bw[k],
-                                               &v->ImmediateFlipSupportedForPipe[k]);
+                                               v->PixelPTEBytesPerRow[k]);
                        }
 
                        v->total_dcn_read_bw_with_flip = 0.0;
@@ -3017,64 +2941,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
                CalculateWatermarksAndDRAMSpeedChangeSupport(
                                mode_lib,
                                PrefetchMode,
-                               v->NumberOfActivePlanes,
-                               v->MaxLineBufferLines,
-                               v->LineBufferSize,
-                               v->WritebackInterfaceBufferSize,
                                v->DCFCLK,
                                v->ReturnBW,
-                               v->SynchronizedVBlank,
-                               v->dpte_group_bytes,
-                               v->MetaChunkSize,
                                v->UrgentLatency,
                                v->UrgentExtraLatency,
-                               v->WritebackLatency,
-                               v->WritebackChunkSize,
                                v->SOCCLK,
-                               v->DRAMClockChangeLatency,
-                               v->SRExitTime,
-                               v->SREnterPlusExitTime,
-                               v->SRExitZ8Time,
-                               v->SREnterPlusExitZ8Time,
                                v->DCFCLKDeepSleep,
                                v->DETBufferSizeY,
                                v->DETBufferSizeC,
                                v->SwathHeightY,
                                v->SwathHeightC,
-                               v->LBBitPerPixel,
                                v->SwathWidthY,
                                v->SwathWidthC,
-                               v->HRatio,
-                               v->HRatioChroma,
-                               v->vtaps,
-                               v->VTAPsChroma,
-                               v->VRatio,
-                               v->VRatioChroma,
-                               v->HTotal,
-                               v->PixelClock,
-                               v->BlendingAndTiming,
                                v->DPPPerPlane,
                                v->BytePerPixelDETY,
                                v->BytePerPixelDETC,
-                               v->DSTXAfterScaler,
-                               v->DSTYAfterScaler,
-                               v->WritebackEnable,
-                               v->WritebackPixelFormat,
-                               v->WritebackDestinationWidth,
-                               v->WritebackDestinationHeight,
-                               v->WritebackSourceHeight,
                                v->UnboundedRequestEnabled,
                                v->CompressedBufferSizeInkByte,
                                &DRAMClockChangeSupport,
-                               &v->UrgentWatermark,
-                               &v->WritebackUrgentWatermark,
-                               &v->DRAMClockChangeWatermark,
-                               &v->WritebackDRAMClockChangeWatermark,
                                &v->StutterExitWatermark,
                                &v->StutterEnterPlusExitWatermark,
                                &v->Z8StutterExitWatermark,
-                               &v->Z8StutterEnterPlusExitWatermark,
-                               &v->MinActiveDRAMClockChangeLatencySupported);
+                               &v->Z8StutterEnterPlusExitWatermark);
 
                for (k = 0; k < v->NumberOfActivePlanes; ++k) {
                        if (v->WritebackEnable[k] == true) {
@@ -3598,61 +3486,43 @@ static void CalculateRowBandwidth(
 
 static void CalculateFlipSchedule(
                struct display_mode_lib *mode_lib,
+               unsigned int k,
                double HostVMInefficiencyFactor,
                double UrgentExtraLatency,
                double UrgentLatency,
-               unsigned int GPUVMMaxPageTableLevels,
-               bool HostVMEnable,
-               unsigned int HostVMMaxNonCachedPageTableLevels,
-               bool GPUVMEnable,
-               double HostVMMinPageSize,
                double PDEAndMetaPTEBytesPerFrame,
                double MetaRowBytes,
-               double DPTEBytesPerRow,
-               double BandwidthAvailableForImmediateFlip,
-               unsigned int TotImmediateFlipBytes,
-               enum source_format_class SourcePixelFormat,
-               double LineTime,
-               double VRatio,
-               double VRatioChroma,
-               double Tno_bw,
-               bool DCCEnable,
-               unsigned int dpte_row_height,
-               unsigned int meta_row_height,
-               unsigned int dpte_row_height_chroma,
-               unsigned int meta_row_height_chroma,
-               double *DestinationLinesToRequestVMInImmediateFlip,
-               double *DestinationLinesToRequestRowInImmediateFlip,
-               double *final_flip_bw,
-               bool *ImmediateFlipSupportedForPipe)
+               double DPTEBytesPerRow)
 {
+       struct vba_vars_st *v = &mode_lib->vba;
        double min_row_time = 0.0;
        unsigned int HostVMDynamicLevelsTrips;
        double TimeForFetchingMetaPTEImmediateFlip;
        double TimeForFetchingRowInVBlankImmediateFlip;
        double ImmediateFlipBW;
+       double LineTime = v->HTotal[k] / v->PixelClock[k];
 
-       if (GPUVMEnable == true && HostVMEnable == true) {
-               HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+       if (v->GPUVMEnable == true && v->HostVMEnable == true) {
+               HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
        } else {
                HostVMDynamicLevelsTrips = 0;
        }
 
-       if (GPUVMEnable == true || DCCEnable == true) {
-               ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
+       if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
+               ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
        }
 
-       if (GPUVMEnable == true) {
+       if (v->GPUVMEnable == true) {
                TimeForFetchingMetaPTEImmediateFlip = dml_max3(
-                               Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
-                               UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
+                               v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
+                               UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
                                LineTime / 4.0);
        } else {
                TimeForFetchingMetaPTEImmediateFlip = 0;
        }
 
-       *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
-       if ((GPUVMEnable == true || DCCEnable == true)) {
+       v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
+       if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
                TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
                                (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
                                UrgentLatency * (HostVMDynamicLevelsTrips + 1),
@@ -3661,54 +3531,54 @@ static void CalculateFlipSchedule(
                TimeForFetchingRowInVBlankImmediateFlip = 0;
        }
 
-       *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
+       v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
 
-       if (GPUVMEnable == true) {
-               *final_flip_bw = dml_max(
-                               PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
-                               (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
-       } else if ((GPUVMEnable == true || DCCEnable == true)) {
-               *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
+       if (v->GPUVMEnable == true) {
+               v->final_flip_bw[k] = dml_max(
+                               PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
+                               (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
+       } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
+               v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
        } else {
-               *final_flip_bw = 0;
+               v->final_flip_bw[k] = 0;
        }
 
-       if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
-               if (GPUVMEnable == true && DCCEnable != true) {
-                       min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
-               } else if (GPUVMEnable != true && DCCEnable == true) {
-                       min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
+       if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
+               if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+                       min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
+               } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+                       min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
                } else {
                        min_row_time = dml_min4(
-                                       dpte_row_height * LineTime / VRatio,
-                                       meta_row_height * LineTime / VRatio,
-                                       dpte_row_height_chroma * LineTime / VRatioChroma,
-                                       meta_row_height_chroma * LineTime / VRatioChroma);
+                                       v->dpte_row_height[k] * LineTime / v->VRatio[k],
+                                       v->meta_row_height[k] * LineTime / v->VRatio[k],
+                                       v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
+                                       v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
                }
        } else {
-               if (GPUVMEnable == true && DCCEnable != true) {
-                       min_row_time = dpte_row_height * LineTime / VRatio;
-               } else if (GPUVMEnable != true && DCCEnable == true) {
-                       min_row_time = meta_row_height * LineTime / VRatio;
+               if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+                       min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
+               } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+                       min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
                } else {
-                       min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
+                       min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
                }
        }
 
-       if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
+       if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
                        || TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
-               *ImmediateFlipSupportedForPipe = false;
+               v->ImmediateFlipSupportedForPipe[k] = false;
        } else {
-               *ImmediateFlipSupportedForPipe = true;
+               v->ImmediateFlipSupportedForPipe[k] = true;
        }
 
 #ifdef __DML_VBA_DEBUG__
-       dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip);
-       dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip);
+       dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
+       dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
        dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
        dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
        dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
-       dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
+       dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
 #endif
 
 }
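This hunk and the matching CalculateWatermarksAndDRAMSpeedChangeSupport changes below apply the same refactor: per-plane scalars are dropped from the parameter list, and the helper instead receives the plane index k and reads from the shared vba_vars_st, shrinking both the call sites and the stack frames. A reduced sketch of that pattern; all names are hypothetical, loosely modeled on the vba context:

#include <stdio.h>

/* Hypothetical shared context standing in for vba_vars_st. */
struct vba_ctx {
	double VRatio[8];
	double HTotal[8];
	double PixelClock[8];
};

/* Instead of ~20 per-plane arguments, take the index and the context. */
static void flip_schedule(struct vba_ctx *v, unsigned int k, double extra_latency)
{
	double line_time = v->HTotal[k] / v->PixelClock[k];

	printf("plane %u: line_time=%f extra=%f vratio=%f\n",
	       k, line_time, extra_latency, v->VRatio[k]);
}

int main(void)
{
	struct vba_ctx v = { .VRatio = {1.0}, .HTotal = {2200}, .PixelClock = {148.5} };

	flip_schedule(&v, 0, 0.25);
	return 0;
}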
@@ -5300,33 +5170,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                                        for (k = 0; k < v->NumberOfActivePlanes; k++) {
                                                CalculateFlipSchedule(
                                                                mode_lib,
+                                                               k,
                                                                HostVMInefficiencyFactor,
                                                                v->ExtraLatency,
                                                                v->UrgLatency[i],
-                                                               v->GPUVMMaxPageTableLevels,
-                                                               v->HostVMEnable,
-                                                               v->HostVMMaxNonCachedPageTableLevels,
-                                                               v->GPUVMEnable,
-                                                               v->HostVMMinPageSize,
                                                                v->PDEAndMetaPTEBytesPerFrame[i][j][k],
                                                                v->MetaRowBytes[i][j][k],
-                                                               v->DPTEBytesPerRow[i][j][k],
-                                                               v->BandwidthAvailableForImmediateFlip,
-                                                               v->TotImmediateFlipBytes,
-                                                               v->SourcePixelFormat[k],
-                                                               v->HTotal[k] / v->PixelClock[k],
-                                                               v->VRatio[k],
-                                                               v->VRatioChroma[k],
-                                                               v->Tno_bw[k],
-                                                               v->DCCEnable[k],
-                                                               v->dpte_row_height[k],
-                                                               v->meta_row_height[k],
-                                                               v->dpte_row_height_chroma[k],
-                                                               v->meta_row_height_chroma[k],
-                                                               &v->DestinationLinesToRequestVMInImmediateFlip[k],
-                                                               &v->DestinationLinesToRequestRowInImmediateFlip[k],
-                                                               &v->final_flip_bw[k],
-                                                               &v->ImmediateFlipSupportedForPipe[k]);
+                                                               v->DPTEBytesPerRow[i][j][k]);
                                        }
                                        v->total_dcn_read_bw_with_flip = 0.0;
                                        for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -5384,64 +5234,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                        CalculateWatermarksAndDRAMSpeedChangeSupport(
                                        mode_lib,
                                        v->PrefetchModePerState[i][j],
-                                       v->NumberOfActivePlanes,
-                                       v->MaxLineBufferLines,
-                                       v->LineBufferSize,
-                                       v->WritebackInterfaceBufferSize,
                                        v->DCFCLKState[i][j],
                                        v->ReturnBWPerState[i][j],
-                                       v->SynchronizedVBlank,
-                                       v->dpte_group_bytes,
-                                       v->MetaChunkSize,
                                        v->UrgLatency[i],
                                        v->ExtraLatency,
-                                       v->WritebackLatency,
-                                       v->WritebackChunkSize,
                                        v->SOCCLKPerState[i],
-                                       v->DRAMClockChangeLatency,
-                                       v->SRExitTime,
-                                       v->SREnterPlusExitTime,
-                                       v->SRExitZ8Time,
-                                       v->SREnterPlusExitZ8Time,
                                        v->ProjectedDCFCLKDeepSleep[i][j],
                                        v->DETBufferSizeYThisState,
                                        v->DETBufferSizeCThisState,
                                        v->SwathHeightYThisState,
                                        v->SwathHeightCThisState,
-                                       v->LBBitPerPixel,
                                        v->SwathWidthYThisState,
                                        v->SwathWidthCThisState,
-                                       v->HRatio,
-                                       v->HRatioChroma,
-                                       v->vtaps,
-                                       v->VTAPsChroma,
-                                       v->VRatio,
-                                       v->VRatioChroma,
-                                       v->HTotal,
-                                       v->PixelClock,
-                                       v->BlendingAndTiming,
                                        v->NoOfDPPThisState,
                                        v->BytePerPixelInDETY,
                                        v->BytePerPixelInDETC,
-                                       v->DSTXAfterScaler,
-                                       v->DSTYAfterScaler,
-                                       v->WritebackEnable,
-                                       v->WritebackPixelFormat,
-                                       v->WritebackDestinationWidth,
-                                       v->WritebackDestinationHeight,
-                                       v->WritebackSourceHeight,
                                        UnboundedRequestEnabledThisState,
                                        CompressedBufferSizeInkByteThisState,
                                        &v->DRAMClockChangeSupport[i][j],
-                                       &v->UrgentWatermark,
-                                       &v->WritebackUrgentWatermark,
-                                       &v->DRAMClockChangeWatermark,
-                                       &v->WritebackDRAMClockChangeWatermark,
-                                       &dummy,
                                        &dummy,
                                        &dummy,
                                        &dummy,
-                                       &v->MinActiveDRAMClockChangeLatencySupported);
+                                       &dummy);
                }
        }
 
@@ -5566,64 +5380,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 static void CalculateWatermarksAndDRAMSpeedChangeSupport(
                struct display_mode_lib *mode_lib,
                unsigned int PrefetchMode,
-               unsigned int NumberOfActivePlanes,
-               unsigned int MaxLineBufferLines,
-               unsigned int LineBufferSize,
-               unsigned int WritebackInterfaceBufferSize,
                double DCFCLK,
                double ReturnBW,
-               bool SynchronizedVBlank,
-               unsigned int dpte_group_bytes[],
-               unsigned int MetaChunkSize,
                double UrgentLatency,
                double ExtraLatency,
-               double WritebackLatency,
-               double WritebackChunkSize,
                double SOCCLK,
-               double DRAMClockChangeLatency,
-               double SRExitTime,
-               double SREnterPlusExitTime,
-               double SRExitZ8Time,
-               double SREnterPlusExitZ8Time,
                double DCFCLKDeepSleep,
                unsigned int DETBufferSizeY[],
                unsigned int DETBufferSizeC[],
                unsigned int SwathHeightY[],
                unsigned int SwathHeightC[],
-               unsigned int LBBitPerPixel[],
                double SwathWidthY[],
                double SwathWidthC[],
-               double HRatio[],
-               double HRatioChroma[],
-               unsigned int vtaps[],
-               unsigned int VTAPsChroma[],
-               double VRatio[],
-               double VRatioChroma[],
-               unsigned int HTotal[],
-               double PixelClock[],
-               unsigned int BlendingAndTiming[],
                unsigned int DPPPerPlane[],
                double BytePerPixelDETY[],
                double BytePerPixelDETC[],
-               double DSTXAfterScaler[],
-               double DSTYAfterScaler[],
-               bool WritebackEnable[],
-               enum source_format_class WritebackPixelFormat[],
-               double WritebackDestinationWidth[],
-               double WritebackDestinationHeight[],
-               double WritebackSourceHeight[],
                bool UnboundedRequestEnabled,
                int unsigned CompressedBufferSizeInkByte,
                enum clock_change_support *DRAMClockChangeSupport,
-               double *UrgentWatermark,
-               double *WritebackUrgentWatermark,
-               double *DRAMClockChangeWatermark,
-               double *WritebackDRAMClockChangeWatermark,
                double *StutterExitWatermark,
                double *StutterEnterPlusExitWatermark,
                double *Z8StutterExitWatermark,
-               double *Z8StutterEnterPlusExitWatermark,
-               double *MinActiveDRAMClockChangeLatencySupported)
+               double *Z8StutterEnterPlusExitWatermark)
 {
        struct vba_vars_st *v = &mode_lib->vba;
        double EffectiveLBLatencyHidingY;
@@ -5643,103 +5421,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
        double TotalPixelBW = 0.0;
        int k, j;
 
-       *UrgentWatermark = UrgentLatency + ExtraLatency;
+       v->UrgentWatermark = UrgentLatency + ExtraLatency;
 
 #ifdef __DML_VBA_DEBUG__
        dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
        dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
-       dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark);
+       dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
 #endif
 
-       *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
+       v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
 
 #ifdef __DML_VBA_DEBUG__
-       dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency);
-       dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark);
+       dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
+       dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
 #endif
 
        v->TotalActiveWriteback = 0;
-       for (k = 0; k < NumberOfActivePlanes; ++k) {
-               if (WritebackEnable[k] == true) {
+       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+               if (v->WritebackEnable[k] == true) {
                        v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
                }
        }
 
        if (v->TotalActiveWriteback <= 1) {
-               *WritebackUrgentWatermark = WritebackLatency;
+               v->WritebackUrgentWatermark = v->WritebackLatency;
        } else {
-               *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+               v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
        }
 
        if (v->TotalActiveWriteback <= 1) {
-               *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
+               v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
        } else {
-               *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+               v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
        }
 
-       for (k = 0; k < NumberOfActivePlanes; ++k) {
+       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
                TotalPixelBW = TotalPixelBW
-                               + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k])
-                                               / (HTotal[k] / PixelClock[k]);
+                               + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
+                                               / (v->HTotal[k] / v->PixelClock[k]);
        }
 
-       for (k = 0; k < NumberOfActivePlanes; ++k) {
+       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
                double EffectiveDETBufferSizeY = DETBufferSizeY[k];
 
                v->LBLatencyHidingSourceLinesY = dml_min(
-                               (double) MaxLineBufferLines,
-                               dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
+                               (double) v->MaxLineBufferLines,
+                               dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
 
                v->LBLatencyHidingSourceLinesC = dml_min(
-                               (double) MaxLineBufferLines,
-                               dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
+                               (double) v->MaxLineBufferLines,
+                               dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
 
-               EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
+               EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
 
-               EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+               EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
 
                if (UnboundedRequestEnabled) {
                        EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
-                                       + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+                                       + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
                }
 
                LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
                LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
-               FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+               FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
                if (BytePerPixelDETC[k] > 0) {
                        LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
                        LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
-                       FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
+                       FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
                } else {
                        LinesInDETC = 0;
                        FullDETBufferingTimeC = 999999;
                }
 
                ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
-                               - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+                               - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
 
-               if (NumberOfActivePlanes > 1) {
+               if (v->NumberOfActivePlanes > 1) {
                        ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
-                                       - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
+                                       - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
                }
 
                if (BytePerPixelDETC[k] > 0) {
                        ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
-                                       - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+                                       - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
 
-                       if (NumberOfActivePlanes > 1) {
+                       if (v->NumberOfActivePlanes > 1) {
                                ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
-                                               - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
+                                               - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
                        }
                        v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
                } else {
                        v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
                }
 
-               if (WritebackEnable[k] == true) {
-                       WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024
-                                       / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
-                       if (WritebackPixelFormat[k] == dm_444_64) {
+               if (v->WritebackEnable[k] == true) {
+                       WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+                                       / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+                       if (v->WritebackPixelFormat[k] == dm_444_64) {
                                WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
                        }
                        WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
@@ -5749,14 +5527,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
 
        v->MinActiveDRAMClockChangeMargin = 999999;
        PlaneWithMinActiveDRAMClockChangeMargin = 0;
-       for (k = 0; k < NumberOfActivePlanes; ++k) {
+       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
                if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
                        v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
-                       if (BlendingAndTiming[k] == k) {
+                       if (v->BlendingAndTiming[k] == k) {
                                PlaneWithMinActiveDRAMClockChangeMargin = k;
                        } else {
-                               for (j = 0; j < NumberOfActivePlanes; ++j) {
-                                       if (BlendingAndTiming[k] == j) {
+                               for (j = 0; j < v->NumberOfActivePlanes; ++j) {
+                                       if (v->BlendingAndTiming[k] == j) {
                                                PlaneWithMinActiveDRAMClockChangeMargin = j;
                                        }
                                }
@@ -5764,11 +5542,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
                }
        }
 
-       *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
+       v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency;
 
        SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
-       for (k = 0; k < NumberOfActivePlanes; ++k) {
-               if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+               if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
                                && v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
                        SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
                }
@@ -5776,25 +5554,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
 
        v->TotalNumberOfActiveOTG = 0;
 
-       for (k = 0; k < NumberOfActivePlanes; ++k) {
-               if (BlendingAndTiming[k] == k) {
+       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+               if (v->BlendingAndTiming[k] == k) {
                        v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
                }
        }
 
        if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
                *DRAMClockChangeSupport = dm_dram_clock_change_vactive;
-       } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
+       } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
                        || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
                *DRAMClockChangeSupport = dm_dram_clock_change_vblank;
        } else {
                *DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
        }
 
-       *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
-       *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
-       *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
-       *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+       *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
+       *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
+       *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+       *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
 
 #ifdef __DML_VBA_DEBUG__
        dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
index 34a5d0f..4bb3b31 100644
@@ -194,6 +194,9 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
                dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
                dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
 
+               if (bw_params->dram_channel_width_bytes > 0)
+                       dcn3_14_soc.dram_channel_width_bytes = bw_params->dram_channel_width_bytes;
+
                if (bw_params->num_channels > 0)
                        dcn3_14_soc.num_chans = bw_params->num_channels;
 
@@ -262,7 +265,7 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
        }
 
        if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
-               dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31);
+               dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314);
        else
                dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
 }
index fc4d747..ee821c4 100644
@@ -61,7 +61,7 @@
 // fudge factor for min dcfclk calculation
 #define __DML_MIN_DCFCLK_FACTOR__   1.15
 
-struct {
+typedef struct {
        double DPPCLK;
        double DISPCLK;
        double PixelClock;
@@ -265,33 +265,13 @@ static void CalculateRowBandwidth(
 
 static void CalculateFlipSchedule(
                struct display_mode_lib *mode_lib,
+               unsigned int k,
                double HostVMInefficiencyFactor,
                double UrgentExtraLatency,
                double UrgentLatency,
-               unsigned int GPUVMMaxPageTableLevels,
-               bool HostVMEnable,
-               unsigned int HostVMMaxNonCachedPageTableLevels,
-               bool GPUVMEnable,
-               double HostVMMinPageSize,
                double PDEAndMetaPTEBytesPerFrame,
                double MetaRowBytes,
-               double DPTEBytesPerRow,
-               double BandwidthAvailableForImmediateFlip,
-               unsigned int TotImmediateFlipBytes,
-               enum source_format_class SourcePixelFormat,
-               double LineTime,
-               double VRatio,
-               double VRatioChroma,
-               double Tno_bw,
-               bool DCCEnable,
-               unsigned int dpte_row_height,
-               unsigned int meta_row_height,
-               unsigned int dpte_row_height_chroma,
-               unsigned int meta_row_height_chroma,
-               double *DestinationLinesToRequestVMInImmediateFlip,
-               double *DestinationLinesToRequestRowInImmediateFlip,
-               double *final_flip_bw,
-               bool *ImmediateFlipSupportedForPipe);
+               double DPTEBytesPerRow);
 static double CalculateWriteBackDelay(
                enum source_format_class WritebackPixelFormat,
                double WritebackHRatio,
@@ -325,64 +305,28 @@ static void CalculateVupdateAndDynamicMetadataParameters(
 static void CalculateWatermarksAndDRAMSpeedChangeSupport(
                struct display_mode_lib *mode_lib,
                unsigned int PrefetchMode,
-               unsigned int NumberOfActivePlanes,
-               unsigned int MaxLineBufferLines,
-               unsigned int LineBufferSize,
-               unsigned int WritebackInterfaceBufferSize,
                double DCFCLK,
                double ReturnBW,
-               bool SynchronizedVBlank,
-               unsigned int dpte_group_bytes[],
-               unsigned int MetaChunkSize,
                double UrgentLatency,
                double ExtraLatency,
-               double WritebackLatency,
-               double WritebackChunkSize,
                double SOCCLK,
-               double DRAMClockChangeLatency,
-               double SRExitTime,
-               double SREnterPlusExitTime,
-               double SRExitZ8Time,
-               double SREnterPlusExitZ8Time,
                double DCFCLKDeepSleep,
                unsigned int DETBufferSizeY[],
                unsigned int DETBufferSizeC[],
                unsigned int SwathHeightY[],
                unsigned int SwathHeightC[],
-               unsigned int LBBitPerPixel[],
                double SwathWidthY[],
                double SwathWidthC[],
-               double HRatio[],
-               double HRatioChroma[],
-               unsigned int vtaps[],
-               unsigned int VTAPsChroma[],
-               double VRatio[],
-               double VRatioChroma[],
-               unsigned int HTotal[],
-               double PixelClock[],
-               unsigned int BlendingAndTiming[],
                unsigned int DPPPerPlane[],
                double BytePerPixelDETY[],
                double BytePerPixelDETC[],
-               double DSTXAfterScaler[],
-               double DSTYAfterScaler[],
-               bool WritebackEnable[],
-               enum source_format_class WritebackPixelFormat[],
-               double WritebackDestinationWidth[],
-               double WritebackDestinationHeight[],
-               double WritebackSourceHeight[],
                bool UnboundedRequestEnabled,
                unsigned int CompressedBufferSizeInkByte,
                enum clock_change_support *DRAMClockChangeSupport,
-               double *UrgentWatermark,
-               double *WritebackUrgentWatermark,
-               double *DRAMClockChangeWatermark,
-               double *WritebackDRAMClockChangeWatermark,
                double *StutterExitWatermark,
                double *StutterEnterPlusExitWatermark,
                double *Z8StutterExitWatermark,
-               double *Z8StutterEnterPlusExitWatermark,
-               double *MinActiveDRAMClockChangeLatencySupported);
+               double *Z8StutterEnterPlusExitWatermark);
 
 static void CalculateDCFCLKDeepSleep(
                struct display_mode_lib *mode_lib,
@@ -1599,7 +1543,7 @@ static void CalculateDCCConfiguration(
        int segment_order_vert_contiguous_luma;
        int segment_order_vert_contiguous_chroma;
 
-       enum {
+       typedef enum {
                REQ_256Bytes, REQ_128BytesNonContiguous, REQ_128BytesContiguous, REQ_NA
        } RequestType;
        RequestType RequestLuma;
@@ -2928,33 +2872,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
                        for (k = 0; k < v->NumberOfActivePlanes; ++k) {
                                CalculateFlipSchedule(
                                                mode_lib,
+                                               k,
                                                HostVMInefficiencyFactor,
                                                v->UrgentExtraLatency,
                                                v->UrgentLatency,
-                                               v->GPUVMMaxPageTableLevels,
-                                               v->HostVMEnable,
-                                               v->HostVMMaxNonCachedPageTableLevels,
-                                               v->GPUVMEnable,
-                                               v->HostVMMinPageSize,
                                                v->PDEAndMetaPTEBytesFrame[k],
                                                v->MetaRowByte[k],
-                                               v->PixelPTEBytesPerRow[k],
-                                               v->BandwidthAvailableForImmediateFlip,
-                                               v->TotImmediateFlipBytes,
-                                               v->SourcePixelFormat[k],
-                                               v->HTotal[k] / v->PixelClock[k],
-                                               v->VRatio[k],
-                                               v->VRatioChroma[k],
-                                               v->Tno_bw[k],
-                                               v->DCCEnable[k],
-                                               v->dpte_row_height[k],
-                                               v->meta_row_height[k],
-                                               v->dpte_row_height_chroma[k],
-                                               v->meta_row_height_chroma[k],
-                                               &v->DestinationLinesToRequestVMInImmediateFlip[k],
-                                               &v->DestinationLinesToRequestRowInImmediateFlip[k],
-                                               &v->final_flip_bw[k],
-                                               &v->ImmediateFlipSupportedForPipe[k]);
+                                               v->PixelPTEBytesPerRow[k]);
                        }
 
                        v->total_dcn_read_bw_with_flip = 0.0;
@@ -3041,64 +2965,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
                CalculateWatermarksAndDRAMSpeedChangeSupport(
                                mode_lib,
                                PrefetchMode,
-                               v->NumberOfActivePlanes,
-                               v->MaxLineBufferLines,
-                               v->LineBufferSize,
-                               v->WritebackInterfaceBufferSize,
                                v->DCFCLK,
                                v->ReturnBW,
-                               v->SynchronizedVBlank,
-                               v->dpte_group_bytes,
-                               v->MetaChunkSize,
                                v->UrgentLatency,
                                v->UrgentExtraLatency,
-                               v->WritebackLatency,
-                               v->WritebackChunkSize,
                                v->SOCCLK,
-                               v->DRAMClockChangeLatency,
-                               v->SRExitTime,
-                               v->SREnterPlusExitTime,
-                               v->SRExitZ8Time,
-                               v->SREnterPlusExitZ8Time,
                                v->DCFCLKDeepSleep,
                                v->DETBufferSizeY,
                                v->DETBufferSizeC,
                                v->SwathHeightY,
                                v->SwathHeightC,
-                               v->LBBitPerPixel,
                                v->SwathWidthY,
                                v->SwathWidthC,
-                               v->HRatio,
-                               v->HRatioChroma,
-                               v->vtaps,
-                               v->VTAPsChroma,
-                               v->VRatio,
-                               v->VRatioChroma,
-                               v->HTotal,
-                               v->PixelClock,
-                               v->BlendingAndTiming,
                                v->DPPPerPlane,
                                v->BytePerPixelDETY,
                                v->BytePerPixelDETC,
-                               v->DSTXAfterScaler,
-                               v->DSTYAfterScaler,
-                               v->WritebackEnable,
-                               v->WritebackPixelFormat,
-                               v->WritebackDestinationWidth,
-                               v->WritebackDestinationHeight,
-                               v->WritebackSourceHeight,
                                v->UnboundedRequestEnabled,
                                v->CompressedBufferSizeInkByte,
                                &DRAMClockChangeSupport,
-                               &v->UrgentWatermark,
-                               &v->WritebackUrgentWatermark,
-                               &v->DRAMClockChangeWatermark,
-                               &v->WritebackDRAMClockChangeWatermark,
                                &v->StutterExitWatermark,
                                &v->StutterEnterPlusExitWatermark,
                                &v->Z8StutterExitWatermark,
-                               &v->Z8StutterEnterPlusExitWatermark,
-                               &v->MinActiveDRAMClockChangeLatencySupported);
+                               &v->Z8StutterEnterPlusExitWatermark);
 
                for (k = 0; k < v->NumberOfActivePlanes; ++k) {
                        if (v->WritebackEnable[k] == true) {
@@ -3710,61 +3598,43 @@ static void CalculateRowBandwidth(
 
 static void CalculateFlipSchedule(
                struct display_mode_lib *mode_lib,
+               unsigned int k,
                double HostVMInefficiencyFactor,
                double UrgentExtraLatency,
                double UrgentLatency,
-               unsigned int GPUVMMaxPageTableLevels,
-               bool HostVMEnable,
-               unsigned int HostVMMaxNonCachedPageTableLevels,
-               bool GPUVMEnable,
-               double HostVMMinPageSize,
                double PDEAndMetaPTEBytesPerFrame,
                double MetaRowBytes,
-               double DPTEBytesPerRow,
-               double BandwidthAvailableForImmediateFlip,
-               unsigned int TotImmediateFlipBytes,
-               enum source_format_class SourcePixelFormat,
-               double LineTime,
-               double VRatio,
-               double VRatioChroma,
-               double Tno_bw,
-               bool DCCEnable,
-               unsigned int dpte_row_height,
-               unsigned int meta_row_height,
-               unsigned int dpte_row_height_chroma,
-               unsigned int meta_row_height_chroma,
-               double *DestinationLinesToRequestVMInImmediateFlip,
-               double *DestinationLinesToRequestRowInImmediateFlip,
-               double *final_flip_bw,
-               bool *ImmediateFlipSupportedForPipe)
+               double DPTEBytesPerRow)
 {
+       struct vba_vars_st *v = &mode_lib->vba;
        double min_row_time = 0.0;
        unsigned int HostVMDynamicLevelsTrips;
        double TimeForFetchingMetaPTEImmediateFlip;
        double TimeForFetchingRowInVBlankImmediateFlip;
        double ImmediateFlipBW;
+       double LineTime = v->HTotal[k] / v->PixelClock[k];
 
-       if (GPUVMEnable == true && HostVMEnable == true) {
-               HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+       if (v->GPUVMEnable == true && v->HostVMEnable == true) {
+               HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
        } else {
                HostVMDynamicLevelsTrips = 0;
        }
 
-       if (GPUVMEnable == true || DCCEnable == true) {
-               ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
+       if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
+               ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
        }
 
-       if (GPUVMEnable == true) {
+       if (v->GPUVMEnable == true) {
                TimeForFetchingMetaPTEImmediateFlip = dml_max3(
-                               Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
-                               UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
+                               v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
+                               UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
                                LineTime / 4.0);
        } else {
                TimeForFetchingMetaPTEImmediateFlip = 0;
        }
 
-       *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
-       if ((GPUVMEnable == true || DCCEnable == true)) {
+       v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
+       if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
                TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
                                (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
                                UrgentLatency * (HostVMDynamicLevelsTrips + 1),
@@ -3773,54 +3643,54 @@ static void CalculateFlipSchedule(
                TimeForFetchingRowInVBlankImmediateFlip = 0;
        }
 
-       *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
+       v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
 
-       if (GPUVMEnable == true) {
-               *final_flip_bw = dml_max(
-                               PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
-                               (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
-       } else if ((GPUVMEnable == true || DCCEnable == true)) {
-               *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
+       if (v->GPUVMEnable == true) {
+               v->final_flip_bw[k] = dml_max(
+                               PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
+                               (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
+       } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
+               v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
        } else {
-               *final_flip_bw = 0;
+               v->final_flip_bw[k] = 0;
        }
 
-       if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
-               if (GPUVMEnable == true && DCCEnable != true) {
-                       min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
-               } else if (GPUVMEnable != true && DCCEnable == true) {
-                       min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
+       if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
+               if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+                       min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
+               } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+                       min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
                } else {
                        min_row_time = dml_min4(
-                                       dpte_row_height * LineTime / VRatio,
-                                       meta_row_height * LineTime / VRatio,
-                                       dpte_row_height_chroma * LineTime / VRatioChroma,
-                                       meta_row_height_chroma * LineTime / VRatioChroma);
+                                       v->dpte_row_height[k] * LineTime / v->VRatio[k],
+                                       v->meta_row_height[k] * LineTime / v->VRatio[k],
+                                       v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
+                                       v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
                }
        } else {
-               if (GPUVMEnable == true && DCCEnable != true) {
-                       min_row_time = dpte_row_height * LineTime / VRatio;
-               } else if (GPUVMEnable != true && DCCEnable == true) {
-                       min_row_time = meta_row_height * LineTime / VRatio;
+               if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+                       min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
+               } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+                       min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
                } else {
-                       min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
+                       min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
                }
        }
 
-       if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
+       if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
                        || TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
-               *ImmediateFlipSupportedForPipe = false;
+               v->ImmediateFlipSupportedForPipe[k] = false;
        } else {
-               *ImmediateFlipSupportedForPipe = true;
+               v->ImmediateFlipSupportedForPipe[k] = true;
        }
 
 #ifdef __DML_VBA_DEBUG__
-       dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip);
-       dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip);
+       dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
+       dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
        dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
        dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
        dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
-       dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
+       dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
 #endif
 
 }
@@ -4071,9 +3941,7 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
 
        v->SourceFormatPixelAndScanSupport = true;
        for (k = 0; k < v->NumberOfActivePlanes; k++) {
-               if ((v->SurfaceTiling[k] == dm_sw_linear && (!(v->SourceScan[k] != dm_vert) || v->DCCEnable[k] == true))
-                               || ((v->SurfaceTiling[k] == dm_sw_64kb_d || v->SurfaceTiling[k] == dm_sw_64kb_d_t
-                                               || v->SurfaceTiling[k] == dm_sw_64kb_d_x) && !(v->SourcePixelFormat[k] == dm_444_64))) {
+               if (v->SurfaceTiling[k] == dm_sw_linear && (!(v->SourceScan[k] != dm_vert) || v->DCCEnable[k] == true)) {
                        v->SourceFormatPixelAndScanSupport = false;
                }
        }
@@ -5414,33 +5282,13 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
                                        for (k = 0; k < v->NumberOfActivePlanes; k++) {
                                                CalculateFlipSchedule(
                                                                mode_lib,
+                                                               k,
                                                                HostVMInefficiencyFactor,
                                                                v->ExtraLatency,
                                                                v->UrgLatency[i],
-                                                               v->GPUVMMaxPageTableLevels,
-                                                               v->HostVMEnable,
-                                                               v->HostVMMaxNonCachedPageTableLevels,
-                                                               v->GPUVMEnable,
-                                                               v->HostVMMinPageSize,
                                                                v->PDEAndMetaPTEBytesPerFrame[i][j][k],
                                                                v->MetaRowBytes[i][j][k],
-                                                               v->DPTEBytesPerRow[i][j][k],
-                                                               v->BandwidthAvailableForImmediateFlip,
-                                                               v->TotImmediateFlipBytes,
-                                                               v->SourcePixelFormat[k],
-                                                               v->HTotal[k] / v->PixelClock[k],
-                                                               v->VRatio[k],
-                                                               v->VRatioChroma[k],
-                                                               v->Tno_bw[k],
-                                                               v->DCCEnable[k],
-                                                               v->dpte_row_height[k],
-                                                               v->meta_row_height[k],
-                                                               v->dpte_row_height_chroma[k],
-                                                               v->meta_row_height_chroma[k],
-                                                               &v->DestinationLinesToRequestVMInImmediateFlip[k],
-                                                               &v->DestinationLinesToRequestRowInImmediateFlip[k],
-                                                               &v->final_flip_bw[k],
-                                                               &v->ImmediateFlipSupportedForPipe[k]);
+                                                               v->DPTEBytesPerRow[i][j][k]);
                                        }
                                        v->total_dcn_read_bw_with_flip = 0.0;
                                        for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -5498,64 +5346,28 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
                        CalculateWatermarksAndDRAMSpeedChangeSupport(
                                        mode_lib,
                                        v->PrefetchModePerState[i][j],
-                                       v->NumberOfActivePlanes,
-                                       v->MaxLineBufferLines,
-                                       v->LineBufferSize,
-                                       v->WritebackInterfaceBufferSize,
                                        v->DCFCLKState[i][j],
                                        v->ReturnBWPerState[i][j],
-                                       v->SynchronizedVBlank,
-                                       v->dpte_group_bytes,
-                                       v->MetaChunkSize,
                                        v->UrgLatency[i],
                                        v->ExtraLatency,
-                                       v->WritebackLatency,
-                                       v->WritebackChunkSize,
                                        v->SOCCLKPerState[i],
-                                       v->DRAMClockChangeLatency,
-                                       v->SRExitTime,
-                                       v->SREnterPlusExitTime,
-                                       v->SRExitZ8Time,
-                                       v->SREnterPlusExitZ8Time,
                                        v->ProjectedDCFCLKDeepSleep[i][j],
                                        v->DETBufferSizeYThisState,
                                        v->DETBufferSizeCThisState,
                                        v->SwathHeightYThisState,
                                        v->SwathHeightCThisState,
-                                       v->LBBitPerPixel,
                                        v->SwathWidthYThisState,
                                        v->SwathWidthCThisState,
-                                       v->HRatio,
-                                       v->HRatioChroma,
-                                       v->vtaps,
-                                       v->VTAPsChroma,
-                                       v->VRatio,
-                                       v->VRatioChroma,
-                                       v->HTotal,
-                                       v->PixelClock,
-                                       v->BlendingAndTiming,
                                        v->NoOfDPPThisState,
                                        v->BytePerPixelInDETY,
                                        v->BytePerPixelInDETC,
-                                       v->DSTXAfterScaler,
-                                       v->DSTYAfterScaler,
-                                       v->WritebackEnable,
-                                       v->WritebackPixelFormat,
-                                       v->WritebackDestinationWidth,
-                                       v->WritebackDestinationHeight,
-                                       v->WritebackSourceHeight,
                                        UnboundedRequestEnabledThisState,
                                        CompressedBufferSizeInkByteThisState,
                                        &v->DRAMClockChangeSupport[i][j],
-                                       &v->UrgentWatermark,
-                                       &v->WritebackUrgentWatermark,
-                                       &v->DRAMClockChangeWatermark,
-                                       &v->WritebackDRAMClockChangeWatermark,
-                                       &dummy,
                                        &dummy,
                                        &dummy,
                                        &dummy,
-                                       &v->MinActiveDRAMClockChangeLatencySupported);
+                                       &dummy);
                }
        }
 
@@ -5681,64 +5493,28 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
 static void CalculateWatermarksAndDRAMSpeedChangeSupport(
                struct display_mode_lib *mode_lib,
                unsigned int PrefetchMode,
-               unsigned int NumberOfActivePlanes,
-               unsigned int MaxLineBufferLines,
-               unsigned int LineBufferSize,
-               unsigned int WritebackInterfaceBufferSize,
                double DCFCLK,
                double ReturnBW,
-               bool SynchronizedVBlank,
-               unsigned int dpte_group_bytes[],
-               unsigned int MetaChunkSize,
                double UrgentLatency,
                double ExtraLatency,
-               double WritebackLatency,
-               double WritebackChunkSize,
                double SOCCLK,
-               double DRAMClockChangeLatency,
-               double SRExitTime,
-               double SREnterPlusExitTime,
-               double SRExitZ8Time,
-               double SREnterPlusExitZ8Time,
                double DCFCLKDeepSleep,
                unsigned int DETBufferSizeY[],
                unsigned int DETBufferSizeC[],
                unsigned int SwathHeightY[],
                unsigned int SwathHeightC[],
-               unsigned int LBBitPerPixel[],
                double SwathWidthY[],
                double SwathWidthC[],
-               double HRatio[],
-               double HRatioChroma[],
-               unsigned int vtaps[],
-               unsigned int VTAPsChroma[],
-               double VRatio[],
-               double VRatioChroma[],
-               unsigned int HTotal[],
-               double PixelClock[],
-               unsigned int BlendingAndTiming[],
                unsigned int DPPPerPlane[],
                double BytePerPixelDETY[],
                double BytePerPixelDETC[],
-               double DSTXAfterScaler[],
-               double DSTYAfterScaler[],
-               bool WritebackEnable[],
-               enum source_format_class WritebackPixelFormat[],
-               double WritebackDestinationWidth[],
-               double WritebackDestinationHeight[],
-               double WritebackSourceHeight[],
                bool UnboundedRequestEnabled,
                unsigned int CompressedBufferSizeInkByte,
                enum clock_change_support *DRAMClockChangeSupport,
-               double *UrgentWatermark,
-               double *WritebackUrgentWatermark,
-               double *DRAMClockChangeWatermark,
-               double *WritebackDRAMClockChangeWatermark,
                double *StutterExitWatermark,
                double *StutterEnterPlusExitWatermark,
                double *Z8StutterExitWatermark,
-               double *Z8StutterEnterPlusExitWatermark,
-               double *MinActiveDRAMClockChangeLatencySupported)
+               double *Z8StutterEnterPlusExitWatermark)
 {
        struct vba_vars_st *v = &mode_lib->vba;
        double EffectiveLBLatencyHidingY;
@@ -5758,103 +5534,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
        double TotalPixelBW = 0.0;
        int k, j;
 
-       *UrgentWatermark = UrgentLatency + ExtraLatency;
+       v->UrgentWatermark = UrgentLatency + ExtraLatency;
 
 #ifdef __DML_VBA_DEBUG__
        dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
        dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
-       dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark);
+       dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
 #endif
 
-       *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
+       v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
 
 #ifdef __DML_VBA_DEBUG__
-       dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency);
-       dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark);
+       dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
+       dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
 #endif
 
        v->TotalActiveWriteback = 0;
-       for (k = 0; k < NumberOfActivePlanes; ++k) {
-               if (WritebackEnable[k] == true) {
+       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+               if (v->WritebackEnable[k] == true) {
                        v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
                }
        }
 
        if (v->TotalActiveWriteback <= 1) {
-               *WritebackUrgentWatermark = WritebackLatency;
+               v->WritebackUrgentWatermark = v->WritebackLatency;
        } else {
-               *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+               v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
        }
 
        if (v->TotalActiveWriteback <= 1) {
-               *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
+               v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
        } else {
-               *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+               v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
        }
 
-       for (k = 0; k < NumberOfActivePlanes; ++k) {
+       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
                TotalPixelBW = TotalPixelBW
-                               + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k])
-                                               / (HTotal[k] / PixelClock[k]);
+                               + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
+                                               / (v->HTotal[k] / v->PixelClock[k]);
        }
 
-       for (k = 0; k < NumberOfActivePlanes; ++k) {
+       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
                double EffectiveDETBufferSizeY = DETBufferSizeY[k];
 
                v->LBLatencyHidingSourceLinesY = dml_min(
-                               (double) MaxLineBufferLines,
-                               dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
+                               (double) v->MaxLineBufferLines,
+                               dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
 
                v->LBLatencyHidingSourceLinesC = dml_min(
-                               (double) MaxLineBufferLines,
-                               dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
+                               (double) v->MaxLineBufferLines,
+                               dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
 
-               EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
+               EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
 
-               EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+               EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
 
                if (UnboundedRequestEnabled) {
                        EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
-                                       + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+                                       + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
                }
 
                LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
                LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
-               FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+               FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
                if (BytePerPixelDETC[k] > 0) {
                        LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
                        LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
-                       FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
+                       FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
                } else {
                        LinesInDETC = 0;
                        FullDETBufferingTimeC = 999999;
                }
 
                ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
-                               - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+                               - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
 
-               if (NumberOfActivePlanes > 1) {
+               if (v->NumberOfActivePlanes > 1) {
                        ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
-                                       - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
+                                       - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
                }
 
                if (BytePerPixelDETC[k] > 0) {
                        ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
-                                       - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+                                       - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
 
-                       if (NumberOfActivePlanes > 1) {
+                       if (v->NumberOfActivePlanes > 1) {
                                ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
-                                               - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
+                                               - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
                        }
                        v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
                } else {
                        v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
                }
 
-               if (WritebackEnable[k] == true) {
-                       WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024
-                                       / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
-                       if (WritebackPixelFormat[k] == dm_444_64) {
+               if (v->WritebackEnable[k] == true) {
+                       WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+                                       / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+                       if (v->WritebackPixelFormat[k] == dm_444_64) {
                                WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
                        }
                        WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
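
For orientation, the per-plane margin computed in this loop is the line-buffer latency hiding plus the DET buffering time, minus the scaler output delay and the urgent and DRAM-clock-change watermarks. A small worked example with made-up numbers (none of these values come from a real SoC table):

#include <stdio.h>

int main(void)
{
	/* all values in microseconds, assumed purely for illustration */
	double effective_lb_hiding = 20.0;
	double full_det_buffering = 35.0;
	double dst_after_scaler = 3.0;	/* (DSTXAfterScaler/HTotal + DSTYAfterScaler) * HTotal/PixelClock */
	double urgent_wm = 6.0;
	double dram_change_wm = 9.0;

	double margin = effective_lb_hiding + full_det_buffering
			- dst_after_scaler - urgent_wm - dram_change_wm;

	printf("ActiveDRAMClockChangeLatencyMargin = %.1f us\n", margin);	/* 37.0 */
	return 0;
}
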
@@ -5864,14 +5640,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
 
        v->MinActiveDRAMClockChangeMargin = 999999;
        PlaneWithMinActiveDRAMClockChangeMargin = 0;
-       for (k = 0; k < NumberOfActivePlanes; ++k) {
+       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
                if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
                        v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
-                       if (BlendingAndTiming[k] == k) {
+                       if (v->BlendingAndTiming[k] == k) {
                                PlaneWithMinActiveDRAMClockChangeMargin = k;
                        } else {
-                               for (j = 0; j < NumberOfActivePlanes; ++j) {
-                                       if (BlendingAndTiming[k] == j) {
+                               for (j = 0; j < v->NumberOfActivePlanes; ++j) {
+                                       if (v->BlendingAndTiming[k] == j) {
                                                PlaneWithMinActiveDRAMClockChangeMargin = j;
                                        }
                                }
@@ -5879,11 +5655,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
                }
        }
 
-       *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
+       v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency;
 
        SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
-       for (k = 0; k < NumberOfActivePlanes; ++k) {
-               if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+               if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
                                && v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
                        SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
                }
@@ -5891,25 +5667,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
 
        v->TotalNumberOfActiveOTG = 0;
 
-       for (k = 0; k < NumberOfActivePlanes; ++k) {
-               if (BlendingAndTiming[k] == k) {
+       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+               if (v->BlendingAndTiming[k] == k) {
                        v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
                }
        }
 
        if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
                *DRAMClockChangeSupport = dm_dram_clock_change_vactive;
-       } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
+       } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
                        || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
                *DRAMClockChangeSupport = dm_dram_clock_change_vblank;
        } else {
                *DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
        }
 
-       *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
-       *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
-       *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
-       *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+       *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
+       *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
+       *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+       *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
 
 #ifdef __DML_VBA_DEBUG__
        dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
@@ -7157,12 +6933,13 @@ static double CalculateExtraLatencyBytes(
                        HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 1);
                else
                        HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 2);
-       else
+       } else {
                HostVMDynamicLevels = 0;
+       }
 
        ret = ReorderingBytes + (TotalNumberOfActiveDPP * PixelChunkSizeInKByte + TotalNumberOfDCCActiveDPP * MetaChunkSize) * 1024.0;
 
-       if (GPUVMEnable == true)
+       if (GPUVMEnable == true) {
                for (k = 0; k < NumberOfActivePlanes; ++k)
                        ret = ret + NumberOfDPP[k] * dpte_group_bytes[k] * (1 + 8 * HostVMDynamicLevels) * HostVMInefficiencyFactor;
        }
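
The hunk above straightens out the brace structure around the nested if/else and the single-statement if. A minimal standalone sketch (hypothetical variable names) of the fully braced form it moves to:

#include <stdio.h>

/* hypothetical stand-ins for the GPUVM/HostVM flags used above */
static int dynamic_levels(int gpu_vm_enable, int host_vm_enable, int max_levels)
{
	int levels;

	if (gpu_vm_enable && host_vm_enable) {
		if (max_levels > 2)
			levels = max_levels - 1;
		else
			levels = max_levels - 2;
	} else {
		/* explicit braces make the else pairing unambiguous */
		levels = 0;
	}
	return levels;
}

int main(void)
{
	printf("%d %d\n", dynamic_levels(1, 1, 4), dynamic_levels(0, 1, 4));	/* 3 0 */
	return 0;
}
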
index 8e4c9d0..f436869 100644 (file)
@@ -244,6 +244,50 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
 }
 
 /**
+ * dcn32_find_dummy_latency_index_for_fw_based_mclk_switch - Find the
+ * dummy_latency_index when MCLK switching via firmware-based vblank stretch
+ * is enabled, by walking the table of dummy pstate latencies until the lowest
+ * value that allows dm_allow_self_refresh_and_mclk_switch is found.
+ */
+int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
+                                                           struct dc_state *context,
+                                                           display_e2e_pipe_params_st *pipes,
+                                                           int pipe_cnt,
+                                                           int vlevel)
+{
+       const int max_latency_table_entries = 4;
+       const struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+       int dummy_latency_index = 0;
+
+       dc_assert_fp_enabled();
+
+       while (dummy_latency_index < max_latency_table_entries) {
+               context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+                               dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
+               dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+
+               if (vlevel < context->bw_ctx.dml.vba.soc.num_states &&
+                               vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported)
+                       break;
+
+               dummy_latency_index++;
+       }
+
+       if (dummy_latency_index == max_latency_table_entries) {
+               ASSERT(dummy_latency_index != max_latency_table_entries);
+               /* If the execution gets here, it means dummy p_states are
+                * not possible. This should never happen and would mean
+                * something is severely wrong.
+                * Here we reset dummy_latency_index to 3, because it is
+                * better to have underflows than system crashes.
+                */
+               dummy_latency_index = max_latency_table_entries - 1;
+       }
+
+       return dummy_latency_index;
+}
+
+/**
  * dcn32_helper_populate_phantom_dlg_params - Get DLG params for phantom pipes
  * and populate pipe_ctx with those params.
  *
@@ -1646,7 +1690,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
                        dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
 
                if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
-                       dummy_latency_index = dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
+                       dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
                                context, pipes, pipe_cnt, vlevel);
 
                        /* After calling dcn30_find_dummy_latency_index_for_fw_based_mclk_switch
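
The new helper added in this file walks the dummy p-state latency table until bandwidth validation succeeds. A compressed standalone restatement of that search pattern (the validation callback below is a made-up placeholder, not a DC API):

#include <stdbool.h>
#include <stdio.h>

#define TABLE_ENTRIES 4

/* placeholder for dcn32_internal_validate_bw() plus the
 * DRAMClockChangeSupport check; purely illustrative */
static bool mode_validates_with(double latency_us)
{
	return latency_us >= 10.0;
}

static int find_dummy_latency_index(const double table[TABLE_ENTRIES])
{
	int i;

	for (i = 0; i < TABLE_ENTRIES; i++) {
		if (mode_validates_with(table[i]))
			return i;
	}
	/* nothing validated: fall back to the last entry, as the driver does,
	 * preferring underflow to a crash */
	return TABLE_ENTRIES - 1;
}

int main(void)
{
	const double dummy_pstate_latency_us[TABLE_ENTRIES] = { 4.0, 8.0, 12.0, 16.0 };

	printf("index = %d\n", find_dummy_latency_index(dummy_pstate_latency_us));	/* 2 */
	return 0;
}
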
index 3ed06ab..6ce2210 100644 (file)
@@ -71,4 +71,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
 
 void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
 
+int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
+                                                           struct dc_state *context,
+                                                           display_e2e_pipe_params_st *pipes,
+                                                           int pipe_cnt,
+                                                           int vlevel);
+
 #endif
index cb20257..6980f69 100644 (file)
@@ -755,30 +755,18 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
                        v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelY = v->BytePerPixelY[k];
                        v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelC = v->BytePerPixelC[k];
                        v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.ProgressiveToInterlaceUnitInOPP = mode_lib->vba.ProgressiveToInterlaceUnitInOPP;
-                       v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
-                                       &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, v->DSCDelay[k],
-                                       mode_lib->vba.DPPCLKDelaySubtotal + mode_lib->vba.DPPCLKDelayCNVCFormater,
-                                       mode_lib->vba.DPPCLKDelaySCL,
-                                       mode_lib->vba.DPPCLKDelaySCLLBOnly,
-                                       mode_lib->vba.DPPCLKDelayCNVCCursor,
-                                       mode_lib->vba.DISPCLKDelaySubtotal,
-                                       (unsigned int) (v->SwathWidthY[k] / mode_lib->vba.HRatio[k]),
-                                       mode_lib->vba.OutputFormat[k],
-                                       mode_lib->vba.MaxInterDCNTileRepeaters,
+                       v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(
+                                       v,
+                                       k,
+                                       v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
+                                       &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe,
+                                       v->DSCDelay[k],
+                                       (unsigned int) (v->SwathWidthY[k] / v->HRatio[k]),
                                        dml_min(v->VStartupLines, v->MaxVStartupLines[k]),
                                        v->MaxVStartupLines[k],
-                                       mode_lib->vba.GPUVMMaxPageTableLevels,
-                                       mode_lib->vba.GPUVMEnable,
-                                       mode_lib->vba.HostVMEnable,
-                                       mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
-                                       mode_lib->vba.HostVMMinPageSize,
-                                       mode_lib->vba.DynamicMetadataEnable[k],
-                                       mode_lib->vba.DynamicMetadataVMEnabled,
-                                       mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
-                                       mode_lib->vba.DynamicMetadataTransmittedBytes[k],
                                        v->UrgentLatency,
                                        v->UrgentExtraLatency,
-                                       mode_lib->vba.TCalc,
+                                       v->TCalc,
                                        v->PDEAndMetaPTEBytesFrame[k],
                                        v->MetaRowByte[k],
                                        v->PixelPTEBytesPerRow[k],
@@ -792,8 +780,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
                                        v->MaxNumSwathC[k],
                                        v->swath_width_luma_ub[k],
                                        v->swath_width_chroma_ub[k],
-                                       mode_lib->vba.SwathHeightY[k],
-                                       mode_lib->vba.SwathHeightC[k],
+                                       v->SwathHeightY[k],
+                                       v->SwathHeightC[k],
                                        TWait,
                                        /* Output */
                                        &v->DSTXAfterScaler[k],
@@ -1163,58 +1151,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
                v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.SMNLatency = mode_lib->vba.SMNLatency;
 
                dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
-                       mode_lib->vba.USRRetrainingRequiredFinal,
-                       mode_lib->vba.UsesMALLForPStateChange,
-                       mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
-                       mode_lib->vba.NumberOfActiveSurfaces,
-                       mode_lib->vba.MaxLineBufferLines,
-                       mode_lib->vba.LineBufferSizeFinal,
-                       mode_lib->vba.WritebackInterfaceBufferSize,
-                       mode_lib->vba.DCFCLK,
-                       mode_lib->vba.ReturnBW,
-                       mode_lib->vba.SynchronizeTimingsFinal,
-                       mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
-                       mode_lib->vba.DRRDisplay,
-                       v->dpte_group_bytes,
-                       v->meta_row_height,
-                       v->meta_row_height_chroma,
+                       v,
+                       v->PrefetchModePerState[v->VoltageLevel][v->maxMpcComb],
+                       v->DCFCLK,
+                       v->ReturnBW,
                        v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters,
-                       mode_lib->vba.WritebackChunkSize,
-                       mode_lib->vba.SOCCLK,
+                       v->SOCCLK,
                        v->DCFCLKDeepSleep,
-                       mode_lib->vba.DETBufferSizeY,
-                       mode_lib->vba.DETBufferSizeC,
-                       mode_lib->vba.SwathHeightY,
-                       mode_lib->vba.SwathHeightC,
-                       mode_lib->vba.LBBitPerPixel,
+                       v->DETBufferSizeY,
+                       v->DETBufferSizeC,
+                       v->SwathHeightY,
+                       v->SwathHeightC,
                        v->SwathWidthY,
                        v->SwathWidthC,
-                       mode_lib->vba.HRatio,
-                       mode_lib->vba.HRatioChroma,
-                       mode_lib->vba.vtaps,
-                       mode_lib->vba.VTAPsChroma,
-                       mode_lib->vba.VRatio,
-                       mode_lib->vba.VRatioChroma,
-                       mode_lib->vba.HTotal,
-                       mode_lib->vba.VTotal,
-                       mode_lib->vba.VActive,
-                       mode_lib->vba.PixelClock,
-                       mode_lib->vba.BlendingAndTiming,
-                       mode_lib->vba.DPPPerPlane,
+                       v->DPPPerPlane,
                        v->BytePerPixelDETY,
                        v->BytePerPixelDETC,
                        v->DSTXAfterScaler,
                        v->DSTYAfterScaler,
-                       mode_lib->vba.WritebackEnable,
-                       mode_lib->vba.WritebackPixelFormat,
-                       mode_lib->vba.WritebackDestinationWidth,
-                       mode_lib->vba.WritebackDestinationHeight,
-                       mode_lib->vba.WritebackSourceHeight,
                        v->UnboundedRequestEnabled,
                        v->CompressedBufferSizeInkByte,
 
                        /* Output */
-                       &v->Watermark,
                        &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_dramchange_support,
                        v->MaxActiveDRAMClockChangeLatencySupported,
                        v->SubViewportLinesNeededInMALL,
@@ -1806,10 +1764,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                                &mode_lib->vba.Read256BlockHeightC[k],
                                &mode_lib->vba.Read256BlockWidthY[k],
                                &mode_lib->vba.Read256BlockWidthC[k],
-                               &mode_lib->vba.MicroTileHeightY[k],
-                               &mode_lib->vba.MicroTileHeightC[k],
-                               &mode_lib->vba.MicroTileWidthY[k],
-                               &mode_lib->vba.MicroTileWidthC[k]);
+                               &mode_lib->vba.MacroTileHeightY[k],
+                               &mode_lib->vba.MacroTileHeightC[k],
+                               &mode_lib->vba.MacroTileWidthY[k],
+                               &mode_lib->vba.MacroTileWidthC[k]);
        }
 
        /*Bandwidth Support Check*/
@@ -2034,6 +1992,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                                dml32_CalculateODMMode(
                                                mode_lib->vba.MaximumPixelsPerLinePerDSCUnit,
                                                mode_lib->vba.HActive[k],
+                                               mode_lib->vba.OutputFormat[k],
                                                mode_lib->vba.Output[k],
                                                mode_lib->vba.ODMUse[k],
                                                mode_lib->vba.MaxDispclk[i],
@@ -2055,6 +2014,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                                dml32_CalculateODMMode(
                                                mode_lib->vba.MaximumPixelsPerLinePerDSCUnit,
                                                mode_lib->vba.HActive[k],
+                                               mode_lib->vba.OutputFormat[k],
                                                mode_lib->vba.Output[k],
                                                mode_lib->vba.ODMUse[k],
                                                mode_lib->vba.MaxDispclk[i],
@@ -2659,10 +2619,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                        mode_lib->vba.Read256BlockWidthC,
                        mode_lib->vba.Read256BlockHeightY,
                        mode_lib->vba.Read256BlockHeightC,
-                       mode_lib->vba.MicroTileWidthY,
-                       mode_lib->vba.MicroTileWidthC,
-                       mode_lib->vba.MicroTileHeightY,
-                       mode_lib->vba.MicroTileHeightC,
+                       mode_lib->vba.MacroTileWidthY,
+                       mode_lib->vba.MacroTileWidthC,
+                       mode_lib->vba.MacroTileHeightY,
+                       mode_lib->vba.MacroTileHeightC,
 
                        /* Output */
                        mode_lib->vba.SurfaceSizeInMALL,
@@ -2709,10 +2669,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeight256BytesY = mode_lib->vba.Read256BlockHeightY[k];
                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidth256BytesC = mode_lib->vba.Read256BlockWidthC[k];
                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeight256BytesC = mode_lib->vba.Read256BlockHeightC[k];
-                               v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthY = mode_lib->vba.MicroTileWidthY[k];
-                               v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightY = mode_lib->vba.MicroTileHeightY[k];
-                               v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthC = mode_lib->vba.MicroTileWidthC[k];
-                               v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightC = mode_lib->vba.MicroTileHeightC[k];
+                               v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthY = mode_lib->vba.MacroTileWidthY[k];
+                               v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightY = mode_lib->vba.MacroTileHeightY[k];
+                               v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthC = mode_lib->vba.MacroTileWidthC[k];
+                               v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightC = mode_lib->vba.MacroTileHeightC[k];
                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].InterlaceEnable = mode_lib->vba.Interlace[k];
                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].HTotal = mode_lib->vba.HTotal[k];
                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].DCCEnable = mode_lib->vba.DCCEnable[k];
@@ -3258,63 +3218,47 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 
                                        mode_lib->vba.NoTimeForPrefetch[i][j][k] =
                                                dml32_CalculatePrefetchSchedule(
+                                                       v,
+                                                       k,
                                                        v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.HostVMInefficiencyFactor,
                                                        &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe,
-                                                       mode_lib->vba.DSCDelayPerState[i][k],
-                                                       mode_lib->vba.DPPCLKDelaySubtotal +
-                                                               mode_lib->vba.DPPCLKDelayCNVCFormater,
-                                                       mode_lib->vba.DPPCLKDelaySCL,
-                                                       mode_lib->vba.DPPCLKDelaySCLLBOnly,
-                                                       mode_lib->vba.DPPCLKDelayCNVCCursor,
-                                                       mode_lib->vba.DISPCLKDelaySubtotal,
-                                                       mode_lib->vba.SwathWidthYThisState[k] /
-                                                               mode_lib->vba.HRatio[k],
-                                                       mode_lib->vba.OutputFormat[k],
-                                                       mode_lib->vba.MaxInterDCNTileRepeaters,
-                                                       dml_min(mode_lib->vba.MaxVStartup,
-                                                                       mode_lib->vba.MaximumVStartup[i][j][k]),
-                                                       mode_lib->vba.MaximumVStartup[i][j][k],
-                                                       mode_lib->vba.GPUVMMaxPageTableLevels,
-                                                       mode_lib->vba.GPUVMEnable, mode_lib->vba.HostVMEnable,
-                                                       mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
-                                                       mode_lib->vba.HostVMMinPageSize,
-                                                       mode_lib->vba.DynamicMetadataEnable[k],
-                                                       mode_lib->vba.DynamicMetadataVMEnabled,
-                                                       mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
-                                                       mode_lib->vba.DynamicMetadataTransmittedBytes[k],
-                                                       mode_lib->vba.UrgLatency[i],
-                                                       mode_lib->vba.ExtraLatency,
-                                                       mode_lib->vba.TimeCalc,
-                                                       mode_lib->vba.PDEAndMetaPTEBytesPerFrame[i][j][k],
-                                                       mode_lib->vba.MetaRowBytes[i][j][k],
-                                                       mode_lib->vba.DPTEBytesPerRow[i][j][k],
-                                                       mode_lib->vba.PrefetchLinesY[i][j][k],
-                                                       mode_lib->vba.SwathWidthYThisState[k],
-                                                       mode_lib->vba.PrefillY[k],
-                                                       mode_lib->vba.MaxNumSwY[k],
-                                                       mode_lib->vba.PrefetchLinesC[i][j][k],
-                                                       mode_lib->vba.SwathWidthCThisState[k],
-                                                       mode_lib->vba.PrefillC[k],
-                                                       mode_lib->vba.MaxNumSwC[k],
-                                                       mode_lib->vba.swath_width_luma_ub_this_state[k],
-                                                       mode_lib->vba.swath_width_chroma_ub_this_state[k],
-                                                       mode_lib->vba.SwathHeightYThisState[k],
-                                                       mode_lib->vba.SwathHeightCThisState[k], mode_lib->vba.TWait,
+                                                       v->DSCDelayPerState[i][k],
+                                                       v->SwathWidthYThisState[k] / v->HRatio[k],
+                                                       dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
+                                                       v->MaximumVStartup[i][j][k],
+                                                       v->UrgLatency[i],
+                                                       v->ExtraLatency,
+                                                       v->TimeCalc,
+                                                       v->PDEAndMetaPTEBytesPerFrame[i][j][k],
+                                                       v->MetaRowBytes[i][j][k],
+                                                       v->DPTEBytesPerRow[i][j][k],
+                                                       v->PrefetchLinesY[i][j][k],
+                                                       v->SwathWidthYThisState[k],
+                                                       v->PrefillY[k],
+                                                       v->MaxNumSwY[k],
+                                                       v->PrefetchLinesC[i][j][k],
+                                                       v->SwathWidthCThisState[k],
+                                                       v->PrefillC[k],
+                                                       v->MaxNumSwC[k],
+                                                       v->swath_width_luma_ub_this_state[k],
+                                                       v->swath_width_chroma_ub_this_state[k],
+                                                       v->SwathHeightYThisState[k],
+                                                       v->SwathHeightCThisState[k], v->TWait,
 
                                                        /* Output */
                                                        &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k],
                                                        &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTYAfterScaler[k],
-                                                       &mode_lib->vba.LineTimesForPrefetch[k],
-                                                       &mode_lib->vba.PrefetchBW[k],
-                                                       &mode_lib->vba.LinesForMetaPTE[k],
-                                                       &mode_lib->vba.LinesForMetaAndDPTERow[k],
-                                                       &mode_lib->vba.VRatioPreY[i][j][k],
-                                                       &mode_lib->vba.VRatioPreC[i][j][k],
-                                                       &mode_lib->vba.RequiredPrefetchPixelDataBWLuma[0][0][k],
-                                                       &mode_lib->vba.RequiredPrefetchPixelDataBWChroma[0][0][k],
-                                                       &mode_lib->vba.NoTimeForDynamicMetadata[i][j][k],
-                                                       &mode_lib->vba.Tno_bw[k],
-                                                       &mode_lib->vba.prefetch_vmrow_bw[k],
+                                                       &v->LineTimesForPrefetch[k],
+                                                       &v->PrefetchBW[k],
+                                                       &v->LinesForMetaPTE[k],
+                                                       &v->LinesForMetaAndDPTERow[k],
+                                                       &v->VRatioPreY[i][j][k],
+                                                       &v->VRatioPreC[i][j][k],
+                                                       &v->RequiredPrefetchPixelDataBWLuma[0][0][k],
+                                                       &v->RequiredPrefetchPixelDataBWChroma[0][0][k],
+                                                       &v->NoTimeForDynamicMetadata[i][j][k],
+                                                       &v->Tno_bw[k],
+                                                       &v->prefetch_vmrow_bw[k],
                                                        &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[0],         // double *Tdmdl_vm
                                                        &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[1],         // double *Tdmdl
                                                        &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[2],         // double *TSetup
@@ -3557,62 +3501,32 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 
                        {
                                dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
-                                               mode_lib->vba.USRRetrainingRequiredFinal,
-                                               mode_lib->vba.UsesMALLForPStateChange,
-                                               mode_lib->vba.PrefetchModePerState[i][j],
-                                               mode_lib->vba.NumberOfActiveSurfaces,
-                                               mode_lib->vba.MaxLineBufferLines,
-                                               mode_lib->vba.LineBufferSizeFinal,
-                                               mode_lib->vba.WritebackInterfaceBufferSize,
-                                               mode_lib->vba.DCFCLKState[i][j],
-                                               mode_lib->vba.ReturnBWPerState[i][j],
-                                               mode_lib->vba.SynchronizeTimingsFinal,
-                                               mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
-                                               mode_lib->vba.DRRDisplay,
-                                               mode_lib->vba.dpte_group_bytes,
-                                               mode_lib->vba.meta_row_height,
-                                               mode_lib->vba.meta_row_height_chroma,
+                                               v,
+                                               v->PrefetchModePerState[i][j],
+                                               v->DCFCLKState[i][j],
+                                               v->ReturnBWPerState[i][j],
                                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters,
-                                               mode_lib->vba.WritebackChunkSize,
-                                               mode_lib->vba.SOCCLKPerState[i],
-                                               mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j],
-                                               mode_lib->vba.DETBufferSizeYThisState,
-                                               mode_lib->vba.DETBufferSizeCThisState,
-                                               mode_lib->vba.SwathHeightYThisState,
-                                               mode_lib->vba.SwathHeightCThisState,
-                                               mode_lib->vba.LBBitPerPixel,
-                                               mode_lib->vba.SwathWidthYThisState, // 24
-                                               mode_lib->vba.SwathWidthCThisState,
-                                               mode_lib->vba.HRatio,
-                                               mode_lib->vba.HRatioChroma,
-                                               mode_lib->vba.vtaps,
-                                               mode_lib->vba.VTAPsChroma,
-                                               mode_lib->vba.VRatio,
-                                               mode_lib->vba.VRatioChroma,
-                                               mode_lib->vba.HTotal,
-                                               mode_lib->vba.VTotal,
-                                               mode_lib->vba.VActive,
-                                               mode_lib->vba.PixelClock,
-                                               mode_lib->vba.BlendingAndTiming,
-                                               mode_lib->vba.NoOfDPPThisState,
-                                               mode_lib->vba.BytePerPixelInDETY,
-                                               mode_lib->vba.BytePerPixelInDETC,
+                                               v->SOCCLKPerState[i],
+                                               v->ProjectedDCFCLKDeepSleep[i][j],
+                                               v->DETBufferSizeYThisState,
+                                               v->DETBufferSizeCThisState,
+                                               v->SwathHeightYThisState,
+                                               v->SwathHeightCThisState,
+                                               v->SwathWidthYThisState, // 24
+                                               v->SwathWidthCThisState,
+                                               v->NoOfDPPThisState,
+                                               v->BytePerPixelInDETY,
+                                               v->BytePerPixelInDETC,
                                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler,
                                                v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTYAfterScaler,
-                                               mode_lib->vba.WritebackEnable,
-                                               mode_lib->vba.WritebackPixelFormat,
-                                               mode_lib->vba.WritebackDestinationWidth,
-                                               mode_lib->vba.WritebackDestinationHeight,
-                                               mode_lib->vba.WritebackSourceHeight,
-                                               mode_lib->vba.UnboundedRequestEnabledThisState,
-                                               mode_lib->vba.CompressedBufferSizeInkByteThisState,
+                                               v->UnboundedRequestEnabledThisState,
+                                               v->CompressedBufferSizeInkByteThisState,
 
                                                /* Output */
-                                               &mode_lib->vba.Watermark, // Store the values in vba
-                                               &mode_lib->vba.DRAMClockChangeSupport[i][j],
+                                               &v->DRAMClockChangeSupport[i][j],
                                                &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[0], // double *MaxActiveDRAMClockChangeLatencySupported
                                                &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer[0], // Long SubViewportLinesNeededInMALL[]
-                                               &mode_lib->vba.FCLKChangeSupport[i][j],
+                                               &v->FCLKChangeSupport[i][j],
                                                &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[1], // double *MinActiveFCLKChangeLatencySupported
                                                &mode_lib->vba.USRRetrainingSupport[i][j],
                                                mode_lib->vba.ActiveDRAMClockChangeLatencyMargin);
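
Most of the churn in this file is mechanical: per-plane arrays that already live in struct vba_vars_st stop being copied through long argument lists, and callees take the struct plus a plane index instead, presumably to keep the very large DML argument lists and stack frames in check. A minimal before/after sketch with hypothetical fields:

/* hypothetical miniature of struct vba_vars_st, for illustration only */
struct vba_example {
	unsigned int HTotal[8];
	double PixelClock[8];
};

/* old style: each field travels as its own parameter */
static double line_time_old(unsigned int HTotal, double PixelClock)
{
	return HTotal / PixelClock;
}

/* new style: pass the struct and the plane index, as the refactor above does */
static double line_time_new(const struct vba_example *v, unsigned int k)
{
	return v->HTotal[k] / v->PixelClock[k];
}

int main(void)
{
	struct vba_example v = { .HTotal = { 2200 }, .PixelClock = { 148.5 } };

	return line_time_old(v.HTotal[0], v.PixelClock[0]) ==
	       line_time_new(&v, 0) ? 0 : 1;
}
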
index 05fc14a..365d290 100644 (file)
@@ -27,6 +27,8 @@
 #include "display_mode_vba_32.h"
 #include "../display_mode_lib.h"
 
+#define DCN32_MAX_FMT_420_BUFFER_WIDTH 4096
+
 unsigned int dml32_dscceComputeDelay(
                unsigned int bpc,
                double BPP,
@@ -1182,6 +1184,7 @@ void dml32_CalculateDETBufferSize(
 void dml32_CalculateODMMode(
                unsigned int MaximumPixelsPerLinePerDSCUnit,
                unsigned int HActive,
+               enum output_format_class OutFormat,
                enum output_encoder_class Output,
                enum odm_combine_policy ODMUse,
                double StateDispclk,
@@ -1253,6 +1256,29 @@ void dml32_CalculateODMMode(
                else
                        *TotalAvailablePipesSupport = false;
        }
+       if (OutFormat == dm_420 && HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH &&
+                       ODMUse != dm_odm_combine_policy_4to1) {
+               if (HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH * 4) {
+                       *ODMMode = dm_odm_combine_mode_disabled;
+                       *NumberOfDPP = 0;
+                       *TotalAvailablePipesSupport = false;
+               } else if (HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH * 2 ||
+                               *ODMMode == dm_odm_combine_mode_4to1) {
+                       *ODMMode = dm_odm_combine_mode_4to1;
+                       *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineFourToOne;
+                       *NumberOfDPP = 4;
+               } else {
+                       *ODMMode = dm_odm_combine_mode_2to1;
+                       *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineTwoToOne;
+                       *NumberOfDPP = 2;
+               }
+       }
+       if (Output == dm_hdmi && OutFormat == dm_420 &&
+                       HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH) {
+               *ODMMode = dm_odm_combine_mode_disabled;
+               *NumberOfDPP = 0;
+               *TotalAvailablePipesSupport = false;
+       }
 }
 
 double dml32_CalculateRequiredDispclk(
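
The new dm_420 handling above caps how wide a line a single FMT 4:2:0 buffer can take (DCN32_MAX_FMT_420_BUFFER_WIDTH) and forces ODM combine for wider timings, while HDMI is rejected outright because it cannot be split this way here. A simplified standalone restatement (it folds the ODMUse/existing-mode checks into the plain width test):

#include <stdio.h>

#define MAX_FMT_420_WIDTH 4096	/* mirrors DCN32_MAX_FMT_420_BUFFER_WIDTH */

enum odm_pick { ODM_DISABLED, ODM_2TO1, ODM_4TO1, ODM_UNSUPPORTED };

static enum odm_pick pick_odm_for_420(unsigned int hactive, int is_hdmi)
{
	if (hactive <= MAX_FMT_420_WIDTH)
		return ODM_DISABLED;			/* fits in one buffer */
	if (is_hdmi || hactive > MAX_FMT_420_WIDTH * 4)
		return ODM_UNSUPPORTED;			/* cannot be split far enough */
	if (hactive > MAX_FMT_420_WIDTH * 2)
		return ODM_4TO1;
	return ODM_2TO1;
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_odm_for_420(3840, 0),	/* 0: no combine needed  */
	       pick_odm_for_420(7680, 0),	/* 1: 2:1 combine        */
	       pick_odm_for_420(7680, 1));	/* 3: HDMI not supported */
	return 0;
}
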
@@ -3363,28 +3389,14 @@ double dml32_CalculateExtraLatency(
 } // CalculateExtraLatency
 
 bool dml32_CalculatePrefetchSchedule(
+               struct vba_vars_st *v,
+               unsigned int k,
                double HostVMInefficiencyFactor,
                DmlPipe *myPipe,
                unsigned int DSCDelay,
-               double DPPCLKDelaySubtotalPlusCNVCFormater,
-               double DPPCLKDelaySCL,
-               double DPPCLKDelaySCLLBOnly,
-               double DPPCLKDelayCNVCCursor,
-               double DISPCLKDelaySubtotal,
                unsigned int DPP_RECOUT_WIDTH,
-               enum output_format_class OutputFormat,
-               unsigned int MaxInterDCNTileRepeaters,
                unsigned int VStartup,
                unsigned int MaxVStartup,
-               unsigned int GPUVMPageTableLevels,
-               bool GPUVMEnable,
-               bool HostVMEnable,
-               unsigned int HostVMMaxNonCachedPageTableLevels,
-               double HostVMMinPageSize,
-               bool DynamicMetadataEnable,
-               bool DynamicMetadataVMEnabled,
-               int DynamicMetadataLinesBeforeActiveRequired,
-               unsigned int DynamicMetadataTransmittedBytes,
                double UrgentLatency,
                double UrgentExtraLatency,
                double TCalc,
@@ -3425,6 +3437,7 @@ bool dml32_CalculatePrefetchSchedule(
                double   *VUpdateWidthPix,
                double   *VReadyOffsetPix)
 {
+       double DPPCLKDelaySubtotalPlusCNVCFormater = v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater;
        bool MyError = false;
        unsigned int DPPCycles, DISPCLKCycles;
        double DSTTotalPixelsAfterScaler;
@@ -3461,27 +3474,27 @@ bool dml32_CalculatePrefetchSchedule(
        double  Tsw_est1 = 0;
        double  Tsw_est3 = 0;
 
-       if (GPUVMEnable == true && HostVMEnable == true)
-               HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+       if (v->GPUVMEnable == true && v->HostVMEnable == true)
+               HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
        else
                HostVMDynamicLevelsTrips = 0;
 #ifdef __DML_VBA_DEBUG__
-       dml_print("DML::%s: GPUVMEnable = %d\n", __func__, GPUVMEnable);
-       dml_print("DML::%s: GPUVMPageTableLevels = %d\n", __func__, GPUVMPageTableLevels);
+       dml_print("DML::%s: v->GPUVMEnable = %d\n", __func__, v->GPUVMEnable);
+       dml_print("DML::%s: v->GPUVMMaxPageTableLevels = %d\n", __func__, v->GPUVMMaxPageTableLevels);
        dml_print("DML::%s: DCCEnable = %d\n", __func__, myPipe->DCCEnable);
-       dml_print("DML::%s: HostVMEnable=%d HostVMInefficiencyFactor=%f\n",
-                       __func__, HostVMEnable, HostVMInefficiencyFactor);
+       dml_print("DML::%s: v->HostVMEnable=%d HostVMInefficiencyFactor=%f\n",
+                       __func__, v->HostVMEnable, HostVMInefficiencyFactor);
 #endif
        dml32_CalculateVUpdateAndDynamicMetadataParameters(
-                       MaxInterDCNTileRepeaters,
+                       v->MaxInterDCNTileRepeaters,
                        myPipe->Dppclk,
                        myPipe->Dispclk,
                        myPipe->DCFClkDeepSleep,
                        myPipe->PixelClock,
                        myPipe->HTotal,
                        myPipe->VBlank,
-                       DynamicMetadataTransmittedBytes,
-                       DynamicMetadataLinesBeforeActiveRequired,
+                       v->DynamicMetadataTransmittedBytes[k],
+                       v->DynamicMetadataLinesBeforeActiveRequired[k],
                        myPipe->InterlaceEnable,
                        myPipe->ProgressiveToInterlaceUnitInOPP,
                        TSetup,
@@ -3496,19 +3509,19 @@ bool dml32_CalculatePrefetchSchedule(
 
        LineTime = myPipe->HTotal / myPipe->PixelClock;
        trip_to_mem = UrgentLatency;
-       Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
+       Tvm_trips = UrgentExtraLatency + trip_to_mem * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
 
-       if (DynamicMetadataVMEnabled == true)
+       if (v->DynamicMetadataVMEnabled == true)
                *Tdmdl = TWait + Tvm_trips + trip_to_mem;
        else
                *Tdmdl = TWait + UrgentExtraLatency;
 
 #ifdef __DML_VBA_ALLOW_DELTA__
-       if (DynamicMetadataEnable == false)
+       if (v->DynamicMetadataEnable[k] == false)
                *Tdmdl = 0.0;
 #endif
 
-       if (DynamicMetadataEnable == true) {
+       if (v->DynamicMetadataEnable[k] == true) {
                if (VStartup * LineTime < *TSetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
                        *NotEnoughTimeForDynamicMetadata = true;
 #ifdef __DML_VBA_DEBUG__
@@ -3528,17 +3541,17 @@ bool dml32_CalculatePrefetchSchedule(
                *NotEnoughTimeForDynamicMetadata = false;
        }
 
-       *Tdmdl_vm =  (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true &&
-                       GPUVMEnable == true ? TWait + Tvm_trips : 0);
+       *Tdmdl_vm =  (v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true &&
+                       v->GPUVMEnable == true ? TWait + Tvm_trips : 0);
 
        if (myPipe->ScalerEnabled)
-               DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
+               DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCL;
        else
-               DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
+               DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCLLBOnly;
 
-       DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
+       DPPCycles = DPPCycles + myPipe->NumberOfCursors * v->DPPCLKDelayCNVCCursor;
 
-       DISPCLKCycles = DISPCLKDelaySubtotal;
+       DISPCLKCycles = v->DISPCLKDelaySubtotal;
 
        if (myPipe->Dppclk == 0.0 || myPipe->Dispclk == 0.0)
                return true;
@@ -3564,7 +3577,7 @@ bool dml32_CalculatePrefetchSchedule(
        dml_print("DML::%s: DSTXAfterScaler: %d\n", __func__,  *DSTXAfterScaler);
 #endif
 
-       if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP))
+       if (v->OutputFormat[k] == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP))
                *DSTYAfterScaler = 1;
        else
                *DSTYAfterScaler = 0;
@@ -3581,13 +3594,13 @@ bool dml32_CalculatePrefetchSchedule(
 
        Tr0_trips = trip_to_mem * (HostVMDynamicLevelsTrips + 1);
 
-       if (GPUVMEnable == true) {
+       if (v->GPUVMEnable == true) {
                Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1.0) / 4.0 * LineTime;
                Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
-               if (GPUVMPageTableLevels >= 3) {
+               if (v->GPUVMMaxPageTableLevels >= 3) {
                        *Tno_bw = UrgentExtraLatency + trip_to_mem *
-                                       (double) ((GPUVMPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1);
-               } else if (GPUVMPageTableLevels == 1 && myPipe->DCCEnable != true) {
+                                       (double) ((v->GPUVMMaxPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1);
+               } else if (v->GPUVMMaxPageTableLevels == 1 && myPipe->DCCEnable != true) {
                        Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / LineTime, 1.0) /
                                        4.0 * LineTime; // VBA_ERROR
                        *Tno_bw = UrgentExtraLatency;
@@ -3622,7 +3635,7 @@ bool dml32_CalculatePrefetchSchedule(
        min_Lsw = dml_max(min_Lsw, 1.0);
        Lsw_oto = dml_ceil(4.0 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1.0) / 4.0;
 
-       if (GPUVMEnable == true) {
+       if (v->GPUVMEnable == true) {
                Tvm_oto = dml_max3(
                                Tvm_trips,
                                *Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
@@ -3630,7 +3643,7 @@ bool dml32_CalculatePrefetchSchedule(
        } else
                Tvm_oto = LineTime / 4.0;
 
-       if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+       if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
                Tr0_oto = dml_max4(
                                Tr0_trips,
                                (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
@@ -3833,7 +3846,7 @@ bool dml32_CalculatePrefetchSchedule(
 #endif
 
                        if (prefetch_bw_equ > 0) {
-                               if (GPUVMEnable == true) {
+                               if (v->GPUVMEnable == true) {
                                        Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
                                                        HostVMInefficiencyFactor / prefetch_bw_equ,
                                                        Tvm_trips, LineTime / 4);
@@ -3841,7 +3854,7 @@ bool dml32_CalculatePrefetchSchedule(
                                        Tvm_equ = LineTime / 4;
                                }
 
-                               if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+                               if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
                                        Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
                                                        HostVMInefficiencyFactor) / prefetch_bw_equ, Tr0_trips,
                                                        (LineTime - Tvm_equ) / 2, LineTime / 4);
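
The *_rounded prefetch terms in this function use the dml_ceil(4.0 * t / LineTime, 1.0) / 4.0 * LineTime idiom, which rounds a latency up to the next quarter of a line time. A tiny worked example with assumed numbers:

#include <math.h>
#include <stdio.h>

/* round a latency up to the next multiple of a quarter line time,
 * mirroring dml_ceil(4.0 * t / LineTime, 1.0) / 4.0 * LineTime */
static double quarter_line_round_up(double t_us, double line_time_us)
{
	return ceil(4.0 * t_us / line_time_us) / 4.0 * line_time_us;
}

int main(void)
{
	double line_time_us = 8.0;	/* assumed: 8 us per line */

	printf("%.2f\n", quarter_line_round_up(5.3, line_time_us));	/* 6.00 */
	return 0;
}
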
@@ -4206,58 +4219,28 @@ void dml32_CalculateFlipSchedule(
 } // CalculateFlipSchedule
 
 void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
-               bool USRRetrainingRequiredFinal,
-               enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
+               struct vba_vars_st *v,
                unsigned int PrefetchMode,
-               unsigned int NumberOfActiveSurfaces,
-               unsigned int MaxLineBufferLines,
-               unsigned int LineBufferSize,
-               unsigned int WritebackInterfaceBufferSize,
                double DCFCLK,
                double ReturnBW,
-               bool SynchronizeTimingsFinal,
-               bool SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
-               bool DRRDisplay[],
-               unsigned int dpte_group_bytes[],
-               unsigned int meta_row_height[],
-               unsigned int meta_row_height_chroma[],
                SOCParametersList mmSOCParameters,
-               unsigned int WritebackChunkSize,
                double SOCCLK,
                double DCFClkDeepSleep,
                unsigned int DETBufferSizeY[],
                unsigned int DETBufferSizeC[],
                unsigned int SwathHeightY[],
                unsigned int SwathHeightC[],
-               unsigned int LBBitPerPixel[],
                double SwathWidthY[],
                double SwathWidthC[],
-               double HRatio[],
-               double HRatioChroma[],
-               unsigned int VTaps[],
-               unsigned int VTapsChroma[],
-               double VRatio[],
-               double VRatioChroma[],
-               unsigned int HTotal[],
-               unsigned int VTotal[],
-               unsigned int VActive[],
-               double PixelClock[],
-               unsigned int BlendingAndTiming[],
                unsigned int DPPPerSurface[],
                double BytePerPixelDETY[],
                double BytePerPixelDETC[],
                double DSTXAfterScaler[],
                double DSTYAfterScaler[],
-               bool WritebackEnable[],
-               enum source_format_class WritebackPixelFormat[],
-               double WritebackDestinationWidth[],
-               double WritebackDestinationHeight[],
-               double WritebackSourceHeight[],
                bool UnboundedRequestEnabled,
                unsigned int CompressedBufferSizeInkByte,
 
                /* Output */
-               Watermarks *Watermark,
                enum clock_change_support *DRAMClockChangeSupport,
                double MaxActiveDRAMClockChangeLatencySupported[],
                unsigned int SubViewportLinesNeededInMALL[],
@@ -4299,136 +4282,136 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
        unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
        unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
 
-       Watermark->UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
-       Watermark->USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
+       v->Watermark.UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
+       v->Watermark.USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
                        + mmSOCParameters.USRRetrainingLatency + mmSOCParameters.SMNLatency;
-       Watermark->DRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + Watermark->UrgentWatermark;
-       Watermark->FCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + Watermark->UrgentWatermark;
-       Watermark->StutterExitWatermark = mmSOCParameters.SRExitTime + mmSOCParameters.ExtraLatency
+       v->Watermark.DRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + v->Watermark.UrgentWatermark;
+       v->Watermark.FCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + v->Watermark.UrgentWatermark;
+       v->Watermark.StutterExitWatermark = mmSOCParameters.SRExitTime + mmSOCParameters.ExtraLatency
                        + 10 / DCFClkDeepSleep;
-       Watermark->StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitTime + mmSOCParameters.ExtraLatency
+       v->Watermark.StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitTime + mmSOCParameters.ExtraLatency
                        + 10 / DCFClkDeepSleep;
-       Watermark->Z8StutterExitWatermark = mmSOCParameters.SRExitZ8Time + mmSOCParameters.ExtraLatency
+       v->Watermark.Z8StutterExitWatermark = mmSOCParameters.SRExitZ8Time + mmSOCParameters.ExtraLatency
                        + 10 / DCFClkDeepSleep;
-       Watermark->Z8StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitZ8Time
+       v->Watermark.Z8StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitZ8Time
                        + mmSOCParameters.ExtraLatency + 10 / DCFClkDeepSleep;
 
 #ifdef __DML_VBA_DEBUG__
        dml_print("DML::%s: UrgentLatency = %f\n", __func__, mmSOCParameters.UrgentLatency);
        dml_print("DML::%s: ExtraLatency = %f\n", __func__, mmSOCParameters.ExtraLatency);
        dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, mmSOCParameters.DRAMClockChangeLatency);
-       dml_print("DML::%s: UrgentWatermark = %f\n", __func__, Watermark->UrgentWatermark);
-       dml_print("DML::%s: USRRetrainingWatermark = %f\n", __func__, Watermark->USRRetrainingWatermark);
-       dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, Watermark->DRAMClockChangeWatermark);
-       dml_print("DML::%s: FCLKChangeWatermark = %f\n", __func__, Watermark->FCLKChangeWatermark);
-       dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, Watermark->StutterExitWatermark);
-       dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, Watermark->StutterEnterPlusExitWatermark);
-       dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, Watermark->Z8StutterExitWatermark);
+       dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->Watermark.UrgentWatermark);
+       dml_print("DML::%s: USRRetrainingWatermark = %f\n", __func__, v->Watermark.USRRetrainingWatermark);
+       dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->Watermark.DRAMClockChangeWatermark);
+       dml_print("DML::%s: FCLKChangeWatermark = %f\n", __func__, v->Watermark.FCLKChangeWatermark);
+       dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, v->Watermark.StutterExitWatermark);
+       dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, v->Watermark.StutterEnterPlusExitWatermark);
+       dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, v->Watermark.Z8StutterExitWatermark);
        dml_print("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n",
-                       __func__, Watermark->Z8StutterEnterPlusExitWatermark);
+                       __func__, v->Watermark.Z8StutterEnterPlusExitWatermark);
 #endif
 
 
        TotalActiveWriteback = 0;
-       for (k = 0; k < NumberOfActiveSurfaces; ++k) {
-               if (WritebackEnable[k] == true)
+       for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+               if (v->WritebackEnable[k] == true)
                        TotalActiveWriteback = TotalActiveWriteback + 1;
        }
 
        if (TotalActiveWriteback <= 1) {
-               Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
+               v->Watermark.WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
        } else {
-               Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
-                               + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+               v->Watermark.WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
+                               + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
        }
-       if (USRRetrainingRequiredFinal)
-               Watermark->WritebackUrgentWatermark = Watermark->WritebackUrgentWatermark
+       if (v->USRRetrainingRequiredFinal)
+               v->Watermark.WritebackUrgentWatermark = v->Watermark.WritebackUrgentWatermark
                                + mmSOCParameters.USRRetrainingLatency;
 
        if (TotalActiveWriteback <= 1) {
-               Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+               v->Watermark.WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
                                + mmSOCParameters.WritebackLatency;
-               Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+               v->Watermark.WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
                                + mmSOCParameters.WritebackLatency;
        } else {
-               Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
-                               + mmSOCParameters.WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
-               Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
-                               + mmSOCParameters.WritebackLatency + WritebackChunkSize * 1024 / 32 / SOCCLK;
+               v->Watermark.WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+                               + mmSOCParameters.WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+               v->Watermark.WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+                               + mmSOCParameters.WritebackLatency + v->WritebackChunkSize * 1024 / 32 / SOCCLK;
        }
 
-       if (USRRetrainingRequiredFinal)
-               Watermark->WritebackDRAMClockChangeWatermark = Watermark->WritebackDRAMClockChangeWatermark
+       if (v->USRRetrainingRequiredFinal)
+               v->Watermark.WritebackDRAMClockChangeWatermark = v->Watermark.WritebackDRAMClockChangeWatermark
                                + mmSOCParameters.USRRetrainingLatency;
 
-       if (USRRetrainingRequiredFinal)
-               Watermark->WritebackFCLKChangeWatermark = Watermark->WritebackFCLKChangeWatermark
+       if (v->USRRetrainingRequiredFinal)
+               v->Watermark.WritebackFCLKChangeWatermark = v->Watermark.WritebackFCLKChangeWatermark
                                + mmSOCParameters.USRRetrainingLatency;
 
 #ifdef __DML_VBA_DEBUG__
        dml_print("DML::%s: WritebackDRAMClockChangeWatermark = %f\n",
-                       __func__, Watermark->WritebackDRAMClockChangeWatermark);
-       dml_print("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, Watermark->WritebackFCLKChangeWatermark);
-       dml_print("DML::%s: WritebackUrgentWatermark = %f\n", __func__, Watermark->WritebackUrgentWatermark);
-       dml_print("DML::%s: USRRetrainingRequiredFinal = %d\n", __func__, USRRetrainingRequiredFinal);
+                       __func__, v->Watermark.WritebackDRAMClockChangeWatermark);
+       dml_print("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, v->Watermark.WritebackFCLKChangeWatermark);
+       dml_print("DML::%s: WritebackUrgentWatermark = %f\n", __func__, v->Watermark.WritebackUrgentWatermark);
+       dml_print("DML::%s: v->USRRetrainingRequiredFinal = %d\n", __func__, v->USRRetrainingRequiredFinal);
        dml_print("DML::%s: USRRetrainingLatency = %f\n", __func__, mmSOCParameters.USRRetrainingLatency);
 #endif
 
-       for (k = 0; k < NumberOfActiveSurfaces; ++k) {
-               TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] +
-                               SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k]) / (HTotal[k] / PixelClock[k]);
+       for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+               TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] +
+                               SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k]) / (v->HTotal[k] / v->PixelClock[k]);
        }
 
-       for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+       for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
 
-               LBLatencyHidingSourceLinesY[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (VTaps[k] - 1);
-               LBLatencyHidingSourceLinesC[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTapsChroma[k] - 1);
+               LBLatencyHidingSourceLinesY[k] = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSizeFinal / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
+               LBLatencyHidingSourceLinesC[k] = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSizeFinal / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
 
 
 #ifdef __DML_VBA_DEBUG__
-               dml_print("DML::%s: k=%d, MaxLineBufferLines = %d\n", __func__, k, MaxLineBufferLines);
-               dml_print("DML::%s: k=%d, LineBufferSize     = %d\n", __func__, k, LineBufferSize);
-               dml_print("DML::%s: k=%d, LBBitPerPixel      = %d\n", __func__, k, LBBitPerPixel[k]);
-               dml_print("DML::%s: k=%d, HRatio             = %f\n", __func__, k, HRatio[k]);
-               dml_print("DML::%s: k=%d, VTaps              = %d\n", __func__, k, VTaps[k]);
+               dml_print("DML::%s: k=%d, v->MaxLineBufferLines = %d\n", __func__, k, v->MaxLineBufferLines);
+               dml_print("DML::%s: k=%d, v->LineBufferSizeFinal     = %d\n", __func__, k, v->LineBufferSizeFinal);
+               dml_print("DML::%s: k=%d, v->LBBitPerPixel      = %d\n", __func__, k, v->LBBitPerPixel[k]);
+               dml_print("DML::%s: k=%d, v->HRatio             = %f\n", __func__, k, v->HRatio[k]);
+               dml_print("DML::%s: k=%d, v->vtaps              = %d\n", __func__, k, v->vtaps[k]);
 #endif
 
-               EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / VRatio[k] * (HTotal[k] / PixelClock[k]);
-               EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+               EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
+               EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
                EffectiveDETBufferSizeY = DETBufferSizeY[k];
 
                if (UnboundedRequestEnabled) {
                        EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
                                        + CompressedBufferSizeInkByte * 1024
-                                                       * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k])
-                                                       / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+                                                       * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k])
+                                                       / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
                }
 
                LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
                LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
-               FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+               FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
 
                ActiveClockChangeLatencyHidingY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
-                               - (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k];
+                               - (DSTXAfterScaler[k] / v->HTotal[k] + DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k];
 
-               if (NumberOfActiveSurfaces > 1) {
+               if (v->NumberOfActiveSurfaces > 1) {
                        ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY
-                                       - (1 - 1 / NumberOfActiveSurfaces) * SwathHeightY[k] * HTotal[k]
-                                                       / PixelClock[k] / VRatio[k];
+                                       - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k]
+                                                       / v->PixelClock[k] / v->VRatio[k];
                }
 
                if (BytePerPixelDETC[k] > 0) {
                        LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
                        LinesInDETCRoundedDownToSwath[k] = dml_floor(LinesInDETC[k], SwathHeightC[k]);
-                       FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k])
-                                       / VRatioChroma[k];
+                       FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k])
+                                       / v->VRatioChroma[k];
                        ActiveClockChangeLatencyHidingC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
-                                       - (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k]
-                                                       / PixelClock[k];
-                       if (NumberOfActiveSurfaces > 1) {
+                                       - (DSTXAfterScaler[k] / v->HTotal[k] + DSTYAfterScaler[k]) * v->HTotal[k]
+                                                       / v->PixelClock[k];
+                       if (v->NumberOfActiveSurfaces > 1) {
                                ActiveClockChangeLatencyHidingC = ActiveClockChangeLatencyHidingC
-                                               - (1 - 1 / NumberOfActiveSurfaces) * SwathHeightC[k] * HTotal[k]
-                                                               / PixelClock[k] / VRatioChroma[k];
+                                               - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightC[k] * v->HTotal[k]
+                                                               / v->PixelClock[k] / v->VRatioChroma[k];
                        }
                        ActiveClockChangeLatencyHiding = dml_min(ActiveClockChangeLatencyHidingY,
                                        ActiveClockChangeLatencyHidingC);
@@ -4436,24 +4419,24 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
                        ActiveClockChangeLatencyHiding = ActiveClockChangeLatencyHidingY;
                }
 
-               ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
-                               - Watermark->DRAMClockChangeWatermark;
-               ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
-                               - Watermark->FCLKChangeWatermark;
-               USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->USRRetrainingWatermark;
-
-               if (WritebackEnable[k]) {
-                       WritebackLatencyHiding = WritebackInterfaceBufferSize * 1024
-                                       / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k]
-                                                       / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
-                       if (WritebackPixelFormat[k] == dm_444_64)
+               ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.UrgentWatermark
+                               - v->Watermark.DRAMClockChangeWatermark;
+               ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.UrgentWatermark
+                               - v->Watermark.FCLKChangeWatermark;
+               USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.USRRetrainingWatermark;
+
+               if (v->WritebackEnable[k]) {
+                       WritebackLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+                                       / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k]
+                                                       / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+                       if (v->WritebackPixelFormat[k] == dm_444_64)
                                WritebackLatencyHiding = WritebackLatencyHiding / 2;
 
                        WritebackDRAMClockChangeLatencyMargin = WritebackLatencyHiding
-                                       - Watermark->WritebackDRAMClockChangeWatermark;
+                                       - v->Watermark.WritebackDRAMClockChangeWatermark;
 
                        WritebackFCLKChangeLatencyMargin = WritebackLatencyHiding
-                                       - Watermark->WritebackFCLKChangeWatermark;
+                                       - v->Watermark.WritebackFCLKChangeWatermark;
 
                        ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMargin[k],
                                        WritebackFCLKChangeLatencyMargin);
@@ -4461,22 +4444,22 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
                                        WritebackDRAMClockChangeLatencyMargin);
                }
                MaxActiveDRAMClockChangeLatencySupported[k] =
-                               (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
+                               (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
                                                0 :
                                                (ActiveDRAMClockChangeLatencyMargin[k]
                                                                + mmSOCParameters.DRAMClockChangeLatency);
        }
 
-       for (i = 0; i < NumberOfActiveSurfaces; ++i) {
-               for (j = 0; j < NumberOfActiveSurfaces; ++j) {
+       for (i = 0; i < v->NumberOfActiveSurfaces; ++i) {
+               for (j = 0; j < v->NumberOfActiveSurfaces; ++j) {
                        if (i == j ||
-                                       (BlendingAndTiming[i] == i && BlendingAndTiming[j] == i) ||
-                                       (BlendingAndTiming[j] == j && BlendingAndTiming[i] == j) ||
-                                       (BlendingAndTiming[i] == BlendingAndTiming[j] && BlendingAndTiming[i] != i) ||
-                                       (SynchronizeTimingsFinal && PixelClock[i] == PixelClock[j] &&
-                                       HTotal[i] == HTotal[j] && VTotal[i] == VTotal[j] &&
-                                       VActive[i] == VActive[j]) || (SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
-                                       (DRRDisplay[i] || DRRDisplay[j]))) {
+                                       (v->BlendingAndTiming[i] == i && v->BlendingAndTiming[j] == i) ||
+                                       (v->BlendingAndTiming[j] == j && v->BlendingAndTiming[i] == j) ||
+                                       (v->BlendingAndTiming[i] == v->BlendingAndTiming[j] && v->BlendingAndTiming[i] != i) ||
+                                       (v->SynchronizeTimingsFinal && v->PixelClock[i] == v->PixelClock[j] &&
+                                       v->HTotal[i] == v->HTotal[j] && v->VTotal[i] == v->VTotal[j] &&
+                                       v->VActive[i] == v->VActive[j]) || (v->SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
+                                       (v->DRRDisplay[i] || v->DRRDisplay[j]))) {
                                SynchronizedSurfaces[i][j] = true;
                        } else {
                                SynchronizedSurfaces[i][j] = false;
@@ -4484,8 +4467,8 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
                }
        }
 
-       for (k = 0; k < NumberOfActiveSurfaces; ++k) {
-               if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+       for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+               if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
                                (!FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
                                ActiveFCLKChangeLatencyMargin[k] < MinActiveFCLKChangeMargin)) {
                        FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
@@ -4497,9 +4480,9 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
        *MinActiveFCLKChangeLatencySupported = MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
 
        SameTimingForFCLKChange = true;
-       for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+       for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
                if (!SynchronizedSurfaces[k][SurfaceWithMinActiveFCLKChangeMargin]) {
-                       if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+                       if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
                                        (SameTimingForFCLKChange ||
                                        ActiveFCLKChangeLatencyMargin[k] <
                                        SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
@@ -4519,17 +4502,17 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
        }
 
        *USRRetrainingSupport = true;
-       for (k = 0; k < NumberOfActiveSurfaces; ++k) {
-               if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+       for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+               if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
                                (USRRetrainingLatencyMargin[k] < 0)) {
                        *USRRetrainingSupport = false;
                }
        }
 
-       for (k = 0; k < NumberOfActiveSurfaces; ++k) {
-               if (UseMALLForPStateChange[k] != dm_use_mall_pstate_change_full_frame &&
-                               UseMALLForPStateChange[k] != dm_use_mall_pstate_change_sub_viewport &&
-                               UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
+       for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+               if (v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_full_frame &&
+                               v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_sub_viewport &&
+                               v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
                                ActiveDRAMClockChangeLatencyMargin[k] < 0) {
                        if (PrefetchMode > 0) {
                                DRAMClockChangeSupportNumber = 2;
@@ -4543,10 +4526,10 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
                }
        }
 
-       for (k = 0; k < NumberOfActiveSurfaces; ++k) {
-               if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
+       for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+               if (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
                        DRAMClockChangeMethod = 1;
-               else if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
+               else if (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
                        DRAMClockChangeMethod = 2;
        }
 
@@ -4573,16 +4556,16 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
                        *DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
        }
 
-       for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+       for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
                unsigned int dst_y_pstate;
                unsigned int src_y_pstate_l;
                unsigned int src_y_pstate_c;
                unsigned int src_y_ahead_l, src_y_ahead_c, sub_vp_lines_l, sub_vp_lines_c;
 
-               dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (HTotal[k] / PixelClock[k]), 1);
-               src_y_pstate_l = dml_ceil(dst_y_pstate * VRatio[k], SwathHeightY[k]);
+               dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (v->HTotal[k] / v->PixelClock[k]), 1);
+               src_y_pstate_l = dml_ceil(dst_y_pstate * v->VRatio[k], SwathHeightY[k]);
                src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + LBLatencyHidingSourceLinesY[k];
-               sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + meta_row_height[k];
+               sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + v->meta_row_height[k];
 
 #ifdef __DML_VBA_DEBUG__
 dml_print("DML::%s: k=%d, DETBufferSizeY               = %d\n", __func__, k, DETBufferSizeY[k]);
@@ -4593,21 +4576,21 @@ dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY  = %d\n", __func__, k, LBL
 dml_print("DML::%s: k=%d, dst_y_pstate      = %d\n", __func__, k, dst_y_pstate);
 dml_print("DML::%s: k=%d, src_y_pstate_l    = %d\n", __func__, k, src_y_pstate_l);
 dml_print("DML::%s: k=%d, src_y_ahead_l     = %d\n", __func__, k, src_y_ahead_l);
-dml_print("DML::%s: k=%d, meta_row_height   = %d\n", __func__, k, meta_row_height[k]);
+dml_print("DML::%s: k=%d, v->meta_row_height   = %d\n", __func__, k, v->meta_row_height[k]);
 dml_print("DML::%s: k=%d, sub_vp_lines_l    = %d\n", __func__, k, sub_vp_lines_l);
 #endif
                SubViewportLinesNeededInMALL[k] = sub_vp_lines_l;
 
                if (BytePerPixelDETC[k] > 0) {
-                       src_y_pstate_c = dml_ceil(dst_y_pstate * VRatioChroma[k], SwathHeightC[k]);
+                       src_y_pstate_c = dml_ceil(dst_y_pstate * v->VRatioChroma[k], SwathHeightC[k]);
                        src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + LBLatencyHidingSourceLinesC[k];
-                       sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + meta_row_height_chroma[k];
+                       sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + v->meta_row_height_chroma[k];
                        SubViewportLinesNeededInMALL[k] = dml_max(sub_vp_lines_l, sub_vp_lines_c);
 
 #ifdef __DML_VBA_DEBUG__
 dml_print("DML::%s: k=%d, src_y_pstate_c            = %d\n", __func__, k, src_y_pstate_c);
 dml_print("DML::%s: k=%d, src_y_ahead_c             = %d\n", __func__, k, src_y_ahead_c);
-dml_print("DML::%s: k=%d, meta_row_height_chroma    = %d\n", __func__, k, meta_row_height_chroma[k]);
+dml_print("DML::%s: k=%d, v->meta_row_height_chroma    = %d\n", __func__, k, v->meta_row_height_chroma[k]);
 dml_print("DML::%s: k=%d, sub_vp_lines_c            = %d\n", __func__, k, sub_vp_lines_c);
 #endif
                }
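The hunks above (and the matching prototype change further down in this diff) collapse dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport's long per-surface parameter list into a single struct vba_vars_st *v context pointer, so values such as VRatio[], HTotal[] and PixelClock[] are read straight from the shared VBA state. A minimal standalone sketch of that refactor pattern follows; the struct and field names are illustrative only, not the real vba_vars_st layout.

/*
 * Sketch: pass one context struct instead of many per-surface arrays.
 * HTotal is in pixels and PixelClock in MHz, so htotal / pixel_clock
 * gives the line time in microseconds, as in the DML code above.
 */
#include <stdio.h>

struct ctx {
        unsigned int num_surfaces;
        unsigned int htotal[4];
        double pixel_clock[4];
};

/* before: double line_time(unsigned int HTotal, double PixelClock); */
static double line_time(const struct ctx *v, unsigned int k)
{
        return v->htotal[k] / v->pixel_clock[k];
}

int main(void)
{
        struct ctx v = {
                .num_surfaces = 1,
                .htotal = { 4400 },
                .pixel_clock = { 594.0 },
        };

        for (unsigned int k = 0; k < v.num_surfaces; ++k)
                printf("line time: %.3f us\n", line_time(&v, k));
        return 0;
}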
index d293856..0b427d8 100644 (file)
@@ -30,6 +30,7 @@
 #include "os_types.h"
 #include "../dc_features.h"
 #include "../display_mode_structs.h"
+#include "dml/display_mode_vba.h"
 
 unsigned int dml32_dscceComputeDelay(
                unsigned int bpc,
@@ -215,6 +216,7 @@ void dml32_CalculateDETBufferSize(
 void dml32_CalculateODMMode(
                unsigned int MaximumPixelsPerLinePerDSCUnit,
                unsigned int HActive,
+               enum output_format_class OutFormat,
                enum output_encoder_class Output,
                enum odm_combine_policy ODMUse,
                double StateDispclk,
@@ -712,28 +714,14 @@ double dml32_CalculateExtraLatency(
                unsigned int HostVMMaxNonCachedPageTableLevels);
 
 bool dml32_CalculatePrefetchSchedule(
+               struct vba_vars_st *v,
+               unsigned int k,
                double HostVMInefficiencyFactor,
                DmlPipe *myPipe,
                unsigned int DSCDelay,
-               double DPPCLKDelaySubtotalPlusCNVCFormater,
-               double DPPCLKDelaySCL,
-               double DPPCLKDelaySCLLBOnly,
-               double DPPCLKDelayCNVCCursor,
-               double DISPCLKDelaySubtotal,
                unsigned int DPP_RECOUT_WIDTH,
-               enum output_format_class OutputFormat,
-               unsigned int MaxInterDCNTileRepeaters,
                unsigned int VStartup,
                unsigned int MaxVStartup,
-               unsigned int GPUVMPageTableLevels,
-               bool GPUVMEnable,
-               bool HostVMEnable,
-               unsigned int HostVMMaxNonCachedPageTableLevels,
-               double HostVMMinPageSize,
-               bool DynamicMetadataEnable,
-               bool DynamicMetadataVMEnabled,
-               int DynamicMetadataLinesBeforeActiveRequired,
-               unsigned int DynamicMetadataTransmittedBytes,
                double UrgentLatency,
                double UrgentExtraLatency,
                double TCalc,
@@ -807,58 +795,28 @@ void dml32_CalculateFlipSchedule(
                bool *ImmediateFlipSupportedForPipe);
 
 void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
-               bool USRRetrainingRequiredFinal,
-               enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
+               struct vba_vars_st *v,
                unsigned int PrefetchMode,
-               unsigned int NumberOfActiveSurfaces,
-               unsigned int MaxLineBufferLines,
-               unsigned int LineBufferSize,
-               unsigned int WritebackInterfaceBufferSize,
                double DCFCLK,
                double ReturnBW,
-               bool SynchronizeTimingsFinal,
-               bool SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
-               bool DRRDisplay[],
-               unsigned int dpte_group_bytes[],
-               unsigned int meta_row_height[],
-               unsigned int meta_row_height_chroma[],
                SOCParametersList mmSOCParameters,
-               unsigned int WritebackChunkSize,
                double SOCCLK,
                double DCFClkDeepSleep,
                unsigned int DETBufferSizeY[],
                unsigned int DETBufferSizeC[],
                unsigned int SwathHeightY[],
                unsigned int SwathHeightC[],
-               unsigned int LBBitPerPixel[],
                double SwathWidthY[],
                double SwathWidthC[],
-               double HRatio[],
-               double HRatioChroma[],
-               unsigned int VTaps[],
-               unsigned int VTapsChroma[],
-               double VRatio[],
-               double VRatioChroma[],
-               unsigned int HTotal[],
-               unsigned int VTotal[],
-               unsigned int VActive[],
-               double PixelClock[],
-               unsigned int BlendingAndTiming[],
                unsigned int DPPPerSurface[],
                double BytePerPixelDETY[],
                double BytePerPixelDETC[],
                double DSTXAfterScaler[],
                double DSTYAfterScaler[],
-               bool WritebackEnable[],
-               enum source_format_class WritebackPixelFormat[],
-               double WritebackDestinationWidth[],
-               double WritebackDestinationHeight[],
-               double WritebackSourceHeight[],
                bool UnboundedRequestEnabled,
                unsigned int CompressedBufferSizeInkByte,
 
                /* Output */
-               Watermarks *Watermark,
                enum clock_change_support *DRAMClockChangeSupport,
                double MaxActiveDRAMClockChangeLatencySupported[],
                unsigned int SubViewportLinesNeededInMALL[],
index 5d27ff0..f5400ed 100644 (file)
@@ -35,6 +35,8 @@
 #include "dcn30/display_rq_dlg_calc_30.h"
 #include "dcn31/display_mode_vba_31.h"
 #include "dcn31/display_rq_dlg_calc_31.h"
+#include "dcn314/display_mode_vba_314.h"
+#include "dcn314/display_rq_dlg_calc_314.h"
 #include "dcn32/display_mode_vba_32.h"
 #include "dcn32/display_rq_dlg_calc_32.h"
 #include "dml_logger.h"
@@ -74,6 +76,13 @@ const struct dml_funcs dml31_funcs = {
        .rq_dlg_get_rq_reg = dml31_rq_dlg_get_rq_reg
 };
 
+const struct dml_funcs dml314_funcs = {
+       .validate = dml314_ModeSupportAndSystemConfigurationFull,
+       .recalculate = dml314_recalculate,
+       .rq_dlg_get_dlg_reg = dml314_rq_dlg_get_dlg_reg,
+       .rq_dlg_get_rq_reg = dml314_rq_dlg_get_rq_reg
+};
+
 const struct dml_funcs dml32_funcs = {
        .validate = dml32_ModeSupportAndSystemConfigurationFull,
     .recalculate = dml32_recalculate,
@@ -107,6 +116,9 @@ void dml_init_instance(struct display_mode_lib *lib,
        case DML_PROJECT_DCN31_FPGA:
                lib->funcs = dml31_funcs;
                break;
+       case DML_PROJECT_DCN314:
+               lib->funcs = dml314_funcs;
+               break;
        case DML_PROJECT_DCN32:
                lib->funcs = dml32_funcs;
                break;
index 2bdd6ed..b1878a1 100644 (file)
@@ -41,6 +41,7 @@ enum dml_project {
        DML_PROJECT_DCN30,
        DML_PROJECT_DCN31,
        DML_PROJECT_DCN31_FPGA,
+       DML_PROJECT_DCN314,
        DML_PROJECT_DCN32,
 };
 
index 492aec6..2051dda 100644 (file)
@@ -651,10 +651,10 @@ struct vba_vars_st {
 
        unsigned int OutputTypeAndRatePerState[DC__VOLTAGE_STATES][DC__NUM_DPP__MAX];
        double RequiredDISPCLKPerSurface[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
-       unsigned int MicroTileHeightY[DC__NUM_DPP__MAX];
-       unsigned int MicroTileHeightC[DC__NUM_DPP__MAX];
-       unsigned int MicroTileWidthY[DC__NUM_DPP__MAX];
-       unsigned int MicroTileWidthC[DC__NUM_DPP__MAX];
+       unsigned int MacroTileHeightY[DC__NUM_DPP__MAX];
+       unsigned int MacroTileHeightC[DC__NUM_DPP__MAX];
+       unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
+       unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
        bool ImmediateFlipRequiredFinal;
        bool DCCProgrammingAssumesScanDirectionUnknownFinal;
        bool EnoughWritebackUnits;
@@ -800,8 +800,6 @@ struct vba_vars_st {
        double PSCL_FACTOR[DC__NUM_DPP__MAX];
        double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX];
        double MaximumVStartup[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
-       unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
-       unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
        double AlignedDCCMetaPitch[DC__NUM_DPP__MAX];
        double AlignedYPitch[DC__NUM_DPP__MAX];
        double AlignedCPitch[DC__NUM_DPP__MAX];
index 5d2b028..d9f1b0a 100644 (file)
@@ -214,6 +214,7 @@ struct dummy_pstate_entry {
 struct clk_bw_params {
        unsigned int vram_type;
        unsigned int num_channels;
+       unsigned int dram_channel_width_bytes;
        unsigned int dispclk_vco_khz;
        unsigned int dc_mode_softmax_memclk;
        struct clk_limit_table clk_table;
index 5815876..7614125 100644 (file)
@@ -219,6 +219,10 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
        struct dc_state *context,
        uint8_t disabled_master_pipe_idx);
 
+void reset_sync_context_for_pipe(const struct dc *dc,
+       struct dc_state *context,
+       uint8_t pipe_idx);
+
 uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter);
 
 const struct link_hwss *get_link_hwss(const struct dc_link *link,
index 859ffd8..04f7656 100644 (file)
@@ -1600,6 +1600,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
        struct fixed31_32 lut2;
        struct fixed31_32 delta_lut;
        struct fixed31_32 delta_index;
+       const struct fixed31_32 one = dc_fixpt_from_int(1);
 
        i = 0;
        /* fixed_pt library has problems handling too small values */
@@ -1628,6 +1629,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
                        } else
                                hw_x = coordinates_x[i].x;
 
+                       if (dc_fixpt_le(one, hw_x))
+                               hw_x = one;
+
                        norm_x = dc_fixpt_mul(norm_factor, hw_x);
                        index = dc_fixpt_floor(norm_x);
                        if (index < 0 || index > 255)
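The three added lines above clamp hw_x to 1.0 in DC's 31.32 fixed-point format before it feeds the regamma interpolation. Below is the same clamp reduced to a raw int64 sketch, with FP_ONE standing in for dc_fixpt_from_int(1); it is an illustration, not the driver's fixed-point library.

/* Standalone sketch of the hw_x clamp, using a plain 31.32 fixed-point
 * value instead of struct fixed31_32. */
#include <stdint.h>
#include <stdio.h>

#define FP_ONE ((int64_t)1 << 32)       /* dc_fixpt_from_int(1) equivalent */

static int64_t clamp_to_one(int64_t hw_x)
{
        /* mirrors: if (dc_fixpt_le(one, hw_x)) hw_x = one; */
        return hw_x >= FP_ONE ? FP_ONE : hw_x;
}

int main(void)
{
        int64_t x = FP_ONE + FP_ONE / 4;        /* 1.25 */

        printf("%.2f -> %.2f\n",
               x / (double)FP_ONE, clamp_to_one(x) / (double)FP_ONE);
        return 0;
}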
index 6db67f0..644ea15 100644 (file)
@@ -368,6 +368,17 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
                smu_baco->platform_support =
                        (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
                                                                        false;
+
+               /*
+                * Disable BACO entry/exit completely on the SKUs below to
+                * avoid intermittent hardware failures.
+                */
+               if (((adev->pdev->device == 0x73A1) &&
+                   (adev->pdev->revision == 0x00)) ||
+                   ((adev->pdev->device == 0x73BF) &&
+                   (adev->pdev->revision == 0xCF)))
+                       smu_baco->platform_support = false;
+
        }
 }
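The new check above turns BACO support off for two specific device/revision pairs. A table-driven form of the same test is sketched below so further SKUs could be added in one place; only the two PCI IDs come from the hunk, the struct and helper names are invented for illustration.

/* Sketch: quirk denylist for the BACO check above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct baco_quirk { uint16_t device; uint8_t revision; };

static const struct baco_quirk baco_denylist[] = {
        { 0x73A1, 0x00 },
        { 0x73BF, 0xCF },
};

static bool baco_disallowed(uint16_t device, uint8_t revision)
{
        for (size_t i = 0; i < sizeof(baco_denylist) / sizeof(baco_denylist[0]); i++)
                if (baco_denylist[i].device == device &&
                    baco_denylist[i].revision == revision)
                        return true;
        return false;
}

int main(void)
{
        printf("0x73A1 rev 0x00: %s\n",
               baco_disallowed(0x73A1, 0x00) ? "BACO disabled" : "BACO ok");
        return 0;
}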
 
index 24488f4..93f9b83 100644 (file)
@@ -209,7 +209,8 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
        if (!adev->scpm_enabled)
                return 0;
 
-       if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7))
+       if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) ||
+           (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)))
                return 0;
 
        /* override pptable_id from driver parameter */
@@ -218,27 +219,6 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
                dev_info(adev->dev, "override pptable id %d\n", pptable_id);
        } else {
                pptable_id = smu->smu_table.boot_values.pp_table_id;
-
-               /*
-                * Temporary solution for SMU V13.0.0 with SCPM enabled:
-                *   - use vbios carried pptable when pptable_id is 3664, 3715 or 3795
-                *   - use 36831 soft pptable when pptable_id is 3683
-                */
-               if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
-                       switch (pptable_id) {
-                       case 3664:
-                       case 3715:
-                       case 3795:
-                               pptable_id = 0;
-                               break;
-                       case 3683:
-                               pptable_id = 36831;
-                               break;
-                       default:
-                               dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
-                               return -EINVAL;
-                       }
-               }
        }
 
        /* "pptable_id == 0" means vbios carries the pptable. */
@@ -471,26 +451,6 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
        } else {
                pptable_id = smu->smu_table.boot_values.pp_table_id;
 
-               /*
-                * Temporary solution for SMU V13.0.0 with SCPM disabled:
-                *   - use 3664, 3683 or 3715 on request
-                *   - use 3664 when pptable_id is 0
-                * TODO: drop these when the pptable carried in vbios is ready.
-                */
-               if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
-                       switch (pptable_id) {
-                       case 0:
-                               pptable_id = 3664;
-                               break;
-                       case 3664:
-                       case 3683:
-                       case 3715:
-                               break;
-                       default:
-                               dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
-                               return -EINVAL;
-                       }
-               }
        }
 
        /* force using vbios pptable in sriov mode */
index 7db2fd9..0963275 100644 (file)
@@ -410,58 +410,11 @@ static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
        struct amdgpu_device *adev = smu->adev;
-       uint32_t pptable_id;
        int ret = 0;
 
-       /*
-        * With SCPM enabled, the pptable used will be signed. It cannot
-        * be used directly by driver. To get the raw pptable, we need to
-        * rely on the combo pptable(and its revelant SMU message).
-        */
-       if (adev->scpm_enabled) {
-               ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
-                                                       &smu_table->power_play_table,
-                                                       &smu_table->power_play_table_size);
-       } else {
-               /* override pptable_id from driver parameter */
-               if (amdgpu_smu_pptable_id >= 0) {
-                       pptable_id = amdgpu_smu_pptable_id;
-                       dev_info(adev->dev, "override pptable id %d\n", pptable_id);
-               } else {
-                       pptable_id = smu_table->boot_values.pp_table_id;
-               }
-
-               /*
-                * Temporary solution for SMU V13.0.0 with SCPM disabled:
-                *   - use vbios carried pptable when pptable_id is 3664, 3715 or 3795
-                *   - use soft pptable when pptable_id is 3683
-                */
-               if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
-                       switch (pptable_id) {
-                       case 3664:
-                       case 3715:
-                       case 3795:
-                               pptable_id = 0;
-                               break;
-                       case 3683:
-                               break;
-                       default:
-                               dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
-                               return -EINVAL;
-                       }
-               }
-
-               /* force using vbios pptable in sriov mode */
-               if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
-                       ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
-                                                               &smu_table->power_play_table,
-                                                               &smu_table->power_play_table_size);
-               else
-                       ret = smu_v13_0_get_pptable_from_firmware(smu,
-                                                                 &smu_table->power_play_table,
-                                                                 &smu_table->power_play_table_size,
-                                                                 pptable_id);
-       }
+       ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
+                                               &smu_table->power_play_table,
+                                               &smu_table->power_play_table_size);
        if (ret)
                return ret;
 
index dd32b48..ce96234 100644 (file)
@@ -581,11 +581,9 @@ static const struct psb_offset cdv_regmap[2] = {
 static int cdv_chip_setup(struct drm_device *dev)
 {
        struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
-       struct pci_dev *pdev = to_pci_dev(dev->dev);
        INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
 
-       if (pci_enable_msi(pdev))
-               dev_warn(dev->dev, "Enabling MSI failed!\n");
+       dev_priv->use_msi = true;
        dev_priv->regmap = cdv_regmap;
        gma_get_core_freq(dev);
        psb_intel_opregion_init(dev);
index dffe374..4b7627a 100644 (file)
@@ -112,12 +112,12 @@ static void psb_gem_free_object(struct drm_gem_object *obj)
 {
        struct psb_gem_object *pobj = to_psb_gem_object(obj);
 
-       drm_gem_object_release(obj);
-
        /* Undo the mmap pin if we are destroying the object */
        if (pobj->mmapping)
                psb_gem_unpin(pobj);
 
+       drm_gem_object_release(obj);
+
        WARN_ON(pobj->in_gart && !pobj->stolen);
 
        release_resource(&pobj->resource);
index bd40c04..2f52ece 100644 (file)
@@ -532,15 +532,18 @@ int gma_crtc_page_flip(struct drm_crtc *crtc,
                WARN_ON(drm_crtc_vblank_get(crtc) != 0);
 
                gma_crtc->page_flip_event = event;
+               spin_unlock_irqrestore(&dev->event_lock, flags);
 
                /* Call this locked if we want an event at vblank interrupt. */
                ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
                if (ret) {
-                       gma_crtc->page_flip_event = NULL;
-                       drm_crtc_vblank_put(crtc);
+                       spin_lock_irqsave(&dev->event_lock, flags);
+                       if (gma_crtc->page_flip_event) {
+                               gma_crtc->page_flip_event = NULL;
+                               drm_crtc_vblank_put(crtc);
+                       }
+                       spin_unlock_irqrestore(&dev->event_lock, flags);
                }
-
-               spin_unlock_irqrestore(&dev->event_lock, flags);
        } else {
                ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
        }
index 5923a9c..f90e628 100644 (file)
@@ -501,12 +501,9 @@ static const struct psb_offset oaktrail_regmap[2] = {
 static int oaktrail_chip_setup(struct drm_device *dev)
 {
        struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
-       struct pci_dev *pdev = to_pci_dev(dev->dev);
        int ret;
 
-       if (pci_enable_msi(pdev))
-               dev_warn(dev->dev, "Enabling MSI failed!\n");
-
+       dev_priv->use_msi = true;
        dev_priv->regmap = oaktrail_regmap;
 
        ret = mid_chip_setup(dev);
index b91de6d..6687308 100644 (file)
@@ -139,8 +139,6 @@ static void gma_suspend_pci(struct pci_dev *pdev)
        dev_priv->regs.saveBSM = bsm;
        pci_read_config_dword(pdev, 0xFC, &vbt);
        dev_priv->regs.saveVBT = vbt;
-       pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
-       pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
 
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
@@ -168,9 +166,6 @@ static bool gma_resume_pci(struct pci_dev *pdev)
        pci_restore_state(pdev);
        pci_write_config_dword(pdev, 0x5c, dev_priv->regs.saveBSM);
        pci_write_config_dword(pdev, 0xFC, dev_priv->regs.saveVBT);
-       /* restoring MSI address and data in PCIx space */
-       pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
-       pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
        ret = pci_enable_device(pdev);
 
        if (ret != 0)
@@ -223,8 +218,7 @@ int gma_power_resume(struct device *_dev)
        mutex_lock(&power_mutex);
        gma_resume_pci(pdev);
        gma_resume_display(pdev);
-       gma_irq_preinstall(dev);
-       gma_irq_postinstall(dev);
+       gma_irq_install(dev);
        mutex_unlock(&power_mutex);
        return 0;
 }
index 1d8744f..54e756b 100644 (file)
@@ -383,7 +383,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
        PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
        spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
 
-       gma_irq_install(dev, pdev->irq);
+       gma_irq_install(dev);
 
        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 
index 0ea3d23..731cc35 100644 (file)
@@ -490,6 +490,7 @@ struct drm_psb_private {
        int rpm_enabled;
 
        /* MID specific */
+       bool use_msi;
        bool has_gct;
        struct oaktrail_gct_data gct_data;
 
@@ -499,10 +500,6 @@ struct drm_psb_private {
        /* Register state */
        struct psb_save_area regs;
 
-       /* MSI reg save */
-       uint32_t msi_addr;
-       uint32_t msi_data;
-
        /* Hotplug handling */
        struct work_struct hotplug_work;
 
index e6e6d61..038f18e 100644 (file)
@@ -316,17 +316,24 @@ void gma_irq_postinstall(struct drm_device *dev)
        spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
 }
 
-int gma_irq_install(struct drm_device *dev, unsigned int irq)
+int gma_irq_install(struct drm_device *dev)
 {
+       struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+       struct pci_dev *pdev = to_pci_dev(dev->dev);
        int ret;
 
-       if (irq == IRQ_NOTCONNECTED)
+       if (dev_priv->use_msi && pci_enable_msi(pdev)) {
+               dev_warn(dev->dev, "Enabling MSI failed!\n");
+               dev_priv->use_msi = false;
+       }
+
+       if (pdev->irq == IRQ_NOTCONNECTED)
                return -ENOTCONN;
 
        gma_irq_preinstall(dev);
 
        /* PCI devices require shared interrupts. */
-       ret = request_irq(irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
+       ret = request_irq(pdev->irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
        if (ret)
                return ret;
 
@@ -369,6 +376,8 @@ void gma_irq_uninstall(struct drm_device *dev)
        spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
 
        free_irq(pdev->irq, dev);
+       if (dev_priv->use_msi)
+               pci_disable_msi(pdev);
 }
 
 int gma_crtc_enable_vblank(struct drm_crtc *crtc)
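With this change gma_irq_install() itself tries to enable MSI, warns and falls back to the legacy interrupt line on failure, and gma_irq_uninstall() disables MSI again, instead of the per-chip setup code calling pci_enable_msi() directly. The install/teardown pairing, reduced to a standalone sketch with stand-in helpers rather than the real PCI API:

#include <stdbool.h>
#include <stdio.h>

static bool try_enable_msi(void) { return false; }  /* pretend MSI is unavailable */
static void disable_msi(void)    { puts("msi disabled"); }

struct dev_priv { bool use_msi; };

static void irq_install(struct dev_priv *priv)
{
        if (priv->use_msi && !try_enable_msi()) {
                puts("Enabling MSI failed!");   /* warn, fall back to legacy IRQ */
                priv->use_msi = false;
        }
        puts("irq requested");
}

static void irq_uninstall(struct dev_priv *priv)
{
        puts("irq freed");
        if (priv->use_msi)                      /* undo only what was enabled */
                disable_msi();
}

int main(void)
{
        struct dev_priv priv = { .use_msi = true };

        irq_install(&priv);
        irq_uninstall(&priv);
        return 0;
}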
index b51e395..7648f69 100644 (file)
@@ -17,7 +17,7 @@ struct drm_device;
 
 void gma_irq_preinstall(struct drm_device *dev);
 void gma_irq_postinstall(struct drm_device *dev);
-int  gma_irq_install(struct drm_device *dev, unsigned int irq);
+int  gma_irq_install(struct drm_device *dev);
 void gma_irq_uninstall(struct drm_device *dev);
 
 int  gma_crtc_enable_vblank(struct drm_crtc *crtc);
index 073adfe..4e41c14 100644 (file)
@@ -2,6 +2,7 @@
 config DRM_HISI_HIBMC
        tristate "DRM Support for Hisilicon Hibmc"
        depends on DRM && PCI && (ARM64 || COMPILE_TEST)
+       depends on MMU
        select DRM_KMS_HELPER
        select DRM_VRAM_HELPER
        select DRM_TTM
index 6d11e79..f84d397 100644 (file)
@@ -23,9 +23,6 @@
 #define DRIVER_MAJOR 1
 #define DRIVER_MINOR 0
 
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
-
 DEFINE_DRM_GEM_FOPS(hv_fops);
 
 static struct drm_driver hyperv_driver = {
@@ -133,7 +130,6 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
        }
 
        ret = hyperv_setup_vram(hv, hdev);
-
        if (ret)
                goto err_vmbus_close;
 
@@ -150,18 +146,20 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
 
        ret = hyperv_mode_config_init(hv);
        if (ret)
-               goto err_vmbus_close;
+               goto err_free_mmio;
 
        ret = drm_dev_register(dev, 0);
        if (ret) {
                drm_err(dev, "Failed to register drm driver.\n");
-               goto err_vmbus_close;
+               goto err_free_mmio;
        }
 
        drm_fbdev_generic_setup(dev, 0);
 
        return 0;
 
+err_free_mmio:
+       vmbus_free_mmio(hv->mem->start, hv->fb_size);
 err_vmbus_close:
        vmbus_close(hdev->channel);
 err_hv_set_drv_data:
index 885c74f..1390729 100644 (file)
@@ -1629,6 +1629,8 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
        /* FIXME: initialize from VBT */
        vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
 
+       vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
+
        ret = intel_dsc_compute_params(crtc_state);
        if (ret)
                return ret;
index ac90d45..3ed7eea 100644 (file)
@@ -389,23 +389,13 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp)
        return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
 }
 
-static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy)
-{
-       u32 voltage;
-
-       voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK;
-
-       return voltage == VOLTAGE_INFO_0_85V;
-}
-
 static int icl_max_source_rate(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
 
-       if (intel_phy_is_combo(dev_priv, phy) &&
-           (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp)))
+       if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp))
                return 540000;
 
        return 810000;
@@ -413,23 +403,7 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
 
 static int ehl_max_source_rate(struct intel_dp *intel_dp)
 {
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-       enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
-
-       if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy))
-               return 540000;
-
-       return 810000;
-}
-
-static int dg1_max_source_rate(struct intel_dp *intel_dp)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
-
-       if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy))
+       if (intel_dp_is_edp(intel_dp))
                return 540000;
 
        return 810000;
@@ -491,7 +465,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
                        max_rate = dg2_max_source_rate(intel_dp);
                else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
                         IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
-                       max_rate = dg1_max_source_rate(intel_dp);
+                       max_rate = 810000;
                else if (IS_JSL_EHL(dev_priv))
                        max_rate = ehl_max_source_rate(intel_dp);
                else
@@ -1395,6 +1369,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
         * DP_DSC_RC_BUF_SIZE for this.
         */
        vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+       vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
 
        /*
         * Slice Height of 8 works for all currently available panels. So start
index 43e1bbc..ca530f0 100644 (file)
@@ -460,7 +460,6 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
        u8 i = 0;
 
        vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay;
-       vdsc_cfg->pic_height = pipe_config->hw.adjusted_mode.crtc_vdisplay;
        vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
                                             pipe_config->dsc.slice_count);
 
index dabdfe0..0bcde53 100644 (file)
@@ -1269,6 +1269,10 @@ static void i915_gem_context_release_work(struct work_struct *work)
        trace_i915_context_free(ctx);
        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
+       spin_lock(&ctx->i915->gem.contexts.lock);
+       list_del(&ctx->link);
+       spin_unlock(&ctx->i915->gem.contexts.lock);
+
        if (ctx->syncobj)
                drm_syncobj_put(ctx->syncobj);
 
@@ -1521,10 +1525,6 @@ static void context_close(struct i915_gem_context *ctx)
 
        ctx->file_priv = ERR_PTR(-EBADF);
 
-       spin_lock(&ctx->i915->gem.contexts.lock);
-       list_del(&ctx->link);
-       spin_unlock(&ctx->i915->gem.contexts.lock);
-
        client = ctx->client;
        if (client) {
                spin_lock(&client->ctx_lock);
index 834c707..3e91f44 100644 (file)
@@ -1438,7 +1438,12 @@ void intel_guc_busyness_park(struct intel_gt *gt)
        if (!guc_submission_initialized(guc))
                return;
 
-       cancel_delayed_work(&guc->timestamp.work);
+       /*
+        * There is a race with suspend flow where the worker runs after suspend
+        * and causes an unclaimed register access warning. Cancel the worker
+        * synchronously here.
+        */
+       cancel_delayed_work_sync(&guc->timestamp.work);
 
        /*
         * Before parking, we should sample engine busyness stats if we need to.
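The hunk above replaces cancel_delayed_work() with cancel_delayed_work_sync(), so a busyness worker that is already running must finish before the GT is parked for suspend. As a hedged userspace analogue (invented names, not the i915 code), the same idea of waiting for an in-flight deferred task instead of merely preventing future runs looks like this:

#include <pthread.h>
#include <stdio.h>

static int samples;	/* stands in for the driver state the worker touches */

static void *busyness_worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000; i++)
		samples++;	/* pretend register sampling */
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, busyness_worker, NULL);

	/*
	 * "Park": wait for the worker to finish so it cannot touch
	 * hardware state after this point, which is the guarantee the
	 * _sync variant adds in the patch above.
	 */
	pthread_join(worker, NULL);

	printf("samples=%d\n", samples);	/* always 1000 here */
	return 0;
}

Build with gcc -pthread; without the join (the analogue of the asynchronous cancel) the final read could still race with the worker.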
index 702e5b8..b605d0c 100644 (file)
@@ -1191,7 +1191,8 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
 
        intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
 
-       i915_gem_drain_freed_objects(dev_priv);
+       /* Flush any outstanding work, including i915_gem_context.release_work. */
+       i915_gem_drain_workqueue(dev_priv);
 
        drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
 }
index 3168d70..135d04c 100644 (file)
 
 #define GT0_PERF_LIMIT_REASONS         _MMIO(0x1381a8)
 #define   GT0_PERF_LIMIT_REASONS_MASK  0xde3
-#define   PROCHOT_MASK                 REG_BIT(1)
-#define   THERMAL_LIMIT_MASK           REG_BIT(2)
-#define   RATL_MASK                    REG_BIT(6)
-#define   VR_THERMALERT_MASK           REG_BIT(7)
-#define   VR_TDC_MASK                  REG_BIT(8)
-#define   POWER_LIMIT_4_MASK           REG_BIT(9)
-#define   POWER_LIMIT_1_MASK           REG_BIT(11)
-#define   POWER_LIMIT_2_MASK           REG_BIT(12)
+#define   PROCHOT_MASK                 REG_BIT(0)
+#define   THERMAL_LIMIT_MASK           REG_BIT(1)
+#define   RATL_MASK                    REG_BIT(5)
+#define   VR_THERMALERT_MASK           REG_BIT(6)
+#define   VR_TDC_MASK                  REG_BIT(7)
+#define   POWER_LIMIT_4_MASK           REG_BIT(8)
+#define   POWER_LIMIT_1_MASK           REG_BIT(10)
+#define   POWER_LIMIT_2_MASK           REG_BIT(11)
 
 #define CHV_CLK_CTL1                   _MMIO(0x101100)
 #define VLV_CLK_CTL2                   _MMIO(0x101104)
index 2603717..373582c 100644 (file)
@@ -1882,12 +1882,13 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
                enum dma_resv_usage usage;
                int idx;
 
-               obj->read_domains = 0;
                if (flags & EXEC_OBJECT_WRITE) {
                        usage = DMA_RESV_USAGE_WRITE;
                        obj->write_domain = I915_GEM_DOMAIN_RENDER;
+                       obj->read_domains = 0;
                } else {
                        usage = DMA_RESV_USAGE_READ;
+                       obj->write_domain = 0;
                }
 
                dma_fence_array_for_each(curr, idx, fence)
index 2d72cc5..6b6d533 100644 (file)
@@ -157,7 +157,7 @@ static void mtk_dither_config(struct device *dev, unsigned int w,
 {
        struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
 
-       mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
+       mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
        mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs,
                      DISP_REG_DITHER_CFG);
        mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG,
index 9cc406e..3b7d130 100644 (file)
@@ -685,6 +685,16 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
        if (--dsi->refcount != 0)
                return;
 
+       /*
+        * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
+        * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
+        * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
+        * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
+        * after dsi is fully set.
+        */
+       mtk_dsi_stop(dsi);
+
+       mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
        mtk_dsi_reset_engine(dsi);
        mtk_dsi_lane0_ulp_mode_enter(dsi);
        mtk_dsi_clk_ulp_mode_enter(dsi);
@@ -735,17 +745,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
        if (!dsi->enabled)
                return;
 
-       /*
-        * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
-        * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
-        * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
-        * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
-        * after dsi is fully set.
-        */
-       mtk_dsi_stop(dsi);
-
-       mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
-
        dsi->enabled = false;
 }
 
@@ -808,10 +807,13 @@ static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
 
 static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
        .attach = mtk_dsi_bridge_attach,
+       .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
        .atomic_disable = mtk_dsi_bridge_atomic_disable,
+       .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
        .atomic_enable = mtk_dsi_bridge_atomic_enable,
        .atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
        .atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
+       .atomic_reset = drm_atomic_helper_bridge_reset,
        .mode_set = mtk_dsi_bridge_mode_set,
 };
 
index b9ac932..03acc68 100644 (file)
@@ -170,7 +170,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
 
        /* Enable OSD and BLK0, set max global alpha */
        priv->viu.osd1_ctrl_stat = OSD_ENABLE |
-                                  (0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
+                                  (0x100 << OSD_GLOBAL_ALPHA_SHIFT) |
                                   OSD_BLK0_ENABLE;
 
        priv->viu.osd1_ctrl_stat2 = readl(priv->io_base +
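For context on the value change above: assuming OSD_GLOBAL_ALPHA is a 9-bit field in which 0x100 encodes an alpha of exactly 1.0, as the corresponding fix describes, the old value of 0xFF blends at roughly 255/256 ≈ 0.996, so the OSD plane was never quite fully opaque. The field width is taken from that fix's rationale rather than re-verified against the datasheet here.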
index bb7e109..d4b9078 100644 (file)
@@ -94,7 +94,7 @@ static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv,
                priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12));
        writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
                priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21));
-       writel((m[11] & 0x1fff) << 16,
+       writel((m[11] & 0x1fff),
                priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22));
 
        writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
index 251a1bb..a222bf7 100644 (file)
@@ -262,7 +262,11 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret)
                return ret;
 
-       drm_fbdev_generic_setup(dev, 0);
+       /*
+        * FIXME: A 24-bit color depth does not work with 24 bpp on
+        * G200ER. Force 32 bpp.
+        */
+       drm_fbdev_generic_setup(dev, 32);
 
        return 0;
 }
index cdb154c..b75c690 100644 (file)
@@ -1295,7 +1295,8 @@ static const struct panel_desc innolux_n116bca_ea1 = {
        },
        .delay = {
                .hpd_absent = 200,
-               .prepare_to_enable = 80,
+               .enable = 80,
+               .disable = 50,
                .unprepare = 500,
        },
 };
index ff5e1a4..1e716c2 100644 (file)
@@ -2257,7 +2257,7 @@ static const struct panel_desc innolux_g121i1_l01 = {
                .enable = 200,
                .disable = 20,
        },
-       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+       .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
        .connector_type = DRM_MODE_CONNECTOR_LVDS,
 };
 
index c204e9b..518ee13 100644 (file)
@@ -283,8 +283,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
        return ret;
 }
 
-static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
-                                      struct drm_display_mode *mode)
+static enum drm_mode_status
+cdn_dp_connector_mode_valid(struct drm_connector *connector,
+                           struct drm_display_mode *mode)
 {
        struct cdn_dp_device *dp = connector_to_dp(connector);
        struct drm_display_info *display_info = &dp->connector.display_info;
index e4631f5..f9aa8b9 100644 (file)
@@ -1439,11 +1439,15 @@ static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id,
                die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX;
                die |= RK3568_SYS_DSP_INFACE_EN_HDMI |
                           FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id);
+               dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL;
+               dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags);
                break;
        case ROCKCHIP_VOP2_EP_EDP0:
                die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX;
                die |= RK3568_SYS_DSP_INFACE_EN_EDP |
                           FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id);
+               dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL;
+               dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags);
                break;
        case ROCKCHIP_VOP2_EP_MIPI0:
                die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX;
index 660036d..922d83e 100644 (file)
@@ -129,7 +129,7 @@ static void fcopy_send_data(struct work_struct *dummy)
 
        /*
         * The  strings sent from the host are encoded in
-        * in utf16; convert it to utf8 strings.
+        * utf16; convert it to utf8 strings.
         * The host assures us that the utf16 strings will not exceed
         * the max lengths specified. We will however, reserve room
         * for the string terminating character - in the utf16s_utf8s()
index 23c680d..3c833ea 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/kernel.h>
 #include <linux/syscore_ops.h>
 #include <linux/dma-map-ops.h>
+#include <linux/pci.h>
 #include <clocksource/hyperv_timer.h>
 #include "hyperv_vmbus.h"
 
@@ -2262,26 +2263,43 @@ static int vmbus_acpi_remove(struct acpi_device *device)
 
 static void vmbus_reserve_fb(void)
 {
-       int size;
+       resource_size_t start = 0, size;
+       struct pci_dev *pdev;
+
+       if (efi_enabled(EFI_BOOT)) {
+               /* Gen2 VM: get FB base from EFI framebuffer */
+               start = screen_info.lfb_base;
+               size = max_t(__u32, screen_info.lfb_size, 0x800000);
+       } else {
+               /* Gen1 VM: get FB base from PCI */
+               pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+                                     PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+               if (!pdev)
+                       return;
+
+               if (pdev->resource[0].flags & IORESOURCE_MEM) {
+                       start = pci_resource_start(pdev, 0);
+                       size = pci_resource_len(pdev, 0);
+               }
+
+               /*
+                * Release the PCI device so hyperv_drm or hyperv_fb driver can
+                * grab it later.
+                */
+               pci_dev_put(pdev);
+       }
+
+       if (!start)
+               return;
+
        /*
         * Make a claim for the frame buffer in the resource tree under the
         * first node, which will be the one below 4GB.  The length seems to
         * be underreported, particularly in a Generation 1 VM.  So start out
         * reserving a larger area and make it smaller until it succeeds.
         */
-
-       if (screen_info.lfb_base) {
-               if (efi_enabled(EFI_BOOT))
-                       size = max_t(__u32, screen_info.lfb_size, 0x800000);
-               else
-                       size = max_t(__u32, screen_info.lfb_size, 0x4000000);
-
-               for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
-                       fb_mmio = __request_region(hyperv_mmio,
-                                                  screen_info.lfb_base, size,
-                                                  fb_mmio_name, 0);
-               }
-       }
+       for (; !fb_mmio && (size >= 0x100000); size >>= 1)
+               fb_mmio = __request_region(hyperv_mmio, start, size, fb_mmio_name, 0);
 }
 
 /**
@@ -2313,7 +2331,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
                        bool fb_overlap_ok)
 {
        struct resource *iter, *shadow;
-       resource_size_t range_min, range_max, start;
+       resource_size_t range_min, range_max, start, end;
        const char *dev_n = dev_name(&device_obj->device);
        int retval;
 
@@ -2348,6 +2366,14 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
                range_max = iter->end;
                start = (range_min + align - 1) & ~(align - 1);
                for (; start + size - 1 <= range_max; start += align) {
+                       end = start + size - 1;
+
+                       /* Skip the whole fb_mmio region if not fb_overlap_ok */
+                       if (!fb_overlap_ok && fb_mmio &&
+                           (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
+                            ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
+                               continue;
+
                        shadow = __request_region(iter, start, size, NULL,
                                                  IORESOURCE_BUSY);
                        if (!shadow)
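The second hunk above makes vmbus_allocate_mmio() step over any candidate range that touches the reserved framebuffer window unless the caller explicitly allowed framebuffer overlap. A minimal sketch of that interval test, with invented names and example addresses rather than Hyper-V code, could look like:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Same two-endpoint test as the hunk above: a candidate [start, end]
 * is rejected when either endpoint lands inside [fb_start, fb_end].
 */
static bool overlaps_fb(uint64_t start, uint64_t end,
			uint64_t fb_start, uint64_t fb_end)
{
	return (start >= fb_start && start <= fb_end) ||
	       (end >= fb_start && end <= fb_end);
}

int main(void)
{
	uint64_t fb_start = 0xf8000000, fb_end = 0xf87fffff; /* example 8 MiB window */

	printf("%d\n", overlaps_fb(0xf8400000, 0xf84fffff, fb_start, fb_end)); /* 1 */
	printf("%d\n", overlaps_fb(0xf9000000, 0xf90fffff, fb_start, fb_end)); /* 0 */
	return 0;
}

In the driver the rejected candidate is simply skipped and the search continues at the next aligned start address.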
index e47fa34..3082183 100644 (file)
@@ -1583,7 +1583,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
        if (i2c_imx->dma)
                i2c_imx_dma_free(i2c_imx);
 
-       if (ret == 0) {
+       if (ret >= 0) {
                /* setup chip registers to defaults */
                imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
                imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
index 8716032..ad5efd7 100644 (file)
@@ -6,6 +6,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/bitfield.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
  */
 #define MLXBF_I2C_TYU_PLL_OUT_FREQ  (400 * 1000 * 1000)
 /* Reference clock for Bluefield - 156 MHz. */
-#define MLXBF_I2C_PLL_IN_FREQ       (156 * 1000 * 1000)
+#define MLXBF_I2C_PLL_IN_FREQ       156250000ULL
 
 /* Constant used to determine the PLL frequency. */
-#define MLNXBF_I2C_COREPLL_CONST    16384
+#define MLNXBF_I2C_COREPLL_CONST    16384ULL
+
+#define MLXBF_I2C_FREQUENCY_1GHZ  1000000000ULL
 
 /* PLL registers. */
-#define MLXBF_I2C_CORE_PLL_REG0         0x0
 #define MLXBF_I2C_CORE_PLL_REG1         0x4
 #define MLXBF_I2C_CORE_PLL_REG2         0x8
 
 #define MLXBF_I2C_COREPLL_FREQ          MLXBF_I2C_TYU_PLL_OUT_FREQ
 
 /* Core PLL TYU configuration. */
-#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK   GENMASK(12, 0)
-#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK  GENMASK(3, 0)
-#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK   GENMASK(5, 0)
-
-#define MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT  3
-#define MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT 16
-#define MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT  20
+#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK   GENMASK(15, 3)
+#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK  GENMASK(19, 16)
+#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK   GENMASK(25, 20)
 
 /* Core PLL YU configuration. */
 #define MLXBF_I2C_COREPLL_CORE_F_YU_MASK    GENMASK(25, 0)
 #define MLXBF_I2C_COREPLL_CORE_OD_YU_MASK   GENMASK(3, 0)
-#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK    GENMASK(5, 0)
+#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK    GENMASK(31, 26)
 
-#define MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT   0
-#define MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT  1
-#define MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT   26
 
 /* Core PLL frequency. */
 static u64 mlxbf_i2c_corepll_frequency;
@@ -479,8 +474,6 @@ static struct mutex mlxbf_i2c_bus_lock;
 #define MLXBF_I2C_MASK_8    GENMASK(7, 0)
 #define MLXBF_I2C_MASK_16   GENMASK(15, 0)
 
-#define MLXBF_I2C_FREQUENCY_1GHZ  1000000000
-
 /*
  * Function to poll a set of bits at a specific address; it checks whether
  * the bits are equal to zero when eq_zero is set to 'true', and not equal
@@ -669,7 +662,7 @@ static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave,
        /* Clear status bits. */
        writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS);
        /* Set the cause data. */
-       writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR);
+       writel(~0x0, priv->mst_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR);
        /* Zero PEC byte. */
        writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC);
        /* Zero byte count. */
@@ -738,6 +731,9 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
                if (flags & MLXBF_I2C_F_WRITE) {
                        write_en = 1;
                        write_len += operation->length;
+                       if (data_idx + operation->length >
+                                       MLXBF_I2C_MASTER_DATA_DESC_SIZE)
+                               return -ENOBUFS;
                        memcpy(data_desc + data_idx,
                               operation->buffer, operation->length);
                        data_idx += operation->length;
@@ -1407,24 +1403,19 @@ static int mlxbf_i2c_init_master(struct platform_device *pdev,
        return 0;
 }
 
-static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
+static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
 {
-       u64 core_frequency, pad_frequency;
+       u64 core_frequency;
        u8 core_od, core_r;
        u32 corepll_val;
        u16 core_f;
 
-       pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
-
        corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
 
        /* Get Core PLL configuration bits. */
-       core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) &
-                       MLXBF_I2C_COREPLL_CORE_F_TYU_MASK;
-       core_od = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT) &
-                       MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK;
-       core_r = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT) &
-                       MLXBF_I2C_COREPLL_CORE_R_TYU_MASK;
+       core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_TYU_MASK, corepll_val);
+       core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK, corepll_val);
+       core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_TYU_MASK, corepll_val);
 
        /*
         * Compute PLL output frequency as follow:
@@ -1436,31 +1427,26 @@ static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
         * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
         * and PadFrequency, respectively.
         */
-       core_frequency = pad_frequency * (++core_f);
+       core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f);
        core_frequency /= (++core_r) * (++core_od);
 
        return core_frequency;
 }
 
-static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
+static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
 {
        u32 corepll_reg1_val, corepll_reg2_val;
-       u64 corepll_frequency, pad_frequency;
+       u64 corepll_frequency;
        u8 core_od, core_r;
        u32 core_f;
 
-       pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
-
        corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
        corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2);
 
        /* Get Core PLL configuration bits */
-       core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) &
-                       MLXBF_I2C_COREPLL_CORE_F_YU_MASK;
-       core_r = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT) &
-                       MLXBF_I2C_COREPLL_CORE_R_YU_MASK;
-       core_od = rol32(corepll_reg2_val,  MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT) &
-                       MLXBF_I2C_COREPLL_CORE_OD_YU_MASK;
+       core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_YU_MASK, corepll_reg1_val);
+       core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_YU_MASK, corepll_reg1_val);
+       core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_YU_MASK, corepll_reg2_val);
 
        /*
         * Compute PLL output frequency as follow:
@@ -1472,7 +1458,7 @@ static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
         * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
         * and PadFrequency, respectively.
         */
-       corepll_frequency = (pad_frequency * core_f) / MLNXBF_I2C_COREPLL_CONST;
+       corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST;
        corepll_frequency /= (++core_r) * (++core_od);
 
        return corepll_frequency;
@@ -2180,14 +2166,14 @@ static struct mlxbf_i2c_chip_info mlxbf_i2c_chip[] = {
                        [1] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_1],
                        [2] = &mlxbf_i2c_gpio_res[MLXBF_I2C_CHIP_TYPE_1]
                },
-               .calculate_freq = mlxbf_calculate_freq_from_tyu
+               .calculate_freq = mlxbf_i2c_calculate_freq_from_tyu
        },
        [MLXBF_I2C_CHIP_TYPE_2] = {
                .type = MLXBF_I2C_CHIP_TYPE_2,
                .shared_res = {
                        [0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_2]
                },
-               .calculate_freq = mlxbf_calculate_freq_from_yu
+               .calculate_freq = mlxbf_i2c_calculate_freq_from_yu
        }
 };
 
index 774507b..313904b 100644 (file)
@@ -243,9 +243,10 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
                                   int (*deselect)(struct i2c_mux_core *, u32))
 {
        struct i2c_mux_core *muxc;
+       size_t mux_size;
 
-       muxc = devm_kzalloc(dev, struct_size(muxc, adapter, max_adapters)
-                           + sizeof_priv, GFP_KERNEL);
+       mux_size = struct_size(muxc, adapter, max_adapters);
+       muxc = devm_kzalloc(dev, size_add(mux_size, sizeof_priv), GFP_KERNEL);
        if (!muxc)
                return NULL;
        if (sizeof_priv)
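The allocation above now computes struct_size(muxc, adapter, max_adapters) separately and combines it with the caller-supplied sizeof_priv through size_add(), so an overflowing total saturates and devm_kzalloc() fails cleanly instead of returning an undersized buffer. A hedged userspace model of that saturating addition, where size_add_saturating() is an invented stand-in for the kernel helper:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Saturate to SIZE_MAX on overflow so a later allocation fails
 * rather than silently succeeding with too little memory. */
static size_t size_add_saturating(size_t a, size_t b)
{
	size_t sum = a + b;

	return sum < a ? SIZE_MAX : sum;
}

int main(void)
{
	size_t mux_size = 4096;			/* e.g. struct plus flexible array */
	size_t priv_size = SIZE_MAX - 16;	/* pathological private size */

	/* Prints SIZE_MAX instead of a small wrapped value. */
	printf("%zu\n", size_add_saturating(mux_size, priv_size));
	return 0;
}

An allocator asked for SIZE_MAX bytes will refuse the request, which is exactly the failure mode the kernel helper is designed to force.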
index 497c912..5a8f780 100644 (file)
@@ -2349,13 +2349,6 @@ static int dmar_device_hotplug(acpi_handle handle, bool insert)
        if (!dmar_in_use())
                return 0;
 
-       /*
-        * It's unlikely that any I/O board is hot added before the IOMMU
-        * subsystem is initialized.
-        */
-       if (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled)
-               return -EOPNOTSUPP;
-
        if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
                tmp = handle;
        } else {
index 1f2cd43..31bc50e 100644 (file)
@@ -399,7 +399,7 @@ static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
 {
        unsigned long fl_sagaw, sl_sagaw;
 
-       fl_sagaw = BIT(2) | (cap_fl1gp_support(iommu->cap) ? BIT(3) : 0);
+       fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0);
        sl_sagaw = cap_sagaw(iommu->cap);
 
        /* Second level only. */
@@ -3019,7 +3019,13 @@ static int __init init_dmars(void)
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
                if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
+                       /*
+                        * Call dmar_alloc_hwirq() with dmar_global_lock held,
+                        * could cause possible lock race condition.
+                        */
+                       up_write(&dmar_global_lock);
                        ret = intel_svm_enable_prq(iommu);
+                       down_write(&dmar_global_lock);
                        if (ret)
                                goto free_iommu;
                }
@@ -3932,6 +3938,7 @@ int __init intel_iommu_init(void)
        force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
                    platform_optin_force_iommu();
 
+       down_write(&dmar_global_lock);
        if (dmar_table_init()) {
                if (force_on)
                        panic("tboot: Failed to initialize DMAR table\n");
@@ -3944,6 +3951,16 @@ int __init intel_iommu_init(void)
                goto out_free_dmar;
        }
 
+       up_write(&dmar_global_lock);
+
+       /*
+        * The bus notifier takes the dmar_global_lock, so lockdep will
+        * complain later when we register it under the lock.
+        */
+       dmar_register_bus_notifier();
+
+       down_write(&dmar_global_lock);
+
        if (!no_iommu)
                intel_iommu_debugfs_init();
 
@@ -3988,9 +4005,11 @@ int __init intel_iommu_init(void)
                pr_err("Initialization failed\n");
                goto out_free_dmar;
        }
+       up_write(&dmar_global_lock);
 
        init_iommu_pm_ops();
 
+       down_read(&dmar_global_lock);
        for_each_active_iommu(iommu, drhd) {
                /*
                 * The flush queue implementation does not perform
@@ -4008,11 +4027,13 @@ int __init intel_iommu_init(void)
                                       "%s", iommu->name);
                iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
        }
+       up_read(&dmar_global_lock);
 
        bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
        if (si_domain && !hw_pass_through)
                register_memory_notifier(&intel_iommu_memory_nb);
 
+       down_read(&dmar_global_lock);
        if (probe_acpi_namespace_devices())
                pr_warn("ACPI name space devices didn't probe correctly\n");
 
@@ -4023,15 +4044,17 @@ int __init intel_iommu_init(void)
 
                iommu_disable_protect_mem_regions(iommu);
        }
+       up_read(&dmar_global_lock);
 
-       intel_iommu_enabled = 1;
-       dmar_register_bus_notifier();
        pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
 
+       intel_iommu_enabled = 1;
+
        return 0;
 
 out_free_dmar:
        intel_iommu_free_dmars();
+       up_write(&dmar_global_lock);
        return ret;
 }
 
index 7835bb0..e012b21 100644 (file)
@@ -511,7 +511,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
 
        if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1)
                return -ENODEV;
-       if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[1].desc))
+       if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[0].desc))
                return -ENODEV;
 
        switch (fc_usb->udev->speed) {
index 184608b..e58a1e0 100644 (file)
@@ -88,8 +88,9 @@ static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
 static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;
 static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
 
-static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
-       MULTICAST_LACPDU_ADDR;
+const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = {
+       0x01, 0x80, 0xC2, 0x00, 0x00, 0x02
+};
 
 /* ================= main 802.3ad protocol functions ================== */
 static int ad_lacpdu_send(struct port *port);
index 5c2febe..86d4230 100644 (file)
@@ -865,12 +865,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
        dev_uc_unsync(slave_dev, bond_dev);
        dev_mc_unsync(slave_dev, bond_dev);
 
-       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
-               /* del lacpdu mc addr from mc list */
-               u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
-               dev_mc_del(slave_dev, lacpdu_multicast);
-       }
+       if (BOND_MODE(bond) == BOND_MODE_8023AD)
+               dev_mc_del(slave_dev, lacpdu_mcast_addr);
 }
 
 /*--------------------------- Active slave change ---------------------------*/
@@ -890,7 +886,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
                if (bond->dev->flags & IFF_ALLMULTI)
                        dev_set_allmulti(old_active->dev, -1);
 
-               bond_hw_addr_flush(bond->dev, old_active->dev);
+               if (bond->dev->flags & IFF_UP)
+                       bond_hw_addr_flush(bond->dev, old_active->dev);
        }
 
        if (new_active) {
@@ -901,10 +898,12 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
                if (bond->dev->flags & IFF_ALLMULTI)
                        dev_set_allmulti(new_active->dev, 1);
 
-               netif_addr_lock_bh(bond->dev);
-               dev_uc_sync(new_active->dev, bond->dev);
-               dev_mc_sync(new_active->dev, bond->dev);
-               netif_addr_unlock_bh(bond->dev);
+               if (bond->dev->flags & IFF_UP) {
+                       netif_addr_lock_bh(bond->dev);
+                       dev_uc_sync(new_active->dev, bond->dev);
+                       dev_mc_sync(new_active->dev, bond->dev);
+                       netif_addr_unlock_bh(bond->dev);
+               }
        }
 }
 
@@ -2166,16 +2165,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
                        }
                }
 
-               netif_addr_lock_bh(bond_dev);
-               dev_mc_sync_multiple(slave_dev, bond_dev);
-               dev_uc_sync_multiple(slave_dev, bond_dev);
-               netif_addr_unlock_bh(bond_dev);
-
-               if (BOND_MODE(bond) == BOND_MODE_8023AD) {
-                       /* add lacpdu mc addr to mc list */
-                       u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+               if (bond_dev->flags & IFF_UP) {
+                       netif_addr_lock_bh(bond_dev);
+                       dev_mc_sync_multiple(slave_dev, bond_dev);
+                       dev_uc_sync_multiple(slave_dev, bond_dev);
+                       netif_addr_unlock_bh(bond_dev);
 
-                       dev_mc_add(slave_dev, lacpdu_multicast);
+                       if (BOND_MODE(bond) == BOND_MODE_8023AD)
+                               dev_mc_add(slave_dev, lacpdu_mcast_addr);
                }
        }
 
@@ -2447,7 +2444,8 @@ static int __bond_release_one(struct net_device *bond_dev,
                if (old_flags & IFF_ALLMULTI)
                        dev_set_allmulti(slave_dev, -1);
 
-               bond_hw_addr_flush(bond_dev, slave_dev);
+               if (old_flags & IFF_UP)
+                       bond_hw_addr_flush(bond_dev, slave_dev);
        }
 
        slave_disable_netpoll(slave);
@@ -4184,6 +4182,12 @@ static int bond_open(struct net_device *bond_dev)
        struct list_head *iter;
        struct slave *slave;
 
+       if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) {
+               bond->rr_tx_counter = alloc_percpu(u32);
+               if (!bond->rr_tx_counter)
+                       return -ENOMEM;
+       }
+
        /* reset slave->backup and slave->inactive */
        if (bond_has_slaves(bond)) {
                bond_for_each_slave(bond, slave, iter) {
@@ -4221,6 +4225,9 @@ static int bond_open(struct net_device *bond_dev)
                /* register to receive LACPDUs */
                bond->recv_probe = bond_3ad_lacpdu_recv;
                bond_3ad_initiate_agg_selection(bond, 1);
+
+               bond_for_each_slave(bond, slave, iter)
+                       dev_mc_add(slave->dev, lacpdu_mcast_addr);
        }
 
        if (bond_mode_can_use_xmit_hash(bond))
@@ -4232,6 +4239,7 @@ static int bond_open(struct net_device *bond_dev)
 static int bond_close(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct slave *slave;
 
        bond_work_cancel_all(bond);
        bond->send_peer_notif = 0;
@@ -4239,6 +4247,19 @@ static int bond_close(struct net_device *bond_dev)
                bond_alb_deinitialize(bond);
        bond->recv_probe = NULL;
 
+       if (bond_uses_primary(bond)) {
+               rcu_read_lock();
+               slave = rcu_dereference(bond->curr_active_slave);
+               if (slave)
+                       bond_hw_addr_flush(bond_dev, slave->dev);
+               rcu_read_unlock();
+       } else {
+               struct list_head *iter;
+
+               bond_for_each_slave(bond, slave, iter)
+                       bond_hw_addr_flush(bond_dev, slave->dev);
+       }
+
        return 0;
 }
 
@@ -6228,15 +6249,6 @@ static int bond_init(struct net_device *bond_dev)
        if (!bond->wq)
                return -ENOMEM;
 
-       if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
-               bond->rr_tx_counter = alloc_percpu(u32);
-               if (!bond->rr_tx_counter) {
-                       destroy_workqueue(bond->wq);
-                       bond->wq = NULL;
-                       return -ENOMEM;
-               }
-       }
-
        spin_lock_init(&bond->stats_lock);
        netdev_lockdep_set_classes(bond_dev);
 
index f857968..ccb438e 100644 (file)
@@ -941,11 +941,6 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
        u32 reg_ctrl, reg_id, reg_iflag1;
        int i;
 
-       if (unlikely(drop)) {
-               skb = ERR_PTR(-ENOBUFS);
-               goto mark_as_read;
-       }
-
        mb = flexcan_get_mb(priv, n);
 
        if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
@@ -974,6 +969,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
                reg_ctrl = priv->read(&mb->can_ctrl);
        }
 
+       if (unlikely(drop)) {
+               skb = ERR_PTR(-ENOBUFS);
+               goto mark_as_read;
+       }
+
        if (reg_ctrl & FLEXCAN_MB_CNT_EDL)
                skb = alloc_canfd_skb(offload->dev, &cfd);
        else
index baf749c..c1ff3c0 100644 (file)
@@ -824,6 +824,7 @@ static int gs_can_open(struct net_device *netdev)
                flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
 
        /* finally start device */
+       dev->can.state = CAN_STATE_ERROR_ACTIVE;
        dm->mode = cpu_to_le32(GS_CAN_MODE_START);
        dm->flags = cpu_to_le32(flags);
        rc = usb_control_msg(interface_to_usbdev(dev->iface),
@@ -835,13 +836,12 @@ static int gs_can_open(struct net_device *netdev)
        if (rc < 0) {
                netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
                kfree(dm);
+               dev->can.state = CAN_STATE_STOPPED;
                return rc;
        }
 
        kfree(dm);
 
-       dev->can.state = CAN_STATE_ERROR_ACTIVE;
-
        parent->active_channels++;
        if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
                netif_start_queue(netdev);
@@ -925,17 +925,21 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
 }
 
 /* blink LED's for finding the this interface */
-static int gs_usb_set_phys_id(struct net_device *dev,
+static int gs_usb_set_phys_id(struct net_device *netdev,
                              enum ethtool_phys_id_state state)
 {
+       const struct gs_can *dev = netdev_priv(netdev);
        int rc = 0;
 
+       if (!(dev->feature & GS_CAN_FEATURE_IDENTIFY))
+               return -EOPNOTSUPP;
+
        switch (state) {
        case ETHTOOL_ID_ACTIVE:
-               rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_ON);
+               rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_ON);
                break;
        case ETHTOOL_ID_INACTIVE:
-               rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_OFF);
+               rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_OFF);
                break;
        default:
                break;
@@ -1072,9 +1076,10 @@ static struct gs_can *gs_make_candev(unsigned int channel,
                dev->feature |= GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX |
                        GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO;
 
-       if (le32_to_cpu(dconf->sw_version) > 1)
-               if (feature & GS_CAN_FEATURE_IDENTIFY)
-                       netdev->ethtool_ops = &gs_usb_ethtool_ops;
+       /* GS_CAN_FEATURE_IDENTIFY is only supported for sw_version > 1 */
+       if (!(le32_to_cpu(dconf->sw_version) > 1 &&
+             feature & GS_CAN_FEATURE_IDENTIFY))
+               dev->feature &= ~GS_CAN_FEATURE_IDENTIFY;
 
        kfree(bt_const);
 
index daedd2b..5579644 100644 (file)
@@ -244,10 +244,6 @@ void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port)
                lan937x_port_cfg(dev, port, REG_PORT_CTRL_0,
                                 PORT_TAIL_TAG_ENABLE, true);
 
-       /* disable frame check length field */
-       lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0, PORT_CHECK_LENGTH,
-                        false);
-
        /* set back pressure for half duplex */
        lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE,
                         true);
index 8859586..8a0af37 100644 (file)
@@ -94,11 +94,8 @@ static int aq_ndev_close(struct net_device *ndev)
        int err = 0;
 
        err = aq_nic_stop(aq_nic);
-       if (err < 0)
-               goto err_exit;
        aq_nic_deinit(aq_nic, true);
 
-err_exit:
        return err;
 }
 
index f46eefb..96da0ba 100644 (file)
@@ -659,7 +659,6 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 
        for (i = 0; i < nr_pkts; i++) {
                struct bnxt_sw_tx_bd *tx_buf;
-               bool compl_deferred = false;
                struct sk_buff *skb;
                int j, last;
 
@@ -668,6 +667,8 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
                skb = tx_buf->skb;
                tx_buf->skb = NULL;
 
+               tx_bytes += skb->len;
+
                if (tx_buf->is_push) {
                        tx_buf->is_push = 0;
                        goto next_tx_int;
@@ -688,8 +689,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
                }
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
                        if (bp->flags & BNXT_FLAG_CHIP_P5) {
+                               /* PTP worker takes ownership of the skb */
                                if (!bnxt_get_tx_ts_p5(bp, skb))
-                                       compl_deferred = true;
+                                       skb = NULL;
                                else
                                        atomic_inc(&bp->ptp_cfg->tx_avail);
                        }
@@ -698,9 +700,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 next_tx_int:
                cons = NEXT_TX(cons);
 
-               tx_bytes += skb->len;
-               if (!compl_deferred)
-                       dev_kfree_skb_any(skb);
+               dev_kfree_skb_any(skb);
        }
 
        netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
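The bnxt_tx_int() change above accounts tx_bytes while the skb pointer is still valid and then clears the local pointer once the PTP worker takes ownership, so the shared dev_kfree_skb_any() at the end of the loop cannot free a buffer it no longer owns. A hedged userspace sketch of that account-then-hand-off pattern, with all names invented:

#include <stdio.h>
#include <stdlib.h>

struct pkt { size_t len; };

/* Pretend worker that takes ownership and frees the packet itself. */
static int hand_off(struct pkt *p)
{
	free(p);
	return 0;	/* 0 means accepted, like !bnxt_get_tx_ts_p5() above */
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));
	size_t tx_bytes = 0;

	if (!p)
		return 1;
	p->len = 1500;

	tx_bytes += p->len;	/* account first, while the buffer is still ours */
	if (hand_off(p) == 0)
		p = NULL;	/* ownership moved; do not free it again below */

	free(p);		/* no-op when p is NULL */
	printf("tx_bytes=%zu\n", tx_bytes);
	return 0;
}

In the old code tx_bytes was accumulated after the hand-off, through a pointer the PTP worker might already own; moving the accounting up and nulling skb removes both that read and the compl_deferred bookkeeping.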
index 7f3c087..8e31636 100644 (file)
@@ -317,9 +317,9 @@ void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
 
        if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters &
            (PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
-            PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE))) {
+            PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE))) {
                ptp->tstamp_filters &= ~(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
-                                        PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE);
+                                        PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE);
                netdev_warn(bp->dev, "Unsupported FW for all RX pkts timestamp filter\n");
        }
 
index a139f2e..e0e8dfd 100644 (file)
@@ -9,7 +9,6 @@ fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
 
 obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
 fsl-enetc-vf-y := enetc_vf.o $(common-objs)
-fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
 
 obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o
 fsl-enetc-ierb-y := enetc_ierb.o
index 4470a4a..9f5b921 100644 (file)
@@ -2432,7 +2432,7 @@ int enetc_close(struct net_device *ndev)
        return 0;
 }
 
-static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
 {
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
        struct tc_mqprio_qopt *mqprio = type_data;
@@ -2486,25 +2486,6 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
        return 0;
 }
 
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
-                  void *type_data)
-{
-       switch (type) {
-       case TC_SETUP_QDISC_MQPRIO:
-               return enetc_setup_tc_mqprio(ndev, type_data);
-       case TC_SETUP_QDISC_TAPRIO:
-               return enetc_setup_tc_taprio(ndev, type_data);
-       case TC_SETUP_QDISC_CBS:
-               return enetc_setup_tc_cbs(ndev, type_data);
-       case TC_SETUP_QDISC_ETF:
-               return enetc_setup_tc_txtime(ndev, type_data);
-       case TC_SETUP_BLOCK:
-               return enetc_setup_tc_psfp(ndev, type_data);
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
 static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
                                struct netlink_ext_ack *extack)
 {
@@ -2600,29 +2581,6 @@ static int enetc_set_rss(struct net_device *ndev, int en)
        return 0;
 }
 
-static int enetc_set_psfp(struct net_device *ndev, int en)
-{
-       struct enetc_ndev_priv *priv = netdev_priv(ndev);
-       int err;
-
-       if (en) {
-               err = enetc_psfp_enable(priv);
-               if (err)
-                       return err;
-
-               priv->active_offloads |= ENETC_F_QCI;
-               return 0;
-       }
-
-       err = enetc_psfp_disable(priv);
-       if (err)
-               return err;
-
-       priv->active_offloads &= ~ENETC_F_QCI;
-
-       return 0;
-}
-
 static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
 {
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -2641,11 +2599,9 @@ static void enetc_enable_txvlan(struct net_device *ndev, bool en)
                enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
 }
 
-int enetc_set_features(struct net_device *ndev,
-                      netdev_features_t features)
+void enetc_set_features(struct net_device *ndev, netdev_features_t features)
 {
        netdev_features_t changed = ndev->features ^ features;
-       int err = 0;
 
        if (changed & NETIF_F_RXHASH)
                enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
@@ -2657,11 +2613,6 @@ int enetc_set_features(struct net_device *ndev,
        if (changed & NETIF_F_HW_VLAN_CTAG_TX)
                enetc_enable_txvlan(ndev,
                                    !!(features & NETIF_F_HW_VLAN_CTAG_TX));
-
-       if (changed & NETIF_F_HW_TC)
-               err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
-
-       return err;
 }
 
 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
index 29922c2..2cfe694 100644 (file)
@@ -393,11 +393,9 @@ void enetc_start(struct net_device *ndev);
 void enetc_stop(struct net_device *ndev);
 netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
 struct net_device_stats *enetc_get_stats(struct net_device *ndev);
-int enetc_set_features(struct net_device *ndev,
-                      netdev_features_t features);
+void enetc_set_features(struct net_device *ndev, netdev_features_t features);
 int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
-                  void *type_data);
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data);
 int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
 int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
                   struct xdp_frame **frames, u32 flags);
@@ -465,6 +463,7 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
 int enetc_psfp_init(struct enetc_ndev_priv *priv);
 int enetc_psfp_clean(struct enetc_ndev_priv *priv);
+int enetc_set_psfp(struct net_device *ndev, bool en);
 
 static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
 {
@@ -540,4 +539,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
 {
        return 0;
 }
+
+static inline int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+       return 0;
+}
 #endif
index c4a0e83..bb77502 100644 (file)
@@ -709,6 +709,13 @@ static int enetc_pf_set_features(struct net_device *ndev,
 {
        netdev_features_t changed = ndev->features ^ features;
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
+       int err;
+
+       if (changed & NETIF_F_HW_TC) {
+               err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
+               if (err)
+                       return err;
+       }
 
        if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
                struct enetc_pf *pf = enetc_si_priv(priv->si);
@@ -722,7 +729,28 @@ static int enetc_pf_set_features(struct net_device *ndev,
        if (changed & NETIF_F_LOOPBACK)
                enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
 
-       return enetc_set_features(ndev, features);
+       enetc_set_features(ndev, features);
+
+       return 0;
+}
+
+static int enetc_pf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+                            void *type_data)
+{
+       switch (type) {
+       case TC_SETUP_QDISC_MQPRIO:
+               return enetc_setup_tc_mqprio(ndev, type_data);
+       case TC_SETUP_QDISC_TAPRIO:
+               return enetc_setup_tc_taprio(ndev, type_data);
+       case TC_SETUP_QDISC_CBS:
+               return enetc_setup_tc_cbs(ndev, type_data);
+       case TC_SETUP_QDISC_ETF:
+               return enetc_setup_tc_txtime(ndev, type_data);
+       case TC_SETUP_BLOCK:
+               return enetc_setup_tc_psfp(ndev, type_data);
+       default:
+               return -EOPNOTSUPP;
+       }
 }
 
 static const struct net_device_ops enetc_ndev_ops = {
@@ -739,7 +767,7 @@ static const struct net_device_ops enetc_ndev_ops = {
        .ndo_set_vf_spoofchk    = enetc_pf_set_vf_spoofchk,
        .ndo_set_features       = enetc_pf_set_features,
        .ndo_eth_ioctl          = enetc_ioctl,
-       .ndo_setup_tc           = enetc_setup_tc,
+       .ndo_setup_tc           = enetc_pf_setup_tc,
        .ndo_bpf                = enetc_setup_bpf,
        .ndo_xdp_xmit           = enetc_xdp_xmit,
 };
index 582a663..f8a2f02 100644 (file)
@@ -1517,6 +1517,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
        }
 }
 
+int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+       struct enetc_ndev_priv *priv = netdev_priv(ndev);
+       int err;
+
+       if (en) {
+               err = enetc_psfp_enable(priv);
+               if (err)
+                       return err;
+
+               priv->active_offloads |= ENETC_F_QCI;
+               return 0;
+       }
+
+       err = enetc_psfp_disable(priv);
+       if (err)
+               return err;
+
+       priv->active_offloads &= ~ENETC_F_QCI;
+
+       return 0;
+}
+
 int enetc_psfp_init(struct enetc_ndev_priv *priv)
 {
        if (epsfp.psfp_sfi_bitmap)
index 1792430..dfcaac3 100644 (file)
@@ -88,7 +88,20 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
 static int enetc_vf_set_features(struct net_device *ndev,
                                 netdev_features_t features)
 {
-       return enetc_set_features(ndev, features);
+       enetc_set_features(ndev, features);
+
+       return 0;
+}
+
+static int enetc_vf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+                            void *type_data)
+{
+       switch (type) {
+       case TC_SETUP_QDISC_MQPRIO:
+               return enetc_setup_tc_mqprio(ndev, type_data);
+       default:
+               return -EOPNOTSUPP;
+       }
 }
 
 /* Probing/ Init */
@@ -100,7 +113,7 @@ static const struct net_device_ops enetc_ndev_ops = {
        .ndo_set_mac_address    = enetc_vf_set_mac_addr,
        .ndo_set_features       = enetc_vf_set_features,
        .ndo_eth_ioctl          = enetc_ioctl,
-       .ndo_setup_tc           = enetc_setup_tc,
+       .ndo_setup_tc           = enetc_vf_setup_tc,
 };
 
 static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
index d77ee89..a5fed00 100644 (file)
@@ -561,6 +561,7 @@ struct fec_enet_private {
        struct clk *clk_2x_txclk;
 
        bool ptp_clk_on;
+       struct mutex ptp_clk_mutex;
        unsigned int num_tx_queues;
        unsigned int num_rx_queues;
 
@@ -638,13 +639,6 @@ struct fec_enet_private {
        int pps_enable;
        unsigned int next_counter;
 
-       struct {
-               struct timespec64 ts_phc;
-               u64 ns_sys;
-               u32 at_corr;
-               u8 at_inc_corr;
-       } ptp_saved_state;
-
        u64 ethtool_stats[];
 };
 
@@ -655,8 +649,5 @@ void fec_ptp_disable_hwts(struct net_device *ndev);
 int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
 int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
 
-void fec_ptp_save_state(struct fec_enet_private *fep);
-int fec_ptp_restore_state(struct fec_enet_private *fep);
-
 /****************************************************************************/
 #endif /* FEC_H */
index 6152f6d..92c55e1 100644 (file)
@@ -286,11 +286,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 #define FEC_MMFR_TA            (2 << 16)
 #define FEC_MMFR_DATA(v)       (v & 0xffff)
 /* FEC ECR bits definition */
-#define FEC_ECR_RESET   BIT(0)
-#define FEC_ECR_ETHEREN BIT(1)
-#define FEC_ECR_MAGICEN BIT(2)
-#define FEC_ECR_SLEEP   BIT(3)
-#define FEC_ECR_EN1588  BIT(4)
+#define FEC_ECR_MAGICEN                (1 << 2)
+#define FEC_ECR_SLEEP          (1 << 3)
 
 #define FEC_MII_TIMEOUT                30000 /* us */
 
@@ -986,9 +983,6 @@ fec_restart(struct net_device *ndev)
        u32 temp_mac[2];
        u32 rcntl = OPT_FRAME_SIZE | 0x04;
        u32 ecntl = 0x2; /* ETHEREN */
-       struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS };
-
-       fec_ptp_save_state(fep);
 
        /* Whack a reset.  We should wait for this.
         * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
@@ -1142,7 +1136,7 @@ fec_restart(struct net_device *ndev)
        }
 
        if (fep->bufdesc_ex)
-               ecntl |= FEC_ECR_EN1588;
+               ecntl |= (1 << 4);
 
        if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
            fep->rgmii_txc_dly)
@@ -1163,14 +1157,6 @@ fec_restart(struct net_device *ndev)
        if (fep->bufdesc_ex)
                fec_ptp_start_cyclecounter(ndev);
 
-       /* Restart PPS if needed */
-       if (fep->pps_enable) {
-               /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */
-               fep->pps_enable = 0;
-               fec_ptp_restore_state(fep);
-               fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1);
-       }
-
        /* Enable interrupts we wish to service */
        if (fep->link)
                writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1221,8 +1207,6 @@ fec_stop(struct net_device *ndev)
        struct fec_enet_private *fep = netdev_priv(ndev);
        u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
        u32 val;
-       struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS };
-       u32 ecntl = 0;
 
        /* We cannot expect a graceful transmit stop without link !!! */
        if (fep->link) {
@@ -1232,8 +1216,6 @@ fec_stop(struct net_device *ndev)
                        netdev_err(ndev, "Graceful transmit stop did not complete!\n");
        }
 
-       fec_ptp_save_state(fep);
-
        /* Whack a reset.  We should wait for this.
         * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
         * instead of reset MAC itself.
@@ -1253,28 +1235,12 @@ fec_stop(struct net_device *ndev)
        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
        writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 
-       if (fep->bufdesc_ex)
-               ecntl |= FEC_ECR_EN1588;
-
        /* We have to keep ENET enabled to have MII interrupt stay working */
        if (fep->quirks & FEC_QUIRK_ENET_MAC &&
                !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
-               ecntl |= FEC_ECR_ETHEREN;
+               writel(2, fep->hwp + FEC_ECNTRL);
                writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
        }
-
-       writel(ecntl, fep->hwp + FEC_ECNTRL);
-
-       if (fep->bufdesc_ex)
-               fec_ptp_start_cyclecounter(ndev);
-
-       /* Restart PPS if needed */
-       if (fep->pps_enable) {
-               /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */
-               fep->pps_enable = 0;
-               fec_ptp_restore_state(fep);
-               fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1);
-       }
 }
 
 
@@ -2029,7 +1995,6 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
 static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
-       unsigned long flags;
        int ret;
 
        if (enable) {
@@ -2038,15 +2003,15 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
                        return ret;
 
                if (fep->clk_ptp) {
-                       spin_lock_irqsave(&fep->tmreg_lock, flags);
+                       mutex_lock(&fep->ptp_clk_mutex);
                        ret = clk_prepare_enable(fep->clk_ptp);
                        if (ret) {
-                               spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+                               mutex_unlock(&fep->ptp_clk_mutex);
                                goto failed_clk_ptp;
                        } else {
                                fep->ptp_clk_on = true;
                        }
-                       spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+                       mutex_unlock(&fep->ptp_clk_mutex);
                }
 
                ret = clk_prepare_enable(fep->clk_ref);
@@ -2061,10 +2026,10 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
        } else {
                clk_disable_unprepare(fep->clk_enet_out);
                if (fep->clk_ptp) {
-                       spin_lock_irqsave(&fep->tmreg_lock, flags);
+                       mutex_lock(&fep->ptp_clk_mutex);
                        clk_disable_unprepare(fep->clk_ptp);
                        fep->ptp_clk_on = false;
-                       spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+                       mutex_unlock(&fep->ptp_clk_mutex);
                }
                clk_disable_unprepare(fep->clk_ref);
                clk_disable_unprepare(fep->clk_2x_txclk);
@@ -2077,10 +2042,10 @@ failed_clk_2x_txclk:
                clk_disable_unprepare(fep->clk_ref);
 failed_clk_ref:
        if (fep->clk_ptp) {
-               spin_lock_irqsave(&fep->tmreg_lock, flags);
+               mutex_lock(&fep->ptp_clk_mutex);
                clk_disable_unprepare(fep->clk_ptp);
                fep->ptp_clk_on = false;
-               spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+               mutex_unlock(&fep->ptp_clk_mutex);
        }
 failed_clk_ptp:
        clk_disable_unprepare(fep->clk_enet_out);
@@ -3915,7 +3880,7 @@ fec_probe(struct platform_device *pdev)
        }
 
        fep->ptp_clk_on = false;
-       spin_lock_init(&fep->tmreg_lock);
+       mutex_init(&fep->ptp_clk_mutex);
 
        /* clk_ref is optional, depends on board */
        fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
index 8dd5a26..3dc3c0b 100644 (file)
@@ -365,19 +365,21 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
  */
 static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
-       struct fec_enet_private *fep =
+       struct fec_enet_private *adapter =
            container_of(ptp, struct fec_enet_private, ptp_caps);
        u64 ns;
        unsigned long flags;
 
-       spin_lock_irqsave(&fep->tmreg_lock, flags);
+       mutex_lock(&adapter->ptp_clk_mutex);
        /* Check the ptp clock */
-       if (!fep->ptp_clk_on) {
-               spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+       if (!adapter->ptp_clk_on) {
+               mutex_unlock(&adapter->ptp_clk_mutex);
                return -EINVAL;
        }
-       ns = timecounter_read(&fep->tc);
-       spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+       spin_lock_irqsave(&adapter->tmreg_lock, flags);
+       ns = timecounter_read(&adapter->tc);
+       spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+       mutex_unlock(&adapter->ptp_clk_mutex);
 
        *ts = ns_to_timespec64(ns);
 
@@ -402,10 +404,10 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
        unsigned long flags;
        u32 counter;
 
-       spin_lock_irqsave(&fep->tmreg_lock, flags);
+       mutex_lock(&fep->ptp_clk_mutex);
        /* Check the ptp clock */
        if (!fep->ptp_clk_on) {
-               spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+               mutex_unlock(&fep->ptp_clk_mutex);
                return -EINVAL;
        }
 
@@ -415,9 +417,11 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
         */
        counter = ns & fep->cc.mask;
 
+       spin_lock_irqsave(&fep->tmreg_lock, flags);
        writel(counter, fep->hwp + FEC_ATIME);
        timecounter_init(&fep->tc, &fep->cc, ns);
        spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+       mutex_unlock(&fep->ptp_clk_mutex);
        return 0;
 }
 
@@ -514,11 +518,13 @@ static void fec_time_keep(struct work_struct *work)
        struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
        unsigned long flags;
 
-       spin_lock_irqsave(&fep->tmreg_lock, flags);
+       mutex_lock(&fep->ptp_clk_mutex);
        if (fep->ptp_clk_on) {
+               spin_lock_irqsave(&fep->tmreg_lock, flags);
                timecounter_read(&fep->tc);
+               spin_unlock_irqrestore(&fep->tmreg_lock, flags);
        }
-       spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+       mutex_unlock(&fep->ptp_clk_mutex);
 
        schedule_delayed_work(&fep->time_keep, HZ);
 }
@@ -593,6 +599,8 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
        }
        fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
 
+       spin_lock_init(&fep->tmreg_lock);
+
        fec_ptp_start_cyclecounter(ndev);
 
        INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
@@ -625,36 +633,7 @@ void fec_ptp_stop(struct platform_device *pdev)
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);
 
-       if (fep->pps_enable)
-               fec_ptp_enable_pps(fep, 0);
-
        cancel_delayed_work_sync(&fep->time_keep);
        if (fep->ptp_clock)
                ptp_clock_unregister(fep->ptp_clock);
 }
-
-void fec_ptp_save_state(struct fec_enet_private *fep)
-{
-       u32 atime_inc_corr;
-
-       fec_ptp_gettime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc);
-       fep->ptp_saved_state.ns_sys = ktime_get_ns();
-
-       fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR);
-       atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK;
-       fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET);
-}
-
-int fec_ptp_restore_state(struct fec_enet_private *fep)
-{
-       u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
-       u64 ns_sys;
-
-       writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR);
-       atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET;
-       writel(atime_inc, fep->hwp + FEC_ATIME_INC);
-
-       ns_sys = ktime_get_ns() - fep->ptp_saved_state.ns_sys;
-       timespec64_add_ns(&fep->ptp_saved_state.ts_phc, ns_sys);
-       return fec_ptp_settime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc);
-}
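
Taken together, these fec hunks replace the old single tmreg_lock usage with a two-level scheme: ptp_clk_mutex guards the ptp_clk_on flag around the sleeping clock prepare/unprepare calls, while tmreg_lock shrinks to an IRQ-safe spinlock covering only the hardware timecounter accesses. A minimal sketch of the resulting nesting, using the field names from the diff (illustrative, not a drop-in function):

    /* Sketch only: assumes the fec_enet_private fields shown in the hunks above. */
    static int fec_example_read_clock(struct fec_enet_private *fep, struct timespec64 *ts)
    {
            unsigned long flags;
            u64 ns;

            mutex_lock(&fep->ptp_clk_mutex);                /* may sleep; protects ptp_clk_on */
            if (!fep->ptp_clk_on) {
                    mutex_unlock(&fep->ptp_clk_mutex);
                    return -EINVAL;                         /* PTP clock is gated off */
            }

            spin_lock_irqsave(&fep->tmreg_lock, flags);     /* short, IRQ-safe register window */
            ns = timecounter_read(&fep->tc);
            spin_unlock_irqrestore(&fep->tmreg_lock, flags);

            mutex_unlock(&fep->ptp_clk_mutex);

            *ts = ns_to_timespec64(ns);
            return 0;
    }
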
index 8c93962..2e6461b 100644 (file)
@@ -157,7 +157,7 @@ static int gve_alloc_page_dqo(struct gve_priv *priv,
        int err;
 
        err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
-                            &buf_state->addr, DMA_FROM_DEVICE, GFP_KERNEL);
+                            &buf_state->addr, DMA_FROM_DEVICE, GFP_ATOMIC);
        if (err)
                return err;
 
index 10c1e1e..e3d9804 100644 (file)
@@ -5909,6 +5909,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi)
 }
 
 /**
+ * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
+ * @vsi: Pointer to vsi structure
+ * @max_tx_rate: max TX rate in bytes to be converted into Mbits
+ *
+ * Helper function to convert units before sending them to set the BW limit
+ **/
+static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
+{
+       if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
+               dev_warn(&vsi->back->pdev->dev,
+                        "Setting max tx rate to minimum usable value of 50Mbps.\n");
+               max_tx_rate = I40E_BW_CREDIT_DIVISOR;
+       } else {
+               do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
+       }
+
+       return max_tx_rate;
+}
+
+/**
  * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
  * @vsi: VSI to be configured
  * @seid: seid of the channel/VSI
@@ -5930,10 +5950,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
                        max_tx_rate, seid);
                return -EINVAL;
        }
-       if (max_tx_rate && max_tx_rate < 50) {
+       if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
                dev_warn(&pf->pdev->dev,
                         "Setting max tx rate to minimum usable value of 50Mbps.\n");
-               max_tx_rate = 50;
+               max_tx_rate = I40E_BW_CREDIT_DIVISOR;
        }
 
        /* Tx rate credits are in values of 50Mbps, 0 is disabled */
@@ -8224,9 +8244,9 @@ config_tc:
 
        if (i40e_is_tc_mqprio_enabled(pf)) {
                if (vsi->mqprio_qopt.max_rate[0]) {
-                       u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+                       u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+                                                 vsi->mqprio_qopt.max_rate[0]);
 
-                       do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
                        ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
                        if (!ret) {
                                u64 credits = max_tx_rate;
@@ -10971,10 +10991,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
        }
 
        if (vsi->mqprio_qopt.max_rate[0]) {
-               u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+               u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+                                                 vsi->mqprio_qopt.max_rate[0]);
                u64 credits = 0;
 
-               do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
                ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
                if (ret)
                        goto end_unlock;
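
For context on the new helper: mqprio hands the driver max_rate[0] in bytes per second, while the firmware interface wants whole Mbits per second with a 50 Mbps minimum, so the value is divided by I40E_BW_MBPS_DIVISOR and clamped up to I40E_BW_CREDIT_DIVISOR when it falls below 1 Mbit/s. A standalone arithmetic sketch (the 125000 and 50 constants are assumed to match the driver's definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define BW_MBPS_DIVISOR   125000ULL   /* bytes/s per Mbit/s (assumed I40E_BW_MBPS_DIVISOR) */
    #define BW_CREDIT_DIVISOR 50ULL       /* minimum usable rate in Mbps (assumed I40E_BW_CREDIT_DIVISOR) */

    static uint64_t bw_bytes_to_mbits(uint64_t rate_bytes_per_sec)
    {
            if (rate_bytes_per_sec < BW_MBPS_DIVISOR)
                    return BW_CREDIT_DIVISOR;               /* below 1 Mbit/s: clamp to the 50 Mbps floor */
            return rate_bytes_per_sec / BW_MBPS_DIVISOR;    /* bytes/s -> Mbit/s */
    }

    int main(void)
    {
            /* 12,500,000 bytes/s is 100 Mbit/s */
            printf("%llu Mbps\n", (unsigned long long)bw_bytes_to_mbits(12500000ULL));
            return 0;
    }
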
index 4f184c5..7e9f6a6 100644 (file)
@@ -2039,6 +2039,25 @@ static void i40e_del_qch(struct i40e_vf *vf)
 }
 
 /**
+ * i40e_vc_get_max_frame_size
+ * @vf: pointer to the VF
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN so the PF needs to account for this in max frame size
+ * checks and sending the max frame size to the VF.
+ **/
+static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
+{
+       u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
+
+       if (vf->port_vlan_id)
+               max_frame_size -= VLAN_HLEN;
+
+       return max_frame_size;
+}
+
+/**
  * i40e_vc_get_vf_resources_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
@@ -2139,6 +2158,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
        vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
        vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
        vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+       vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
 
        if (vf->lan_vsi_idx) {
                vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
index 10aa99d..0c89f16 100644 (file)
@@ -1077,7 +1077,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
 {
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
-       bool handle_mac = iavf_is_mac_set_handled(netdev, addr->sa_data);
        int ret;
 
        if (!is_valid_ether_addr(addr->sa_data))
@@ -1094,10 +1093,9 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
                return 0;
        }
 
-       if (handle_mac)
-               goto done;
-
-       ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, false, msecs_to_jiffies(2500));
+       ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
+                                              iavf_is_mac_set_handled(netdev, addr->sa_data),
+                                              msecs_to_jiffies(2500));
 
        /* If ret < 0 then it means wait was interrupted.
         * If ret == 0 then it means we got a timeout.
@@ -1111,7 +1109,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
        if (!ret)
                return -EAGAIN;
 
-done:
        if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
                return -EACCES;
 
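
The rewritten iavf_set_mac() now embeds iavf_is_mac_set_handled() in the wait condition instead of sampling it once before sleeping, which closes the window the removed handle_mac/goto path tried to cover. For reference, the return-value contract of wait_event_interruptible_timeout() that the surrounding checks rely on (a condensed sketch, not the driver code itself):

    long ret;

    ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
                                           iavf_is_mac_set_handled(netdev, addr->sa_data),
                                           msecs_to_jiffies(2500));
    if (ret < 0)
            return ret;             /* interrupted by a signal */
    if (ret == 0)
            return -EAGAIN;         /* 2.5 s elapsed without the PF confirming the MAC */
    /* ret > 0: the condition became true; verify netdev->dev_addr and finish */
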
index 06d1879..18b6a70 100644 (file)
@@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
 {
        u32 head, tail;
 
+       /* underlying hardware might not allow access and/or always return
+        * 0 for the head/tail registers so just use the cached values
+        */
        head = ring->next_to_clean;
-       tail = readl(ring->tail);
+       tail = ring->next_to_use;
 
        if (head != tail)
                return (head < tail) ?
@@ -1390,7 +1393,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
 #endif
        struct sk_buff *skb;
 
-       if (!rx_buffer)
+       if (!rx_buffer || !size)
                return NULL;
        /* prefetch first cache line of first page */
        va = page_address(rx_buffer->page) + rx_buffer->page_offset;
@@ -1548,7 +1551,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
                /* exit if we failed to retrieve a buffer */
                if (!skb) {
                        rx_ring->rx_stats.alloc_buff_failed++;
-                       if (rx_buffer)
+                       if (rx_buffer && size)
                                rx_buffer->pagecnt_bias++;
                        break;
                }
index 15ee85d..5a9e656 100644 (file)
@@ -269,11 +269,14 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
 void iavf_configure_queues(struct iavf_adapter *adapter)
 {
        struct virtchnl_vsi_queue_config_info *vqci;
-       struct virtchnl_queue_pair_info *vqpi;
+       int i, max_frame = adapter->vf_res->max_mtu;
        int pairs = adapter->num_active_queues;
-       int i, max_frame = IAVF_MAX_RXBUFFER;
+       struct virtchnl_queue_pair_info *vqpi;
        size_t len;
 
+       if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
+               max_frame = IAVF_MAX_RXBUFFER;
+
        if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
index 0c4ec92..58d483e 100644 (file)
@@ -914,7 +914,7 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
  */
 static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 {
-       u16 offset = 0, qmap = 0, tx_count = 0, pow = 0;
+       u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
        u16 num_txq_per_tc, num_rxq_per_tc;
        u16 qcount_tx = vsi->alloc_txq;
        u16 qcount_rx = vsi->alloc_rxq;
@@ -981,23 +981,25 @@ static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
         * at least 1)
         */
        if (offset)
-               vsi->num_rxq = offset;
+               rx_count = offset;
        else
-               vsi->num_rxq = num_rxq_per_tc;
+               rx_count = num_rxq_per_tc;
 
-       if (vsi->num_rxq > vsi->alloc_rxq) {
+       if (rx_count > vsi->alloc_rxq) {
                dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
-                       vsi->num_rxq, vsi->alloc_rxq);
+                       rx_count, vsi->alloc_rxq);
                return -EINVAL;
        }
 
-       vsi->num_txq = tx_count;
-       if (vsi->num_txq > vsi->alloc_txq) {
+       if (tx_count > vsi->alloc_txq) {
                dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
-                       vsi->num_txq, vsi->alloc_txq);
+                       tx_count, vsi->alloc_txq);
                return -EINVAL;
        }
 
+       vsi->num_txq = tx_count;
+       vsi->num_rxq = rx_count;
+
        if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
                dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
                /* since there is a chance that num_rxq could have been changed
@@ -3490,6 +3492,7 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
        u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
        u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
        int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
+       u16 new_txq, new_rxq;
        u8 netdev_tc = 0;
        int i;
 
@@ -3530,21 +3533,24 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
                }
        }
 
-       /* Set actual Tx/Rx queue pairs */
-       vsi->num_txq = offset + qcount_tx;
-       if (vsi->num_txq > vsi->alloc_txq) {
+       new_txq = offset + qcount_tx;
+       if (new_txq > vsi->alloc_txq) {
                dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
-                       vsi->num_txq, vsi->alloc_txq);
+                       new_txq, vsi->alloc_txq);
                return -EINVAL;
        }
 
-       vsi->num_rxq = offset + qcount_rx;
-       if (vsi->num_rxq > vsi->alloc_rxq) {
+       new_rxq = offset + qcount_rx;
+       if (new_rxq > vsi->alloc_rxq) {
                dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
-                       vsi->num_rxq, vsi->alloc_rxq);
+                       new_rxq, vsi->alloc_rxq);
                return -EINVAL;
        }
 
+       /* Set actual Tx/Rx queue pairs */
+       vsi->num_txq = new_txq;
+       vsi->num_rxq = new_rxq;
+
        /* Setup queue TC[0].qmap for given VSI context */
        ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
        ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
@@ -3576,6 +3582,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
 {
        u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
        struct ice_pf *pf = vsi->back;
+       struct ice_tc_cfg old_tc_cfg;
        struct ice_vsi_ctx *ctx;
        struct device *dev;
        int i, ret = 0;
@@ -3600,6 +3607,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
                        max_txqs[i] = vsi->num_txq;
        }
 
+       memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
        vsi->tc_cfg.ena_tc = ena_tc;
        vsi->tc_cfg.numtc = num_tc;
 
@@ -3616,8 +3624,10 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
        else
                ret = ice_vsi_setup_q_map(vsi, ctx);
 
-       if (ret)
+       if (ret) {
+               memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
                goto out;
+       }
 
        /* must to indicate which section of VSI context are being modified */
        ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
index 8c30eea..e109cb9 100644 (file)
@@ -2399,8 +2399,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
                return -EBUSY;
        }
 
-       ice_unplug_aux_dev(pf);
-
        switch (reset) {
        case ICE_RESET_PFR:
                set_bit(ICE_PFR_REQ, pf->state);
@@ -6651,7 +6649,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
  */
 int ice_down(struct ice_vsi *vsi)
 {
-       int i, tx_err, rx_err, link_err = 0, vlan_err = 0;
+       int i, tx_err, rx_err, vlan_err = 0;
 
        WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
 
@@ -6685,20 +6683,13 @@ int ice_down(struct ice_vsi *vsi)
 
        ice_napi_disable_all(vsi);
 
-       if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
-               link_err = ice_force_phys_link_state(vsi, false);
-               if (link_err)
-                       netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
-                                  vsi->vsi_num, link_err);
-       }
-
        ice_for_each_txq(vsi, i)
                ice_clean_tx_ring(vsi->tx_rings[i]);
 
        ice_for_each_rxq(vsi, i)
                ice_clean_rx_ring(vsi->rx_rings[i]);
 
-       if (tx_err || rx_err || link_err || vlan_err) {
+       if (tx_err || rx_err || vlan_err) {
                netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
                           vsi->vsi_num, vsi->vsw->sw_id);
                return -EIO;
@@ -6860,6 +6851,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
        if (err)
                goto err_setup_rx;
 
+       ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+
        if (vsi->type == ICE_VSI_PF) {
                /* Notify the stack of the actual queue counts. */
                err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
@@ -8892,6 +8885,16 @@ int ice_stop(struct net_device *netdev)
                return -EBUSY;
        }
 
+       if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
+               int link_err = ice_force_phys_link_state(vsi, false);
+
+               if (link_err) {
+                       netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
+                                  vsi->vsi_num, link_err);
+                       return -EIO;
+               }
+       }
+
        ice_vsi_close(vsi);
 
        return 0;
index 836dce8..97453d1 100644 (file)
@@ -610,7 +610,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
        if (test_bit(ICE_VSI_DOWN, vsi->state))
                return -ENETDOWN;
 
-       if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
+       if (!ice_is_xdp_ena_vsi(vsi))
                return -ENXIO;
 
        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -621,6 +621,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
                xdp_ring = vsi->xdp_rings[queue_index];
                spin_lock(&xdp_ring->tx_lock);
        } else {
+               /* Generally, should not happen */
+               if (unlikely(queue_index >= vsi->num_xdp_txq))
+                       return -ENXIO;
                xdp_ring = vsi->xdp_rings[queue_index];
        }
 
index ede3e53..a895862 100644 (file)
@@ -368,6 +368,7 @@ static int prestera_port_sfp_bind(struct prestera_port *port)
        if (!sw->np)
                return 0;
 
+       of_node_get(sw->np);
        ports = of_find_node_by_name(sw->np, "ports");
 
        for_each_child_of_node(ports, node) {
@@ -417,6 +418,7 @@ static int prestera_port_sfp_bind(struct prestera_port *port)
        }
 
 out:
+       of_node_put(node);
        of_node_put(ports);
        return err;
 }
index f538a74..59470d9 100644 (file)
@@ -872,6 +872,7 @@ static void prestera_pci_remove(struct pci_dev *pdev)
 static const struct pci_device_id prestera_pci_devices[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC804) },
        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC80C) },
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xCC1E) },
        { }
 };
 MODULE_DEVICE_TABLE(pci, prestera_pci_devices);
index 5ace460..b344632 100644 (file)
@@ -1458,7 +1458,7 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
 
 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
 {
-       return !eth->hwlro;
+       return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
 }
 
 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
index 85155cd..4aeb927 100644 (file)
@@ -179,6 +179,9 @@ static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg)
        /* Only return ad bits of the gw register */
        ret &= MLXBF_GIGE_MDIO_GW_AD_MASK;
 
+       /* The MDIO lock is set on read. To release it, clear gw register */
+       writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+
        return ret;
 }
 
@@ -203,6 +206,9 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
                                        temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK),
                                        5, 1000000);
 
+       /* The MDIO lock is set on read. To release it, clear gw register */
+       writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+
        return ret;
 }
 
index 5f92401..a6f99b4 100644 (file)
@@ -397,6 +397,11 @@ static void mana_gd_process_eq_events(void *arg)
                        break;
                }
 
+               /* Per GDMA spec, rmb is necessary after checking owner_bits, before
+                * reading eqe.
+                */
+               rmb();
+
                mana_gd_process_eqe(eq);
 
                eq->head++;
@@ -1134,6 +1139,11 @@ static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
        if (WARN_ON_ONCE(owner_bits != new_bits))
                return -1;
 
+       /* Per GDMA spec, rmb is necessary after checking owner_bits, before
+        * reading completion info
+        */
+       rmb();
+
        comp->wq_num = cqe->cqe_info.wq_num;
        comp->is_sq = cqe->cqe_info.is_sq;
        memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);
@@ -1465,10 +1475,6 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
-#ifndef PCI_VENDOR_ID_MICROSOFT
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#endif
-
 static const struct pci_device_id mana_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
        { PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
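
Both barriers added in this file enforce the rule stated in the comments: the CPU must observe the owner/arrival bits before it dereferences the rest of the descriptor, so an rmb() separates the ownership check from the payload reads. A generic consume-side sketch of that pattern (the descriptor layout and names here are illustrative, not the GDMA structures):

    /* Producer publishes the payload first and flips owner_bits last. */
    if (READ_ONCE(desc->owner_bits) != expected_owner_bits)
            return -EAGAIN;                 /* nothing new to consume yet */

    rmb();                                  /* order the owner check before the payload reads */

    memcpy(out, desc->payload, sizeof(desc->payload));
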
index b357ac4..7e32b04 100644 (file)
@@ -1449,6 +1449,8 @@ static int ravb_phy_init(struct net_device *ndev)
                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
        }
 
+       /* Indicate that the MAC is responsible for managing PHY PM */
+       phydev->mac_managed_pm = true;
        phy_attached_info(phydev);
 
        return 0;
index 67ade78..7fd8828 100644 (file)
@@ -2029,6 +2029,8 @@ static int sh_eth_phy_init(struct net_device *ndev)
        if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
                phy_set_max_speed(phydev, SPEED_100);
 
+       /* Indicate that the MAC is responsible for managing PHY PM */
+       phydev->mac_managed_pm = true;
        phy_attached_info(phydev);
 
        return 0;
index 032b8c0..5b4d661 100644 (file)
@@ -319,7 +319,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
                efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
                efx->n_rx_channels = 1;
                efx->n_tx_channels = 1;
-               efx->tx_channel_offset = 1;
+               efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
                efx->n_xdp_channels = 0;
                efx->xdp_channel_offset = efx->n_channels;
                efx->legacy_irq = efx->pci_dev->irq;
index 017212a..f54ebd0 100644 (file)
@@ -320,7 +320,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
                efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0);
                efx->n_rx_channels = 1;
                efx->n_tx_channels = 1;
-               efx->tx_channel_offset = 1;
+               efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0;
                efx->n_xdp_channels = 0;
                efx->xdp_channel_offset = efx->n_channels;
                efx->legacy_irq = efx->pci_dev->irq;
index e166dcb..91e8759 100644 (file)
@@ -336,7 +336,7 @@ netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb,
                 * previous packets out.
                 */
                if (!netdev_xmit_more())
-                       efx_tx_send_pending(tx_queue->channel);
+                       efx_tx_send_pending(efx_get_tx_channel(efx, index));
                return NETDEV_TX_OK;
        }
 
index d124740..c5f88f7 100644 (file)
@@ -549,7 +549,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                 * previous packets out.
                 */
                if (!netdev_xmit_more())
-                       efx_tx_send_pending(tx_queue->channel);
+                       efx_tx_send_pending(efx_get_tx_channel(efx, index));
                return NETDEV_TX_OK;
        }
 
index 8594ee8..88aa0d3 100644 (file)
@@ -2020,9 +2020,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
 
                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
-                       dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+                       dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
                        skb_copy_from_linear_data(skb, copy_skb->data, len);
-                       dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+                       dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
                        /* Reuse original ring buffer. */
                        hme_write_rxd(hp, this,
                                      (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
index ec010cf..6f874f9 100644 (file)
@@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
        mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
        req.v4_route_tbl_info_valid = 1;
        req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
-       req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
+       req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
 
        mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
        req.v6_route_tbl_info_valid = 1;
        req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
-       req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
+       req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
 
        mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
        req.v4_filter_tbl_start_valid = 1;
@@ -352,7 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
                req.v4_hash_route_tbl_info_valid = 1;
                req.v4_hash_route_tbl_info.start =
                                ipa->mem_offset + mem->offset;
-               req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
+               req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
        }
 
        mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
@@ -360,7 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
                req.v6_hash_route_tbl_info_valid = 1;
                req.v6_hash_route_tbl_info.start =
                        ipa->mem_offset + mem->offset;
-               req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
+               req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
        }
 
        mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
index 6838e80..75d3fc0 100644 (file)
@@ -311,7 +311,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
                .tlv_type       = 0x12,
                .offset         = offsetof(struct ipa_init_modem_driver_req,
                                           v4_route_tbl_info),
-               .ei_array       = ipa_mem_array_ei,
+               .ei_array       = ipa_mem_bounds_ei,
        },
        {
                .data_type      = QMI_OPT_FLAG,
@@ -332,7 +332,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
                .tlv_type       = 0x13,
                .offset         = offsetof(struct ipa_init_modem_driver_req,
                                           v6_route_tbl_info),
-               .ei_array       = ipa_mem_array_ei,
+               .ei_array       = ipa_mem_bounds_ei,
        },
        {
                .data_type      = QMI_OPT_FLAG,
@@ -496,7 +496,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
                .tlv_type       = 0x1b,
                .offset         = offsetof(struct ipa_init_modem_driver_req,
                                           v4_hash_route_tbl_info),
-               .ei_array       = ipa_mem_array_ei,
+               .ei_array       = ipa_mem_bounds_ei,
        },
        {
                .data_type      = QMI_OPT_FLAG,
@@ -517,7 +517,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
                .tlv_type       = 0x1c,
                .offset         = offsetof(struct ipa_init_modem_driver_req,
                                           v6_hash_route_tbl_info),
-               .ei_array       = ipa_mem_array_ei,
+               .ei_array       = ipa_mem_bounds_ei,
        },
        {
                .data_type      = QMI_OPT_FLAG,
index 495e85a..9651aa5 100644 (file)
@@ -86,9 +86,11 @@ enum ipa_platform_type {
        IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01       = 0x5,  /* QNX MSM */
 };
 
-/* This defines the start and end offset of a range of memory.  Both
- * fields are offsets relative to the start of IPA shared memory.
- * The end value is the last addressable byte *within* the range.
+/* This defines the start and end offset of a range of memory.  The start
+ * value is a byte offset relative to the start of IPA shared memory.  The
+ * end value is the last addressable unit *within* the range.  Typically
+ * the end value is in units of bytes, however it can also be a maximum
+ * array index value.
  */
 struct ipa_mem_bounds {
        u32 start;
@@ -129,18 +131,19 @@ struct ipa_init_modem_driver_req {
        u8                      hdr_tbl_info_valid;
        struct ipa_mem_bounds   hdr_tbl_info;
 
-       /* Routing table information.  These define the location and size of
-        * non-hashable IPv4 and IPv6 filter tables.  The start values are
-        * offsets relative to the start of IPA shared memory.
+       /* Routing table information.  These define the location and maximum
+        * *index* (not byte) for the modem portion of non-hashable IPv4 and
+        * IPv6 routing tables.  The start values are byte offsets relative
+        * to the start of IPA shared memory.
         */
        u8                      v4_route_tbl_info_valid;
-       struct ipa_mem_array    v4_route_tbl_info;
+       struct ipa_mem_bounds   v4_route_tbl_info;
        u8                      v6_route_tbl_info_valid;
-       struct ipa_mem_array    v6_route_tbl_info;
+       struct ipa_mem_bounds   v6_route_tbl_info;
 
        /* Filter table information.  These define the location of the
         * non-hashable IPv4 and IPv6 filter tables.  The start values are
-        * offsets relative to the start of IPA shared memory.
+        * byte offsets relative to the start of IPA shared memory.
         */
        u8                      v4_filter_tbl_start_valid;
        u32                     v4_filter_tbl_start;
@@ -181,18 +184,20 @@ struct ipa_init_modem_driver_req {
        u8                      zip_tbl_info_valid;
        struct ipa_mem_bounds   zip_tbl_info;
 
-       /* Routing table information.  These define the location and size
-        * of hashable IPv4 and IPv6 filter tables.  The start values are
-        * offsets relative to the start of IPA shared memory.
+       /* Routing table information.  These define the location and maximum
+        * *index* (not byte) for the modem portion of hashable IPv4 and IPv6
+        * routing tables (if supported by hardware).  The start values are
+        * byte offsets relative to the start of IPA shared memory.
         */
        u8                      v4_hash_route_tbl_info_valid;
-       struct ipa_mem_array    v4_hash_route_tbl_info;
+       struct ipa_mem_bounds   v4_hash_route_tbl_info;
        u8                      v6_hash_route_tbl_info_valid;
-       struct ipa_mem_array    v6_hash_route_tbl_info;
+       struct ipa_mem_bounds   v6_hash_route_tbl_info;
 
        /* Filter table information.  These define the location and size
-        * of hashable IPv4 and IPv6 filter tables.  The start values are
-        * offsets relative to the start of IPA shared memory.
+        * of hashable IPv4 and IPv6 filter tables (if supported by hardware).
+        * The start values are byte offsets relative to the start of IPA
+        * shared memory.
         */
        u8                      v4_hash_filter_tbl_start_valid;
        u32                     v4_hash_filter_tbl_start;
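
The net effect of these IPA changes is that the routing-table TLVs are now encoded as ipa_mem_bounds (start/end) instead of ipa_mem_array (start/count), and for the route tables "end" carries the last modem-owned entry index rather than a byte count. A small sketch of filling one such field under that convention (mem_offset and route_offset are placeholder names; IPA_ROUTE_MODEM_COUNT is the constant added in ipa_table.h below):

    struct ipa_mem_bounds v4_route = {
            .start = mem_offset + route_offset,        /* byte offset into IPA shared memory */
            .end   = IPA_ROUTE_MODEM_COUNT - 1,        /* highest route-table index owned by the modem */
    };
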
index 2f5a58b..69efe67 100644 (file)
 
 /* Assignment of route table entries to the modem and AP */
 #define IPA_ROUTE_MODEM_MIN            0
-#define IPA_ROUTE_MODEM_COUNT          8
-
 #define IPA_ROUTE_AP_MIN               IPA_ROUTE_MODEM_COUNT
 #define IPA_ROUTE_AP_COUNT \
                (IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
index b6a9a0d..1538e2e 100644 (file)
@@ -13,6 +13,9 @@ struct ipa;
 /* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
 #define IPA_FILTER_COUNT_MAX   14
 
+/* The number of route table entries allotted to the modem */
+#define IPA_ROUTE_MODEM_COUNT  8
+
 /* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
 #define IPA_ROUTE_COUNT_MAX    15
 
index dfeb5b3..bb1c298 100644 (file)
@@ -495,7 +495,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
 
 static int ipvlan_process_outbound(struct sk_buff *skb)
 {
-       struct ethhdr *ethh = eth_hdr(skb);
        int ret = NET_XMIT_DROP;
 
        /* The ipvlan is a pseudo-L2 device, so the packets that we receive
@@ -505,6 +504,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
        if (skb_mac_header_was_set(skb)) {
                /* In this mode we dont care about
                 * multicast and broadcast traffic */
+               struct ethhdr *ethh = eth_hdr(skb);
+
                if (is_multicast_ether_addr(ethh->h_dest)) {
                        pr_debug_ratelimited(
                                "Dropped {multi|broad}cast of type=[%x]\n",
@@ -589,7 +590,7 @@ out:
 static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
 {
        const struct ipvl_dev *ipvlan = netdev_priv(dev);
-       struct ethhdr *eth = eth_hdr(skb);
+       struct ethhdr *eth = skb_eth_hdr(skb);
        struct ipvl_addr *addr;
        void *lyr3h;
        int addr_type;
@@ -619,6 +620,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
                return dev_forward_skb(ipvlan->phy_dev, skb);
 
        } else if (is_multicast_ether_addr(eth->h_dest)) {
+               skb_reset_mac_header(skb);
                ipvlan_skb_crossing_ns(skb, NULL);
                ipvlan_multicast_enqueue(ipvlan->port, skb, true);
                return NET_XMIT_SUCCESS;
index 9e3c815..796e9c7 100644 (file)
@@ -231,6 +231,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
        return 0;
 
 unregister:
+       of_node_put(child);
        mdiobus_unregister(mdio);
        return rc;
 }
index 605a38e..0e58aa7 100644 (file)
@@ -433,11 +433,11 @@ int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev)
                goto err_remove_hwstats_recursive;
        }
 
-       debugfs_create_file("enable_ifindex", 0600, hwstats->l3_ddir, hwstats,
+       debugfs_create_file("enable_ifindex", 0200, hwstats->l3_ddir, hwstats,
                            &nsim_dev_hwstats_l3_enable_fops.fops);
-       debugfs_create_file("disable_ifindex", 0600, hwstats->l3_ddir, hwstats,
+       debugfs_create_file("disable_ifindex", 0200, hwstats->l3_ddir, hwstats,
                            &nsim_dev_hwstats_l3_disable_fops.fops);
-       debugfs_create_file("fail_next_enable", 0600, hwstats->l3_ddir, hwstats,
+       debugfs_create_file("fail_next_enable", 0200, hwstats->l3_ddir, hwstats,
                            &nsim_dev_hwstats_l3_fail_fops.fops);
 
        INIT_DELAYED_WORK(&hwstats->traffic_dw,
index 8b7a46d..7111e2e 100644 (file)
@@ -91,6 +91,9 @@
 #define VEND1_GLOBAL_FW_ID_MAJOR               GENMASK(15, 8)
 #define VEND1_GLOBAL_FW_ID_MINOR               GENMASK(7, 0)
 
+#define VEND1_GLOBAL_GEN_STAT2                 0xc831
+#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG      BIT(15)
+
 #define VEND1_GLOBAL_RSVD_STAT1                        0xc885
 #define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID    GENMASK(7, 4)
 #define VEND1_GLOBAL_RSVD_STAT1_PROV_ID                GENMASK(3, 0)
 #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2     BIT(1)
 #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3     BIT(0)
 
+/* Sleep and timeout for checking if the Processor-Intensive
+ * MDIO operation is finished
+ */
+#define AQR107_OP_IN_PROG_SLEEP                1000
+#define AQR107_OP_IN_PROG_TIMEOUT      100000
+
 struct aqr107_hw_stat {
        const char *name;
        int reg;
@@ -597,16 +606,52 @@ static void aqr107_link_change_notify(struct phy_device *phydev)
                phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
 }
 
+static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
+{
+       int val, err;
+
+       /* The datasheet notes to wait at least 1ms after issuing a
+        * processor intensive operation before checking.
+        * We cannot use the 'sleep_before_read' parameter of read_poll_timeout
+        * because that just determines the maximum time slept, not the minimum.
+        */
+       usleep_range(1000, 5000);
+
+       err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+                                       VEND1_GLOBAL_GEN_STAT2, val,
+                                       !(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG),
+                                       AQR107_OP_IN_PROG_SLEEP,
+                                       AQR107_OP_IN_PROG_TIMEOUT, false);
+       if (err) {
+               phydev_err(phydev, "timeout: processor-intensive MDIO operation\n");
+               return err;
+       }
+
+       return 0;
+}
+
 static int aqr107_suspend(struct phy_device *phydev)
 {
-       return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
-                               MDIO_CTRL1_LPOWER);
+       int err;
+
+       err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+                              MDIO_CTRL1_LPOWER);
+       if (err)
+               return err;
+
+       return aqr107_wait_processor_intensive_op(phydev);
 }
 
 static int aqr107_resume(struct phy_device *phydev)
 {
-       return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
-                                 MDIO_CTRL1_LPOWER);
+       int err;
+
+       err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+                                MDIO_CTRL1_LPOWER);
+       if (err)
+               return err;
+
+       return aqr107_wait_processor_intensive_op(phydev);
 }
 
 static int aqr107_probe(struct phy_device *phydev)
index 6f52b4f..38234d7 100644 (file)
@@ -2679,16 +2679,19 @@ static int lan8804_config_init(struct phy_device *phydev)
 static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
 {
        int irq_status, tsu_irq_status;
+       int ret = IRQ_NONE;
 
        irq_status = phy_read(phydev, LAN8814_INTS);
-       if (irq_status > 0 && (irq_status & LAN8814_INT_LINK))
-               phy_trigger_machine(phydev);
-
        if (irq_status < 0) {
                phy_error(phydev);
                return IRQ_NONE;
        }
 
+       if (irq_status & LAN8814_INT_LINK) {
+               phy_trigger_machine(phydev);
+               ret = IRQ_HANDLED;
+       }
+
        while (1) {
                tsu_irq_status = lanphy_read_page_reg(phydev, 4,
                                                      LAN8814_INTR_STS_REG);
@@ -2697,12 +2700,15 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
                    (tsu_irq_status & (LAN8814_INTR_STS_REG_1588_TSU0_ |
                                       LAN8814_INTR_STS_REG_1588_TSU1_ |
                                       LAN8814_INTR_STS_REG_1588_TSU2_ |
-                                      LAN8814_INTR_STS_REG_1588_TSU3_)))
+                                      LAN8814_INTR_STS_REG_1588_TSU3_))) {
                        lan8814_handle_ptp_interrupt(phydev);
-               else
+                       ret = IRQ_HANDLED;
+               } else {
                        break;
+               }
        }
-       return IRQ_HANDLED;
+
+       return ret;
 }
 
 static int lan8814_ack_interrupt(struct phy_device *phydev)
index aac133a..154a3c0 100644 (file)
@@ -1275,10 +1275,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
                }
        }
 
-       netif_addr_lock_bh(dev);
-       dev_uc_sync_multiple(port_dev, dev);
-       dev_mc_sync_multiple(port_dev, dev);
-       netif_addr_unlock_bh(dev);
+       if (dev->flags & IFF_UP) {
+               netif_addr_lock_bh(dev);
+               dev_uc_sync_multiple(port_dev, dev);
+               dev_mc_sync_multiple(port_dev, dev);
+               netif_addr_unlock_bh(dev);
+       }
 
        port->index = -1;
        list_add_tail_rcu(&port->list, &team->port_list);
@@ -1349,8 +1351,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
        netdev_rx_handler_unregister(port_dev);
        team_port_disable_netpoll(port);
        vlan_vids_del_by_dev(port_dev, dev);
-       dev_uc_unsync(port_dev, dev);
-       dev_mc_unsync(port_dev, dev);
+       if (dev->flags & IFF_UP) {
+               dev_uc_unsync(port_dev, dev);
+               dev_mc_unsync(port_dev, dev);
+       }
        dev_close(port_dev);
        team_port_leave(team, port);
 
@@ -1700,6 +1704,14 @@ static int team_open(struct net_device *dev)
 
 static int team_close(struct net_device *dev)
 {
+       struct team *team = netdev_priv(dev);
+       struct team_port *port;
+
+       list_for_each_entry(port, &team->port_list, list) {
+               dev_uc_unsync(port->dev, dev);
+               dev_mc_unsync(port->dev, dev);
+       }
+
        return 0;
 }
 
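
The team changes pair up: team_port_add() now syncs the port's unicast/multicast lists into the lower device only while the team master is up, and the new team_close() body unsyncs them so a downed master never leaves stale addresses behind on its ports. A minimal sketch of that add/close pairing, reusing the names from the diff:

    /* On port add, mirror addresses only if the master is administratively up. */
    if (dev->flags & IFF_UP) {
            netif_addr_lock_bh(dev);
            dev_uc_sync_multiple(port_dev, dev);
            dev_mc_sync_multiple(port_dev, dev);
            netif_addr_unlock_bh(dev);
    }

    /* On ndo_stop, drop the mirrored addresses from every port. */
    list_for_each_entry(port, &team->port_list, list) {
            dev_uc_unsync(port->dev, dev);
            dev_mc_unsync(port->dev, dev);
    }
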
index d0f3b6d..5c804bc 100644 (file)
@@ -436,14 +436,13 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
        if (attrs[WGPEER_A_ENDPOINT]) {
                struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
                size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);
+               struct endpoint endpoint = { { { 0 } } };
 
-               if ((len == sizeof(struct sockaddr_in) &&
-                    addr->sa_family == AF_INET) ||
-                   (len == sizeof(struct sockaddr_in6) &&
-                    addr->sa_family == AF_INET6)) {
-                       struct endpoint endpoint = { { { 0 } } };
-
-                       memcpy(&endpoint.addr, addr, len);
+               if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) {
+                       endpoint.addr4 = *(struct sockaddr_in *)addr;
+                       wg_socket_set_peer_endpoint(peer, &endpoint);
+               } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) {
+                       endpoint.addr6 = *(struct sockaddr_in6 *)addr;
                        wg_socket_set_peer_endpoint(peer, &endpoint);
                }
        }
index ba87d29..d4bb40a 100644 (file)
@@ -6,29 +6,28 @@
 #ifdef DEBUG
 
 #include <linux/jiffies.h>
-#include <linux/hrtimer.h>
 
 static const struct {
        bool result;
-       u64 nsec_to_sleep_before;
+       unsigned int msec_to_sleep_before;
 } expected_results[] __initconst = {
        [0 ... PACKETS_BURSTABLE - 1] = { true, 0 },
        [PACKETS_BURSTABLE] = { false, 0 },
-       [PACKETS_BURSTABLE + 1] = { true, NSEC_PER_SEC / PACKETS_PER_SECOND },
+       [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND },
        [PACKETS_BURSTABLE + 2] = { false, 0 },
-       [PACKETS_BURSTABLE + 3] = { true, (NSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
+       [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
        [PACKETS_BURSTABLE + 4] = { true, 0 },
        [PACKETS_BURSTABLE + 5] = { false, 0 }
 };
 
 static __init unsigned int maximum_jiffies_at_index(int index)
 {
-       u64 total_nsecs = 2 * NSEC_PER_SEC / PACKETS_PER_SECOND / 3;
+       unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;
        int i;
 
        for (i = 0; i <= index; ++i)
-               total_nsecs += expected_results[i].nsec_to_sleep_before;
-       return nsecs_to_jiffies(total_nsecs);
+               total_msecs += expected_results[i].msec_to_sleep_before;
+       return msecs_to_jiffies(total_msecs);
 }
 
 static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
@@ -43,12 +42,8 @@ static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
        loop_start_time = jiffies;
 
        for (i = 0; i < ARRAY_SIZE(expected_results); ++i) {
-               if (expected_results[i].nsec_to_sleep_before) {
-                       ktime_t timeout = ktime_add(ktime_add_ns(ktime_get_coarse_boottime(), TICK_NSEC * 4 / 3),
-                                                   ns_to_ktime(expected_results[i].nsec_to_sleep_before));
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_hrtimeout_range_clock(&timeout, 0, HRTIMER_MODE_ABS, CLOCK_BOOTTIME);
-               }
+               if (expected_results[i].msec_to_sleep_before)
+                       msleep(expected_results[i].msec_to_sleep_before);
 
                if (time_is_before_jiffies(loop_start_time +
                                           maximum_jiffies_at_index(i)))
@@ -132,7 +127,7 @@ bool __init wg_ratelimiter_selftest(void)
        if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
                return true;
 
-       BUILD_BUG_ON(NSEC_PER_SEC % PACKETS_PER_SECOND != 0);
+       BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0);
 
        if (wg_ratelimiter_init())
                goto out;
@@ -172,7 +167,7 @@ bool __init wg_ratelimiter_selftest(void)
        ++test;
 #endif
 
-       for (trials = TRIALS_BEFORE_GIVING_UP;;) {
+       for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) {
                int test_count = 0, ret;
 
                ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count);
index a647a40..b20409f 100644 (file)
@@ -140,6 +140,7 @@ config IWLMEI
        depends on INTEL_MEI
        depends on PM
        depends on CFG80211
+       depends on BROKEN
        help
          Enables the iwlmei kernel module.
 
index 5eb28f8..11536f1 100644 (file)
@@ -1833,8 +1833,8 @@ static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
        * If nss < MAX: we can set zeros in other streams
        */
        if (nss > MAX_HE_SUPP_NSS) {
-               IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
-                        MAX_HE_SUPP_NSS);
+               IWL_DEBUG_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
+                              MAX_HE_SUPP_NSS);
                nss = MAX_HE_SUPP_NSS;
        }
 
index 253cbc1..6de13d6 100644 (file)
@@ -267,7 +267,8 @@ static void mt76_init_stream_cap(struct mt76_phy *phy,
        }
        vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
        vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
-       vht_cap->vht_mcs.tx_highest |=
+       if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
+               vht_cap->vht_mcs.tx_highest |=
                                cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
 }
 
index ad6c7d6..d6aae60 100644 (file)
@@ -1088,7 +1088,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
        offset %= 32;
 
        val = mt76_rr(dev, addr);
-       val >>= (tid % 32);
+       val >>= offset;
 
        if (offset > 20) {
                addr += 4;
index bf4f5c0..bbe5099 100644 (file)
@@ -1712,8 +1712,6 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
        res->flags = IORESOURCE_MEM;
 
        for (i = 0; i < nd_region->ndr_mappings; i++) {
-               uuid_t uuid;
-
                nsl_get_uuid(ndd, nd_label, &uuid);
                if (has_uuid_at_pos(nd_region, &uuid, cookie, i))
                        continue;
index 7e88cd2..96e6e9a 100644 (file)
@@ -45,7 +45,7 @@ static struct nd_region *to_region(struct pmem_device *pmem)
        return to_nd_region(to_dev(pmem)->parent);
 }
 
-static phys_addr_t to_phys(struct pmem_device *pmem, phys_addr_t offset)
+static phys_addr_t pmem_to_phys(struct pmem_device *pmem, phys_addr_t offset)
 {
        return pmem->phys_addr + offset;
 }
@@ -63,7 +63,7 @@ static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector)
 static void pmem_mkpage_present(struct pmem_device *pmem, phys_addr_t offset,
                unsigned int len)
 {
-       phys_addr_t phys = to_phys(pmem, offset);
+       phys_addr_t phys = pmem_to_phys(pmem, offset);
        unsigned long pfn_start, pfn_end, pfn;
 
        /* only pmem in the linear map supports HWPoison */
@@ -97,7 +97,7 @@ static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks)
 static long __pmem_clear_poison(struct pmem_device *pmem,
                phys_addr_t offset, unsigned int len)
 {
-       phys_addr_t phys = to_phys(pmem, offset);
+       phys_addr_t phys = pmem_to_phys(pmem, offset);
        long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len);
 
        if (cleared > 0) {
index 7bc9292..1c573e7 100644 (file)
@@ -314,7 +314,7 @@ static int unflatten_dt_nodes(const void *blob,
        for (offset = 0;
             offset >= 0 && depth >= initial_depth;
             offset = fdt_next_node(blob, offset, &depth)) {
-               if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
+               if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
                        continue;
 
                if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
index 77d1ba3..e87567d 100644 (file)
@@ -873,7 +873,7 @@ int dev_pm_opp_config_clks_simple(struct device *dev,
                }
        }
 
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_config_clks_simple);
 
index f223afe..a663860 100644 (file)
@@ -1546,6 +1546,7 @@ static int __init ccio_probe(struct parisc_device *dev)
        }
        ccio_ioc_init(ioc);
        if (ccio_init_resources(ioc)) {
+               iounmap(ioc->ioc_regs);
                kfree(ioc);
                return -ENOMEM;
        }
index 3a8c986..bdef7a8 100644 (file)
@@ -221,16 +221,7 @@ static size_t irt_num_entry;
 
 static struct irt_entry *iosapic_alloc_irt(int num_entries)
 {
-       unsigned long a;
-
-       /* The IRT needs to be 8-byte aligned for the PDC call. 
-        * Normally kmalloc would guarantee larger alignment, but
-        * if CONFIG_DEBUG_SLAB is enabled, then we can get only
-        * 4-byte alignment on 32-bit kernels
-        */
-       a = (unsigned long)kmalloc(sizeof(struct irt_entry) * num_entries + 8, GFP_KERNEL);
-       a = (a + 7UL) & ~7UL;
-       return (struct irt_entry *)a;
+       return kcalloc(num_entries, sizeof(struct irt_entry), GFP_KERNEL);
 }
 
 /**
index 80d8309..b80a9b7 100644 (file)
@@ -36,7 +36,7 @@
 #define CMN_CI_CHILD_COUNT             GENMASK_ULL(15, 0)
 #define CMN_CI_CHILD_PTR_OFFSET                GENMASK_ULL(31, 16)
 
-#define CMN_CHILD_NODE_ADDR            GENMASK(27, 0)
+#define CMN_CHILD_NODE_ADDR            GENMASK(29, 0)
 #define CMN_CHILD_NODE_EXTERNAL                BIT(31)
 
 #define CMN_MAX_DIMENSION              12
index a4d7d9b..67712c7 100644 (file)
@@ -274,7 +274,6 @@ struct mvebu_a3700_comphy_lane {
        int submode;
        bool invert_tx;
        bool invert_rx;
-       bool needs_reset;
 };
 
 struct gbe_phy_init_data_fix {
@@ -1097,40 +1096,12 @@ mvebu_a3700_comphy_pcie_power_off(struct mvebu_a3700_comphy_lane *lane)
                            0x0, PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT);
 }
 
-static int mvebu_a3700_comphy_reset(struct phy *phy)
+static void mvebu_a3700_comphy_usb3_power_off(struct mvebu_a3700_comphy_lane *lane)
 {
-       struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
-       u16 mask, data;
-
-       dev_dbg(lane->dev, "resetting lane %d\n", lane->id);
-
-       /* COMPHY reset for internal logic */
-       comphy_lane_reg_set(lane, COMPHY_SFT_RESET,
-                           SFT_RST_NO_REG, SFT_RST_NO_REG);
-
-       /* COMPHY register reset (cleared automatically) */
-       comphy_lane_reg_set(lane, COMPHY_SFT_RESET, SFT_RST, SFT_RST);
-
-       /* PIPE soft and register reset */
-       data = PIPE_SOFT_RESET | PIPE_REG_RESET;
-       mask = data;
-       comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, data, mask);
-
-       /* Release PIPE register reset */
-       comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL,
-                           0x0, PIPE_REG_RESET);
-
-       /* Reset SB configuration register (only for lanes 0 and 1) */
-       if (lane->id == 0 || lane->id == 1) {
-               u32 mask, data;
-
-               data = PIN_RESET_CORE_BIT | PIN_RESET_COMPHY_BIT |
-                      PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT;
-               mask = data | PIN_PU_IVREF_BIT | PIN_TX_IDLE_BIT;
-               comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask);
-       }
-
-       return 0;
+       /*
+        * The USB3 MAC sets the USB3 PHY to low state, so we do not
+        * need to power off USB3 PHY again.
+        */
 }
 
 static bool mvebu_a3700_comphy_check_mode(int lane,
@@ -1171,10 +1142,6 @@ static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode,
            (lane->mode != mode || lane->submode != submode))
                return -EBUSY;
 
-       /* If changing mode, ensure reset is called */
-       if (lane->mode != PHY_MODE_INVALID && lane->mode != mode)
-               lane->needs_reset = true;
-
        /* Just remember the mode, ->power_on() will do the real setup */
        lane->mode = mode;
        lane->submode = submode;
@@ -1185,7 +1152,6 @@ static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode,
 static int mvebu_a3700_comphy_power_on(struct phy *phy)
 {
        struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
-       int ret;
 
        if (!mvebu_a3700_comphy_check_mode(lane->id, lane->mode,
                                           lane->submode)) {
@@ -1193,14 +1159,6 @@ static int mvebu_a3700_comphy_power_on(struct phy *phy)
                return -EINVAL;
        }
 
-       if (lane->needs_reset) {
-               ret = mvebu_a3700_comphy_reset(phy);
-               if (ret)
-                       return ret;
-
-               lane->needs_reset = false;
-       }
-
        switch (lane->mode) {
        case PHY_MODE_USB_HOST_SS:
                dev_dbg(lane->dev, "set lane %d to USB3 host mode\n", lane->id);
@@ -1224,38 +1182,28 @@ static int mvebu_a3700_comphy_power_off(struct phy *phy)
 {
        struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
 
-       switch (lane->mode) {
-       case PHY_MODE_USB_HOST_SS:
-               /*
-                * The USB3 MAC sets the USB3 PHY to low state, so we do not
-                * need to power off USB3 PHY again.
-                */
-               break;
-
-       case PHY_MODE_SATA:
-               mvebu_a3700_comphy_sata_power_off(lane);
-               break;
-
-       case PHY_MODE_ETHERNET:
+       switch (lane->id) {
+       case 0:
+               mvebu_a3700_comphy_usb3_power_off(lane);
                mvebu_a3700_comphy_ethernet_power_off(lane);
-               break;
-
-       case PHY_MODE_PCIE:
+               return 0;
+       case 1:
                mvebu_a3700_comphy_pcie_power_off(lane);
-               break;
-
+               mvebu_a3700_comphy_ethernet_power_off(lane);
+               return 0;
+       case 2:
+               mvebu_a3700_comphy_usb3_power_off(lane);
+               mvebu_a3700_comphy_sata_power_off(lane);
+               return 0;
        default:
                dev_err(lane->dev, "invalid COMPHY mode\n");
                return -EINVAL;
        }
-
-       return 0;
 }
 
 static const struct phy_ops mvebu_a3700_comphy_ops = {
        .power_on       = mvebu_a3700_comphy_power_on,
        .power_off      = mvebu_a3700_comphy_power_off,
-       .reset          = mvebu_a3700_comphy_reset,
        .set_mode       = mvebu_a3700_comphy_set_mode,
        .owner          = THIS_MODULE,
 };
@@ -1393,8 +1341,7 @@ static int mvebu_a3700_comphy_probe(struct platform_device *pdev)
                 * To avoid relying on the bootloader/firmware configuration,
                 * power off all comphys.
                 */
-               mvebu_a3700_comphy_reset(phy);
-               lane->needs_reset = false;
+               mvebu_a3700_comphy_power_off(phy);
        }
 
        provider = devm_of_phy_provider_register(&pdev->dev,
index c5fd154..c7df8c5 100644 (file)
@@ -331,6 +331,7 @@ struct ocelot_pinctrl {
        const struct ocelot_pincfg_data *pincfg_data;
        struct ocelot_pmx_func func[FUNC_MAX];
        u8 stride;
+       struct workqueue_struct *wq;
 };
 
 struct ocelot_match_data {
@@ -338,6 +339,11 @@ struct ocelot_match_data {
        struct ocelot_pincfg_data pincfg_data;
 };
 
+struct ocelot_irq_work {
+       struct work_struct irq_work;
+       struct irq_desc *irq_desc;
+};
+
 #define LUTON_P(p, f0, f1)                                             \
 static struct ocelot_pin_caps luton_pin_##p = {                                \
        .pin = p,                                                       \
@@ -1813,6 +1819,75 @@ static void ocelot_irq_mask(struct irq_data *data)
        gpiochip_disable_irq(chip, gpio);
 }
 
+static void ocelot_irq_work(struct work_struct *work)
+{
+       struct ocelot_irq_work *w = container_of(work, struct ocelot_irq_work, irq_work);
+       struct irq_chip *parent_chip = irq_desc_get_chip(w->irq_desc);
+       struct gpio_chip *chip = irq_desc_get_chip_data(w->irq_desc);
+       struct irq_data *data = irq_desc_get_irq_data(w->irq_desc);
+       unsigned int gpio = irqd_to_hwirq(data);
+
+       local_irq_disable();
+       chained_irq_enter(parent_chip, w->irq_desc);
+       generic_handle_domain_irq(chip->irq.domain, gpio);
+       chained_irq_exit(parent_chip, w->irq_desc);
+       local_irq_enable();
+
+       kfree(w);
+}
+
+static void ocelot_irq_unmask_level(struct irq_data *data)
+{
+       struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+       struct ocelot_pinctrl *info = gpiochip_get_data(chip);
+       struct irq_desc *desc = irq_data_to_desc(data);
+       unsigned int gpio = irqd_to_hwirq(data);
+       unsigned int bit = BIT(gpio % 32);
+       bool ack = false, active = false;
+       u8 trigger_level;
+       int val;
+
+       trigger_level = irqd_get_trigger_type(data);
+
+       /* Check if the interrupt line is still active. */
+       regmap_read(info->map, REG(OCELOT_GPIO_IN, info, gpio), &val);
+       if ((!(val & bit) && trigger_level == IRQ_TYPE_LEVEL_LOW) ||
+             (val & bit && trigger_level == IRQ_TYPE_LEVEL_HIGH))
+               active = true;
+
+       /*
+        * Check if the interrupt controller has seen any changes in the
+        * interrupt line.
+        */
+       regmap_read(info->map, REG(OCELOT_GPIO_INTR, info, gpio), &val);
+       if (val & bit)
+               ack = true;
+
+       /* Enable the interrupt now */
+       gpiochip_enable_irq(chip, gpio);
+       regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio),
+                          bit, bit);
+
+       /*
+        * If the interrupt line is still active but the interrupt
+        * controller has not latched any new change on it, another
+        * interrupt arrived while the line was already active and we
+        * missed it, so kick the interrupt handler again from the
+        * ordered workqueue.
+        */
+       if (active && !ack) {
+               struct ocelot_irq_work *work;
+
+               work = kmalloc(sizeof(*work), GFP_ATOMIC);
+               if (!work)
+                       return;
+
+               work->irq_desc = desc;
+               INIT_WORK(&work->irq_work, ocelot_irq_work);
+               queue_work(info->wq, &work->irq_work);
+       }
+}
+
 static void ocelot_irq_unmask(struct irq_data *data)
 {
        struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
@@ -1836,13 +1911,12 @@ static void ocelot_irq_ack(struct irq_data *data)
 
 static int ocelot_irq_set_type(struct irq_data *data, unsigned int type);
 
-static struct irq_chip ocelot_eoi_irqchip = {
+static struct irq_chip ocelot_level_irqchip = {
        .name           = "gpio",
        .irq_mask       = ocelot_irq_mask,
-       .irq_eoi        = ocelot_irq_ack,
-       .irq_unmask     = ocelot_irq_unmask,
-       .flags          = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
-                         IRQCHIP_IMMUTABLE,
+       .irq_ack        = ocelot_irq_ack,
+       .irq_unmask     = ocelot_irq_unmask_level,
+       .flags          = IRQCHIP_IMMUTABLE,
        .irq_set_type   = ocelot_irq_set_type,
        GPIOCHIP_IRQ_RESOURCE_HELPERS
 };
@@ -1859,14 +1933,9 @@ static struct irq_chip ocelot_irqchip = {
 
 static int ocelot_irq_set_type(struct irq_data *data, unsigned int type)
 {
-       type &= IRQ_TYPE_SENSE_MASK;
-
-       if (!(type & (IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_LEVEL_HIGH)))
-               return -EINVAL;
-
-       if (type & IRQ_TYPE_LEVEL_HIGH)
-               irq_set_chip_handler_name_locked(data, &ocelot_eoi_irqchip,
-                                                handle_fasteoi_irq, NULL);
+       if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+               irq_set_chip_handler_name_locked(data, &ocelot_level_irqchip,
+                                                handle_level_irq, NULL);
        if (type & IRQ_TYPE_EDGE_BOTH)
                irq_set_chip_handler_name_locked(data, &ocelot_irqchip,
                                                 handle_edge_irq, NULL);
@@ -1996,6 +2065,10 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
        if (!info->desc)
                return -ENOMEM;
 
+       info->wq = alloc_ordered_workqueue("ocelot_ordered", 0);
+       if (!info->wq)
+               return -ENOMEM;
+
        info->pincfg_data = &data->pincfg_data;
 
        reset = devm_reset_control_get_optional_shared(dev, "switch");
@@ -2018,7 +2091,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
                dev_err(dev, "Failed to create regmap\n");
                return PTR_ERR(info->map);
        }
-       dev_set_drvdata(dev, info->map);
+       dev_set_drvdata(dev, info);
        info->dev = dev;
 
        /* Pinconf registers */
@@ -2043,6 +2116,15 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
        return 0;
 }
 
+static int ocelot_pinctrl_remove(struct platform_device *pdev)
+{
+       struct ocelot_pinctrl *info = platform_get_drvdata(pdev);
+
+       destroy_workqueue(info->wq);
+
+       return 0;
+}
+
 static struct platform_driver ocelot_pinctrl_driver = {
        .driver = {
                .name = "pinctrl-ocelot",
@@ -2050,6 +2132,7 @@ static struct platform_driver ocelot_pinctrl_driver = {
                .suppress_bind_attrs = true,
        },
        .probe = ocelot_pinctrl_probe,
+       .remove = ocelot_pinctrl_remove,
 };
 module_platform_driver(ocelot_pinctrl_driver);
 MODULE_LICENSE("Dual MIT/GPL");
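
With the set_type() change above, level-low triggers, which the old code rejected with -EINVAL, now select ocelot_level_irqchip and handle_level_irq, and the unmask path re-detects events that arrived while the line stayed asserted. A minimal, hypothetical consumer exercising the new low-level trigger (GPIO descriptor, flags and names are illustrative only):

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

static irqreturn_t example_handler(int irq, void *data)
{
        /* a level source must be quiesced here before the line is unmasked */
        return IRQ_HANDLED;
}

static int example_request_level_irq(struct gpio_desc *gpiod, void *data)
{
        int irq = gpiod_to_irq(gpiod);

        if (irq < 0)
                return irq;

        /* Active-low level trigger; ONESHOT keeps it masked until handled. */
        return request_threaded_irq(irq, NULL, example_handler,
                                    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                    "example-gpio", data);
}
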
index 6bec7f1..704a99d 100644 (file)
@@ -530,10 +530,10 @@ DECLARE_MSM_GPIO_PINS(187);
 DECLARE_MSM_GPIO_PINS(188);
 DECLARE_MSM_GPIO_PINS(189);
 
-static const unsigned int sdc2_clk_pins[] = { 190 };
-static const unsigned int sdc2_cmd_pins[] = { 191 };
-static const unsigned int sdc2_data_pins[] = { 192 };
-static const unsigned int ufs_reset_pins[] = { 193 };
+static const unsigned int ufs_reset_pins[] = { 190 };
+static const unsigned int sdc2_clk_pins[] = { 191 };
+static const unsigned int sdc2_cmd_pins[] = { 192 };
+static const unsigned int sdc2_data_pins[] = { 193 };
 
 enum sc8180x_functions {
        msm_mux_adsp_ext,
@@ -1582,7 +1582,7 @@ static const int sc8180x_acpi_reserved_gpios[] = {
 static const struct msm_gpio_wakeirq_map sc8180x_pdc_map[] = {
        { 3, 31 }, { 5, 32 }, { 8, 33 }, { 9, 34 }, { 10, 100 }, { 12, 104 },
        { 24, 37 }, { 26, 38 }, { 27, 41 }, { 28, 42 }, { 30, 39 }, { 36, 43 },
-       { 37, 43 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 },
+       { 37, 44 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 },
        { 42, 48 }, { 46, 50 }, { 47, 49 }, { 48, 51 }, { 49, 53 }, { 50, 52 },
        { 51, 116 }, { 51, 123 }, { 53, 54 }, { 54, 55 }, { 55, 56 },
        { 56, 57 }, { 58, 58 }, { 60, 60 }, { 68, 62 }, { 70, 63 }, { 76, 86 },
index afc1f5d..b82ad13 100644 (file)
@@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(of, a100_r_pinctrl_match);
 static struct platform_driver a100_r_pinctrl_driver = {
        .probe  = a100_r_pinctrl_probe,
        .driver = {
-               .name           = "sun50iw10p1-r-pinctrl",
+               .name           = "sun50i-a100-r-pinctrl",
                .of_match_table = a100_r_pinctrl_match,
        },
 };
index 185a333..d240872 100644 (file)
@@ -329,6 +329,7 @@ static int imx8mp_reset_set(struct reset_controller_dev *rcdev,
                break;
 
        case IMX8MP_RESET_PCIE_CTRL_APPS_EN:
+       case IMX8MP_RESET_PCIEPHY_PERST:
                value = assert ? 0 : bit;
                break;
        }
index 00b612a..f3528dd 100644 (file)
@@ -33,11 +33,8 @@ static struct regmap_config sparx5_reset_regmap_config = {
        .reg_stride     = 4,
 };
 
-static int sparx5_switch_reset(struct reset_controller_dev *rcdev,
-                              unsigned long id)
+static int sparx5_switch_reset(struct mchp_reset_context *ctx)
 {
-       struct mchp_reset_context *ctx =
-               container_of(rcdev, struct mchp_reset_context, rcdev);
        u32 val;
 
        /* Make sure the core is PROTECTED from reset */
@@ -54,8 +51,14 @@ static int sparx5_switch_reset(struct reset_controller_dev *rcdev,
                                        1, 100);
 }
 
+static int sparx5_reset_noop(struct reset_controller_dev *rcdev,
+                            unsigned long id)
+{
+       return 0;
+}
+
 static const struct reset_control_ops sparx5_reset_ops = {
-       .reset = sparx5_switch_reset,
+       .reset = sparx5_reset_noop,
 };
 
 static int mchp_sparx5_map_syscon(struct platform_device *pdev, char *name,
@@ -122,6 +125,11 @@ static int mchp_sparx5_reset_probe(struct platform_device *pdev)
        ctx->rcdev.of_node = dn;
        ctx->props = device_get_match_data(&pdev->dev);
 
+       /* Issue the reset very early; the actual reset callback is a no-op. */
+       err = sparx5_switch_reset(ctx);
+       if (err)
+               return err;
+
        return devm_reset_controller_register(&pdev->dev, &ctx->rcdev);
 }
 
@@ -163,6 +171,10 @@ static int __init mchp_sparx5_reset_init(void)
        return platform_driver_register(&mchp_sparx5_reset_driver);
 }
 
+/*
+ * Because this is a global reset, keep this postcore_initcall() so the reset
+ * is issued as early as possible during kernel startup.
+ */
 postcore_initcall(mchp_sparx5_reset_init);
 
 MODULE_DESCRIPTION("Microchip Sparx5 switch reset driver");
index 24c55ef..f233350 100644 (file)
@@ -291,7 +291,7 @@ static void npcm_usb_reset_npcm8xx(struct npcm_rc_data *rc)
        iprst2 |= ipsrst2_bits;
        iprst3 |= (ipsrst3_bits | NPCM_IPSRST3_USBPHY1 |
                   NPCM_IPSRST3_USBPHY2);
-       iprst2 |= ipsrst4_bits;
+       iprst4 |= ipsrst4_bits;
 
        writel(iprst1, rc->base + NPCM_IPSRST1);
        writel(iprst2, rc->base + NPCM_IPSRST2);
index dc78a52..b6b938a 100644 (file)
@@ -675,12 +675,12 @@ int dasd_alias_remove_device(struct dasd_device *device)
 struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
 {
        struct dasd_eckd_private *alias_priv, *private = base_device->private;
-       struct alias_pav_group *group = private->pavgroup;
        struct alias_lcu *lcu = private->lcu;
        struct dasd_device *alias_device;
+       struct alias_pav_group *group;
        unsigned long flags;
 
-       if (!group || !lcu)
+       if (!lcu)
                return NULL;
        if (lcu->pav == NO_PAV ||
            lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
@@ -697,6 +697,11 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
        }
 
        spin_lock_irqsave(&lcu->lock, flags);
+       group = private->pavgroup;
+       if (!group) {
+               spin_unlock_irqrestore(&lcu->lock, flags);
+               return NULL;
+       }
        alias_device = group->next;
        if (!alias_device) {
                if (list_empty(&group->aliaslist)) {
index 6c8c41f..ee82207 100644 (file)
@@ -984,6 +984,11 @@ static ssize_t assign_adapter_store(struct device *dev,
                goto done;
        }
 
+       if (test_bit_inv(apid, matrix_mdev->matrix.apm)) {
+               ret = count;
+               goto done;
+       }
+
        set_bit_inv(apid, matrix_mdev->matrix.apm);
 
        ret = vfio_ap_mdev_validate_masks(matrix_mdev);
@@ -1109,6 +1114,11 @@ static ssize_t unassign_adapter_store(struct device *dev,
                goto done;
        }
 
+       if (!test_bit_inv(apid, matrix_mdev->matrix.apm)) {
+               ret = count;
+               goto done;
+       }
+
        clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
        vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
        ret = count;
@@ -1183,6 +1193,11 @@ static ssize_t assign_domain_store(struct device *dev,
                goto done;
        }
 
+       if (test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
+               ret = count;
+               goto done;
+       }
+
        set_bit_inv(apqi, matrix_mdev->matrix.aqm);
 
        ret = vfio_ap_mdev_validate_masks(matrix_mdev);
@@ -1286,6 +1301,11 @@ static ssize_t unassign_domain_store(struct device *dev,
                goto done;
        }
 
+       if (!test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
+               ret = count;
+               goto done;
+       }
+
        clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
        vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
        ret = count;
@@ -1329,6 +1349,11 @@ static ssize_t assign_control_domain_store(struct device *dev,
                goto done;
        }
 
+       if (test_bit_inv(id, matrix_mdev->matrix.adm)) {
+               ret = count;
+               goto done;
+       }
+
        /* Set the bit in the ADM (bitmask) corresponding to the AP control
         * domain number (id). The bits in the mask, from most significant to
         * least significant, correspond to IDs 0 up to the one less than the
@@ -1378,6 +1403,11 @@ static ssize_t unassign_control_domain_store(struct device *dev,
                goto done;
        }
 
+       if (!test_bit_inv(domid, matrix_mdev->matrix.adm)) {
+               ret = count;
+               goto done;
+       }
+
        clear_bit_inv(domid, matrix_mdev->matrix.adm);
 
        if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
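
Each of the store handlers above gains the same idempotency check: if the requested adapter/domain bit is already in the requested state, report success without redoing the validation and hotplug work. A generic sketch of the pattern, using the common bitmap API rather than the s390 MSB-0 test_bit_inv()/set_bit_inv() variants used by the driver:

#include <linux/bitops.h>

/* Sketch: an idempotent "assign" sysfs handler (generic bitops shown). */
static ssize_t example_assign(unsigned long id, unsigned long *map,
                              size_t count)
{
        if (test_bit(id, map))
                return count;   /* already assigned: nothing more to do */

        set_bit(id, map);
        /* ...expensive validation / hotplug work only for new assignments... */
        return count;
}
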
index 565339a..331e896 100644 (file)
@@ -2993,7 +2993,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
 
        if (ioc->is_mcpu_endpoint ||
            sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
-           dma_get_required_mask(&pdev->dev) <= 32)
+           dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32))
                ioc->dma_mask = 32;
        /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
        else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
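
The mpt3sas fix above is a units mismatch: dma_get_required_mask() returns a mask (for example 0xffffffff on a system with less than 4 GiB of addressable memory), not a bit count, so comparing it against the integer 32 was effectively never true and the driver never fell back to a 32-bit DMA mask when it should have. A small sketch of the corrected check with the driver-specific state stripped away:

#include <linux/dma-mapping.h>

/* Sketch: pick a 32- or 64-bit DMA mask from the platform's required mask.
 * The required mask is itself a mask, so compare against DMA_BIT_MASK(32),
 * never against the number 32. */
static int example_pick_dma_mask(struct device *dev)
{
        int width = 64;

        if (dma_get_required_mask(dev) <= DMA_BIT_MASK(32))
                width = 32;

        return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(width));
}
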
index 3d6b137..bbc4d58 100644 (file)
@@ -3686,11 +3686,6 @@ err2:
 err1:
        scsi_host_put(lport->host);
 err0:
-       if (qedf) {
-               QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
-
-               clear_bit(QEDF_PROBING, &qedf->flags);
-       }
        return rc;
 }
 
index 62666df..4acff4e 100644 (file)
@@ -2151,8 +2151,10 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
 
        abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
                                le32_to_cpu(abts->exchange_addr_to_abort));
-       if (!abort_cmd)
+       if (!abort_cmd) {
+               mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
                return -EIO;
+       }
        mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;
 
        if (abort_cmd->qpair) {
index 1467bbd..e1d7b45 100644 (file)
@@ -288,7 +288,6 @@ static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
        if (BRCM_ID(family_id) == 0x7260 && BRCM_REV(family_id) == 0)
                cpubiuctrl_regs = b53_cpubiuctrl_no_wb_regs;
 out:
-       of_node_put(np);
        return ret;
 }
 
index a8f3876..09754cd 100644 (file)
@@ -78,8 +78,8 @@ static struct sunxi_sram_desc sun4i_a10_sram_d = {
 
 static struct sunxi_sram_desc sun50i_a64_sram_c = {
        .data   = SUNXI_SRAM_DATA("C", 0x4, 24, 1,
-                                 SUNXI_SRAM_MAP(0, 1, "cpu"),
-                                 SUNXI_SRAM_MAP(1, 0, "de2")),
+                                 SUNXI_SRAM_MAP(1, 0, "cpu"),
+                                 SUNXI_SRAM_MAP(0, 1, "de2")),
 };
 
 static const struct of_device_id sunxi_sram_dt_ids[] = {
@@ -254,6 +254,7 @@ int sunxi_sram_claim(struct device *dev)
        writel(val | ((device << sram_data->offset) & mask),
               base + sram_data->reg);
 
+       sram_desc->claimed = true;
        spin_unlock(&sram_lock);
 
        return 0;
@@ -329,11 +330,11 @@ static struct regmap_config sunxi_sram_emac_clock_regmap = {
        .writeable_reg  = sunxi_sram_regmap_accessible_reg,
 };
 
-static int sunxi_sram_probe(struct platform_device *pdev)
+static int __init sunxi_sram_probe(struct platform_device *pdev)
 {
-       struct dentry *d;
        struct regmap *emac_clock;
        const struct sunxi_sramc_variant *variant;
+       struct device *dev = &pdev->dev;
 
        sram_dev = &pdev->dev;
 
@@ -345,13 +346,6 @@ static int sunxi_sram_probe(struct platform_device *pdev)
        if (IS_ERR(base))
                return PTR_ERR(base);
 
-       of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
-
-       d = debugfs_create_file("sram", S_IRUGO, NULL, NULL,
-                               &sunxi_sram_fops);
-       if (!d)
-               return -ENOMEM;
-
        if (variant->num_emac_clocks > 0) {
                emac_clock = devm_regmap_init_mmio(&pdev->dev, base,
                                                   &sunxi_sram_emac_clock_regmap);
@@ -360,6 +354,10 @@ static int sunxi_sram_probe(struct platform_device *pdev)
                        return PTR_ERR(emac_clock);
        }
 
+       of_platform_populate(dev->of_node, NULL, NULL, dev);
+
+       debugfs_create_file("sram", 0444, NULL, NULL, &sunxi_sram_fops);
+
        return 0;
 }
 
@@ -409,9 +407,8 @@ static struct platform_driver sunxi_sram_driver = {
                .name           = "sunxi-sram",
                .of_match_table = sunxi_sram_dt_match,
        },
-       .probe  = sunxi_sram_probe,
 };
-module_platform_driver(sunxi_sram_driver);
+builtin_platform_driver_probe(sunxi_sram_driver, sunxi_sram_probe);
 
 MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
 MODULE_DESCRIPTION("Allwinner sunXi SRAM Controller Driver");
index ae38f0d..572b589 100644 (file)
@@ -2529,6 +2529,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
                tb->cm_ops = &icm_icl_ops;
                break;
 
+       case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI:
        case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI:
                icm->is_supported = icm_tgl_is_supported;
                icm->get_mode = icm_ar_get_mode;
index f09da5b..01190d9 100644 (file)
@@ -55,6 +55,7 @@ extern const struct tb_nhi_ops icl_nhi_ops;
  * need for the PCI quirk anymore as we will use ICM also on Apple
  * hardware.
  */
+#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI         0x1134
 #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI         0x1137
 #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI            0x157d
 #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE         0x157e
index 0dcecbb..f7fbef8 100644 (file)
@@ -1334,6 +1334,7 @@ static int omap8250_probe(struct platform_device *pdev)
        up.port.throttle = omap_8250_throttle;
        up.port.unthrottle = omap_8250_unthrottle;
        up.port.rs485_config = serial8250_em485_config;
+       up.port.rs485_supported = serial8250_em485_supported;
        up.rs485_start_tx = serial8250_em485_start_tx;
        up.rs485_stop_tx = serial8250_em485_stop_tx;
        up.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
index b20f6f2..fbc4b07 100644 (file)
@@ -2724,14 +2724,15 @@ static int lpuart_probe(struct platform_device *pdev)
                lpuart_reg.cons = LPUART_CONSOLE;
                handler = lpuart_int;
        }
-       ret = uart_add_one_port(&lpuart_reg, &sport->port);
-       if (ret)
-               goto failed_attach_port;
 
        ret = lpuart_global_reset(sport);
        if (ret)
                goto failed_reset;
 
+       ret = uart_add_one_port(&lpuart_reg, &sport->port);
+       if (ret)
+               goto failed_attach_port;
+
        ret = uart_get_rs485_mode(&sport->port);
        if (ret)
                goto failed_get_rs485;
@@ -2747,9 +2748,9 @@ static int lpuart_probe(struct platform_device *pdev)
 
 failed_irq_request:
 failed_get_rs485:
-failed_reset:
        uart_remove_one_port(&lpuart_reg, &sport->port);
 failed_attach_port:
+failed_reset:
        lpuart_disable_clks(sport);
        return ret;
 }
index ad4f356..a5748e4 100644 (file)
@@ -525,7 +525,7 @@ static void tegra_uart_tx_dma_complete(void *args)
        count = tup->tx_bytes_requested - state.residue;
        async_tx_ack(tup->tx_dma_desc);
        spin_lock_irqsave(&tup->uport.lock, flags);
-       xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+       uart_xmit_advance(&tup->uport, count);
        tup->tx_in_progress = 0;
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(&tup->uport);
@@ -613,7 +613,6 @@ static unsigned int tegra_uart_tx_empty(struct uart_port *u)
 static void tegra_uart_stop_tx(struct uart_port *u)
 {
        struct tegra_uart_port *tup = to_tegra_uport(u);
-       struct circ_buf *xmit = &tup->uport.state->xmit;
        struct dma_tx_state state;
        unsigned int count;
 
@@ -624,7 +623,7 @@ static void tegra_uart_stop_tx(struct uart_port *u)
        dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
        count = tup->tx_bytes_requested - state.residue;
        async_tx_ack(tup->tx_dma_desc);
-       xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+       uart_xmit_advance(&tup->uport, count);
        tup->tx_in_progress = 0;
 }
 
index 5c3a075..4b1d4fe 100644 (file)
@@ -945,7 +945,7 @@ static int sifive_serial_probe(struct platform_device *pdev)
                return PTR_ERR(base);
        }
 
-       clk = devm_clk_get(&pdev->dev, NULL);
+       clk = devm_clk_get_enabled(&pdev->dev, NULL);
        if (IS_ERR(clk)) {
                dev_err(&pdev->dev, "unable to find controller clock\n");
                return PTR_ERR(clk);
index 4877c54..889b701 100644 (file)
@@ -101,7 +101,7 @@ static void tegra_tcu_uart_start_tx(struct uart_port *port)
                        break;
 
                tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count);
-               xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+               uart_xmit_advance(port, count);
        }
 
        uart_write_wakeup(port);
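
The serial-tegra and tegra-tcu hunks above replace the open-coded ring update "xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1)" with the uart_xmit_advance() helper, which advances the tail and also credits the transmitted characters to port->icount.tx, something the open-coded version skipped. A short sketch of a TX-completion path using the helper (the function and its caller are illustrative):

#include <linux/serial_core.h>

/* Sketch: credit "count" transmitted bytes back to the xmit ring. */
static void example_tx_done(struct uart_port *port, unsigned int count)
{
        struct circ_buf *xmit = &port->state->xmit;

        uart_xmit_advance(port, count);         /* tail and icount.tx */

        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
}
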
index d4b1e70..bbab424 100644 (file)
@@ -6039,7 +6039,7 @@ re_enumerate:
  *
  * Return: The same as for usb_reset_and_verify_device().
  * However, if a reset is already in progress (for instance, if a
- * driver doesn't have pre_ or post_reset() callbacks, and while
+ * driver doesn't have pre_reset() or post_reset() callbacks, and while
  * being unbound or re-bound during the ongoing reset its disconnect()
  * or probe() routine tries to perform a second, nested reset), the
  * routine returns -EINPROGRESS.
index 8c8e326..d0237b3 100644 (file)
@@ -1752,12 +1752,6 @@ static int dwc3_probe(struct platform_device *pdev)
 
        dwc3_get_properties(dwc);
 
-       if (!dwc->sysdev_is_parent) {
-               ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
-               if (ret)
-                       return ret;
-       }
-
        dwc->reset = devm_reset_control_array_get_optional_shared(dev);
        if (IS_ERR(dwc->reset))
                return PTR_ERR(dwc->reset);
@@ -1823,6 +1817,13 @@ static int dwc3_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, dwc);
        dwc3_cache_hwparams(dwc);
 
+       if (!dwc->sysdev_is_parent &&
+           DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
+               ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
+               if (ret)
+                       goto disable_clks;
+       }
+
        spin_lock_init(&dwc->lock);
        mutex_init(&dwc->mutex);
 
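
The dwc3 hunks above defer the DMA-mask setup until after dwc3_cache_hwparams(), so a 64-bit mask is only requested when GHWPARAMS0 reports a 64-bit address width. The general shape, sizing the mask from a hardware-reported width, is sketched below; the width parameter stands in for decoding DWC3_GHWPARAMS0_AWIDTH and is an assumption, not the driver's exact code:

#include <linux/dma-mapping.h>

/* Sketch: size the DMA mask from the controller's reported address width. */
static int example_set_dma_mask(struct device *sysdev, unsigned int width)
{
        if (width < 32 || width > 64)
                return -EINVAL;

        return dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(width));
}
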
index a5e8374..697683e 100644 (file)
@@ -256,6 +256,7 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_EM060K                 0x030b
 #define QUECTEL_PRODUCT_EM12                   0x0512
 #define QUECTEL_PRODUCT_RM500Q                 0x0800
+#define QUECTEL_PRODUCT_RM520N                 0x0801
 #define QUECTEL_PRODUCT_EC200S_CN              0x6002
 #define QUECTEL_PRODUCT_EC200T                 0x6026
 #define QUECTEL_PRODUCT_RM500K                 0x7001
@@ -1138,6 +1139,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
          .driver_info = NUMEP2 },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
+       { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */
+         .driver_info = ZLP },
        { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
          .driver_info = RSVD(4) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
@@ -1159,6 +1162,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
          .driver_info = ZLP },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
index 5defdfe..831e704 100644 (file)
@@ -56,6 +56,7 @@ config TYPEC_ANX7411
        tristate "Analogix ANX7411 Type-C DRP Port controller driver"
        depends on I2C
        depends on USB_ROLE_SWITCH
+       depends on POWER_SUPPLY
        help
          Say Y or M here if your system has Analogix ANX7411 Type-C DRP Port
          controller driver.
index 886c564..b58b445 100644 (file)
 #define SYNTHVID_DEPTH_WIN8 32
 #define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024)
 
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
-
-
 enum pipe_msg_type {
        PIPE_MSG_INVALID,
        PIPE_MSG_DATA,
index d5f3f76..d4b2519 100644 (file)
@@ -382,9 +382,10 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
        unsigned long ring_size = nr_pages * XEN_PAGE_SIZE;
        grant_ref_t gref_head;
        unsigned int i;
+       void *addr;
        int ret;
 
-       *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
+       addr = *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
        if (!*vaddr) {
                ret = -ENOMEM;
                goto err;
@@ -401,13 +402,15 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
                unsigned long gfn;
 
                if (is_vmalloc_addr(*vaddr))
-                       gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr[i]));
+                       gfn = pfn_to_gfn(vmalloc_to_pfn(addr));
                else
-                       gfn = virt_to_gfn(vaddr[i]);
+                       gfn = virt_to_gfn(addr);
 
                grefs[i] = gnttab_claim_grant_reference(&gref_head);
                gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
                                                gfn, 0);
+
+               addr += XEN_PAGE_SIZE;
        }
 
        return 0;
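
The xenbus bug fixed above is pointer arithmetic on the wrong type: vaddr is a void ** pointing at a single pointer, so vaddr[i] steps through (non-existent) array elements sizeof(void *) apart instead of walking the allocated ring one XEN_PAGE_SIZE at a time; only i == 0 ever referenced the ring. A tiny stand-alone illustration of the two strides (plain userspace C, not Xen code):

#include <stdio.h>

#define PAGE_SZ 4096    /* stands in for XEN_PAGE_SIZE */

int main(void)
{
        /*
         * vaddr in the kernel code is a "void **": indexing it as vaddr[i]
         * moves through an array of pointers, not through the ring buffer.
         */
        printf("step of vaddr[i]:              %zu bytes\n", sizeof(void *));
        /* The fix walks the ring itself, one page per grant reference. */
        printf("step of addr += XEN_PAGE_SIZE: %d bytes\n", PAGE_SZ);
        return 0;
}
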
index 1af28b0..2633137 100644 (file)
@@ -4475,6 +4475,17 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
        set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
 
        /*
+        * If we had UNFINISHED_DROPS we could still be processing them, so
+        * clear that bit and wake up relocation so it can stop.
+        * We must do this before stopping the block group reclaim task, because
+        * at btrfs_relocate_block_group() we wait for this bit, and after the
+        * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we
+        * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will
+        * return 1.
+        */
+       btrfs_wake_unfinished_drop(fs_info);
+
+       /*
         * We may have the reclaim task running and relocating a data block group,
         * in which case it may create delayed iputs. So stop it before we park
         * the cleaner kthread otherwise we can get new delayed iputs after
@@ -4492,12 +4503,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
         */
        kthread_park(fs_info->cleaner_kthread);
 
-       /*
-        * If we had UNFINISHED_DROPS we could still be processing them, so
-        * clear that bit and wake up relocation so it can stop.
-        */
-       btrfs_wake_unfinished_drop(fs_info);
-
        /* wait for the qgroup rescan worker to stop */
        btrfs_qgroup_wait_for_completion(fs_info, false);
 
@@ -4520,6 +4525,31 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
        /* clear out the rbtree of defraggable inodes */
        btrfs_cleanup_defrag_inodes(fs_info);
 
+       /*
+        * After we parked the cleaner kthread, ordered extents may have
+        * completed and created new delayed iputs. If one of the async reclaim
+        * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
+        * can hang forever trying to stop it, because if a delayed iput is
+        * added after it ran btrfs_run_delayed_iputs() and before it called
+        * btrfs_wait_on_delayed_iputs(), it will hang forever since there is
+        * no one else to run iputs.
+        *
+        * So wait for all ongoing ordered extents to complete and then run
+        * delayed iputs. This works because once we reach this point no one
+        * can either create new ordered extents nor create delayed iputs
+        * through some other means.
+        *
+        * Also note that btrfs_wait_ordered_roots() is not safe here, because
+        * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
+        * but the delayed iput for the respective inode is made only when doing
+        * the final btrfs_put_ordered_extent() (which must happen at
+        * btrfs_finish_ordered_io() when we are unmounting).
+        */
+       btrfs_flush_workqueue(fs_info->endio_write_workers);
+       /* Ordered extents for free space inodes. */
+       btrfs_flush_workqueue(fs_info->endio_freespace_worker);
+       btrfs_run_delayed_iputs(fs_info);
+
        cancel_work_sync(&fs_info->async_reclaim_work);
        cancel_work_sync(&fs_info->async_data_reclaim_work);
        cancel_work_sync(&fs_info->preempt_reclaim_work);
index 62e7007..73c6929 100644 (file)
@@ -1918,10 +1918,44 @@ out_unlock:
        return ret;
 }
 
+static void wait_eb_writebacks(struct btrfs_block_group *block_group)
+{
+       struct btrfs_fs_info *fs_info = block_group->fs_info;
+       const u64 end = block_group->start + block_group->length;
+       struct radix_tree_iter iter;
+       struct extent_buffer *eb;
+       void __rcu **slot;
+
+       rcu_read_lock();
+       radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter,
+                                block_group->start >> fs_info->sectorsize_bits) {
+               eb = radix_tree_deref_slot(slot);
+               if (!eb)
+                       continue;
+               if (radix_tree_deref_retry(eb)) {
+                       slot = radix_tree_iter_retry(&iter);
+                       continue;
+               }
+
+               if (eb->start < block_group->start)
+                       continue;
+               if (eb->start >= end)
+                       break;
+
+               slot = radix_tree_iter_resume(slot, &iter);
+               rcu_read_unlock();
+               wait_on_extent_buffer_writeback(eb);
+               rcu_read_lock();
+       }
+       rcu_read_unlock();
+}
+
 static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
 {
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        struct map_lookup *map;
+       const bool is_metadata = (block_group->flags &
+                       (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
        int ret = 0;
        int i;
 
@@ -1932,8 +1966,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
        }
 
        /* Check if we have unwritten allocated space */
-       if ((block_group->flags &
-            (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)) &&
+       if (is_metadata &&
            block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
                spin_unlock(&block_group->lock);
                return -EAGAIN;
@@ -1958,6 +1991,9 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
                /* No need to wait for NOCOW writers. Zoned mode does not allow that */
                btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
                                         block_group->length);
+               /* Wait for extent buffers to be written. */
+               if (is_metadata)
+                       wait_eb_writebacks(block_group);
 
                spin_lock(&block_group->lock);
 
index 81f4c15..5b4a7a3 100644 (file)
@@ -153,6 +153,6 @@ extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
 /* when changing internal version - update following two lines at same time */
-#define SMB3_PRODUCT_BUILD 38
-#define CIFS_VERSION   "2.38"
+#define SMB3_PRODUCT_BUILD 39
+#define CIFS_VERSION   "2.39"
 #endif                         /* _CIFSFS_H */
index a0a06b6..7ae6f2c 100644 (file)
@@ -702,9 +702,6 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
        int length = 0;
        int total_read;
 
-       smb_msg->msg_control = NULL;
-       smb_msg->msg_controllen = 0;
-
        for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
                try_to_freeze();
 
@@ -760,7 +757,7 @@ int
 cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
                      unsigned int to_read)
 {
-       struct msghdr smb_msg;
+       struct msghdr smb_msg = {};
        struct kvec iov = {.iov_base = buf, .iov_len = to_read};
        iov_iter_kvec(&smb_msg.msg_iter, READ, &iov, 1, to_read);
 
@@ -770,15 +767,13 @@ cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
 ssize_t
 cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
 {
-       struct msghdr smb_msg;
+       struct msghdr smb_msg = {};
 
        /*
         *  iov_iter_discard already sets smb_msg.type and count and iov_offset
         *  and cifs_readv_from_socket sets msg_control and msg_controllen
         *  so little to initialize in struct msghdr
         */
-       smb_msg.msg_name = NULL;
-       smb_msg.msg_namelen = 0;
        iov_iter_discard(&smb_msg.msg_iter, READ, to_read);
 
        return cifs_readv_from_socket(server, &smb_msg);
@@ -788,7 +783,7 @@ int
 cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
        unsigned int page_offset, unsigned int to_read)
 {
-       struct msghdr smb_msg;
+       struct msghdr smb_msg = {};
        struct bio_vec bv = {
                .bv_page = page, .bv_len = to_read, .bv_offset = page_offset};
        iov_iter_bvec(&smb_msg.msg_iter, READ, &bv, 1, to_read);
@@ -2350,7 +2345,9 @@ cifs_put_tcon(struct cifs_tcon *tcon)
        ses = tcon->ses;
        cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
        spin_lock(&cifs_tcp_ses_lock);
+       spin_lock(&tcon->tc_lock);
        if (--tcon->tc_count > 0) {
+               spin_unlock(&tcon->tc_lock);
                spin_unlock(&cifs_tcp_ses_lock);
                return;
        }
@@ -2359,6 +2356,7 @@ cifs_put_tcon(struct cifs_tcon *tcon)
        WARN_ON(tcon->tc_count < 0);
 
        list_del_init(&tcon->tcon_list);
+       spin_unlock(&tcon->tc_lock);
        spin_unlock(&cifs_tcp_ses_lock);
 
        /* cancel polling of interfaces */
index fa738ad..6f38b13 100644 (file)
@@ -3575,6 +3575,9 @@ static ssize_t __cifs_writev(
 
 ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
 {
+       struct file *file = iocb->ki_filp;
+
+       cifs_revalidate_mapping(file->f_inode);
        return __cifs_writev(iocb, from, true);
 }
 
index c2fe035..9a2753e 100644 (file)
@@ -194,10 +194,6 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
 
        *sent = 0;
 
-       smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
-       smb_msg->msg_namelen = sizeof(struct sockaddr);
-       smb_msg->msg_control = NULL;
-       smb_msg->msg_controllen = 0;
        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
        else
@@ -309,7 +305,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
-       struct msghdr smb_msg;
+       struct msghdr smb_msg = {};
        __be32 rfc1002_marker;
 
        if (cifs_rdma_enabled(server)) {
index c440dce..1c68678 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1445,6 +1445,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
        loff_t done = 0;
        int ret;
 
+       if (!iomi.len)
+               return 0;
+
        if (iov_iter_rw(iter) == WRITE) {
                lockdep_assert_held_write(&iomi.inode->i_rwsem);
                iomi.flags |= IOMAP_WRITE;
index 9a5ca7b..d046dbb 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -65,7 +65,6 @@
 #include <linux/io_uring.h>
 #include <linux/syscall_user_dispatch.h>
 #include <linux/coredump.h>
-#include <linux/time_namespace.h>
 
 #include <linux/uaccess.h>
 #include <asm/mmu_context.h>
@@ -979,12 +978,10 @@ static int exec_mmap(struct mm_struct *mm)
 {
        struct task_struct *tsk;
        struct mm_struct *old_mm, *active_mm;
-       bool vfork;
        int ret;
 
        /* Notify parent that we're no longer interested in the old VM */
        tsk = current;
-       vfork = !!tsk->vfork_done;
        old_mm = current->mm;
        exec_mm_release(tsk, old_mm);
        if (old_mm)
@@ -1029,10 +1026,6 @@ static int exec_mmap(struct mm_struct *mm)
        tsk->mm->vmacache_seqnum = 0;
        vmacache_flush(tsk);
        task_unlock(tsk);
-
-       if (vfork)
-               timens_on_fork(tsk->nsproxy, tsk);
-
        if (old_mm) {
                mmap_read_unlock(old_mm);
                BUG_ON(active_mm != old_mm);
index ee0b7cf..41ae4cc 100644 (file)
@@ -270,8 +270,7 @@ int exfat_zeroed_cluster(struct inode *dir, unsigned int clu)
        struct super_block *sb = dir->i_sb;
        struct exfat_sb_info *sbi = EXFAT_SB(sb);
        struct buffer_head *bh;
-       sector_t blknr, last_blknr;
-       int i;
+       sector_t blknr, last_blknr, i;
 
        blknr = exfat_cluster_to_sector(sbi, clu);
        last_blknr = blknr + sbi->sect_per_clus;
index 9bca556..3bf9a69 100644 (file)
@@ -167,8 +167,6 @@ enum SHIFT_DIRECTION {
 #define EXT4_MB_CR0_OPTIMIZED          0x8000
 /* Avg fragment size rb tree lookup succeeded at least once for cr = 1 */
 #define EXT4_MB_CR1_OPTIMIZED          0x00010000
-/* Perform linear traversal for one group */
-#define EXT4_MB_SEARCH_NEXT_LINEAR     0x00020000
 struct ext4_allocation_request {
        /* target inode for block we're allocating */
        struct inode *inode;
@@ -1600,8 +1598,8 @@ struct ext4_sb_info {
        struct list_head s_discard_list;
        struct work_struct s_discard_work;
        atomic_t s_retry_alloc_pending;
-       struct rb_root s_mb_avg_fragment_size_root;
-       rwlock_t s_mb_rb_lock;
+       struct list_head *s_mb_avg_fragment_size;
+       rwlock_t *s_mb_avg_fragment_size_locks;
        struct list_head *s_mb_largest_free_orders;
        rwlock_t *s_mb_largest_free_orders_locks;
 
@@ -3413,6 +3411,8 @@ struct ext4_group_info {
        ext4_grpblk_t   bb_first_free;  /* first free block */
        ext4_grpblk_t   bb_free;        /* total free blocks */
        ext4_grpblk_t   bb_fragments;   /* nr of freespace fragments */
+       int             bb_avg_fragment_size_order;     /* order of average
+                                                          fragment in BG */
        ext4_grpblk_t   bb_largest_free_order;/* order of largest frag in BG */
        ext4_group_t    bb_group;       /* Group number */
        struct          list_head bb_prealloc_list;
@@ -3420,7 +3420,7 @@ struct ext4_group_info {
        void            *bb_bitmap;
 #endif
        struct rw_semaphore alloc_sem;
-       struct rb_node  bb_avg_fragment_size_rb;
+       struct list_head bb_avg_fragment_size_node;
        struct list_head bb_largest_free_order_node;
        ext4_grpblk_t   bb_counters[];  /* Nr of free power-of-two-block
                                         * regions, index is order.
index c148bb9..5235974 100644 (file)
@@ -460,6 +460,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
                error_msg = "invalid eh_entries";
                goto corrupted;
        }
+       if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
+               error_msg = "eh_entries is 0 but eh_depth is > 0";
+               goto corrupted;
+       }
        if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
                error_msg = "invalid extent entries";
                goto corrupted;
index f73e5eb..208b87c 100644 (file)
@@ -510,7 +510,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
                goto fallback;
        }
 
-       max_dirs = ndirs / ngroups + inodes_per_group / 16;
+       max_dirs = ndirs / ngroups + inodes_per_group*flex_size / 16;
        min_inodes = avefreei - inodes_per_group*flex_size / 4;
        if (min_inodes < 1)
                min_inodes = 1;
index bd8f8b5..9dad930 100644 (file)
  *    number of buddy bitmap orders possible) number of lists. Group-infos are
  *    placed in appropriate lists.
  *
- * 2) Average fragment size rb tree (sbi->s_mb_avg_fragment_size_root)
+ * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
  *
- *    Locking: sbi->s_mb_rb_lock (rwlock)
+ *    Locking: sbi->s_mb_avg_fragment_size_locks(array of rw locks)
  *
- *    This is a red black tree consisting of group infos and the tree is sorted
- *    by average fragment sizes (which is calculated as ext4_group_info->bb_free
- *    / ext4_group_info->bb_fragments).
+ *    This is an array of lists where in the i-th list there are groups with
+ *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
+ *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
+ *    Note that we don't bother with a special list for completely empty groups
+ *    so we only have MB_NUM_ORDERS(sb) lists.
  *
  * When "mb_optimize_scan" mount option is set, mballoc consults the above data
  * structures to decide the order in which groups are to be traversed for
  *
  * At CR = 1, we only consider groups where average fragment size > request
  * size. So, we lookup a group which has average fragment size just above or
- * equal to request size using our rb tree (data structure 2) in O(log N) time.
+ * equal to request size using our average fragment size group lists (data
+ * structure 2) in O(1) time.
  *
  * If "mb_optimize_scan" mount option is not set, mballoc traverses groups in
  * linear order which requires O(N) search time for each CR 0 and CR 1 phase.
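
The list index used by the new structure comes from mb_avg_fragment_size_order(), added further down in this patch: essentially fls(average fragment size) - 2, clamped to the range [0, MB_NUM_ORDERS(sb) - 1]. The same function also picks the first list to search at CR 1. A small userspace sketch of that mapping, handy for checking which list a given average fragment size lands in (14 lists here assumes a 4 KiB block size; the fls() stand-in and the sample values are illustrative):

#include <stdio.h>

/* Userspace sketch of mb_avg_fragment_size_order(): fls(len) - 2, clamped. */
static int fls_sketch(unsigned int x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;               /* 1-based position of the highest set bit */
}

static int avg_fragment_size_order(int mb_num_orders, int len)
{
        int order = fls_sketch(len) - 2;

        if (order < 0)
                return 0;
        if (order == mb_num_orders)
                order--;
        return order;
}

int main(void)
{
        int sizes[] = { 1, 2, 4, 7, 8, 64, 1024 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("avg fragment %4d blocks -> list %d\n",
                       sizes[i], avg_fragment_size_order(14, sizes[i]));
        return 0;
}
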
@@ -802,65 +805,51 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
        }
 }
 
-static void ext4_mb_rb_insert(struct rb_root *root, struct rb_node *new,
-                       int (*cmp)(struct rb_node *, struct rb_node *))
+static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
 {
-       struct rb_node **iter = &root->rb_node, *parent = NULL;
+       int order;
 
-       while (*iter) {
-               parent = *iter;
-               if (cmp(new, *iter) > 0)
-                       iter = &((*iter)->rb_left);
-               else
-                       iter = &((*iter)->rb_right);
-       }
-
-       rb_link_node(new, parent, iter);
-       rb_insert_color(new, root);
-}
-
-static int
-ext4_mb_avg_fragment_size_cmp(struct rb_node *rb1, struct rb_node *rb2)
-{
-       struct ext4_group_info *grp1 = rb_entry(rb1,
-                                               struct ext4_group_info,
-                                               bb_avg_fragment_size_rb);
-       struct ext4_group_info *grp2 = rb_entry(rb2,
-                                               struct ext4_group_info,
-                                               bb_avg_fragment_size_rb);
-       int num_frags_1, num_frags_2;
-
-       num_frags_1 = grp1->bb_fragments ?
-               grp1->bb_free / grp1->bb_fragments : 0;
-       num_frags_2 = grp2->bb_fragments ?
-               grp2->bb_free / grp2->bb_fragments : 0;
-
-       return (num_frags_2 - num_frags_1);
+       /*
+        * We don't bother with a special list for groups whose average
+        * fragment size is a single block, nor for completely empty groups.
+        */
+       order = fls(len) - 2;
+       if (order < 0)
+               return 0;
+       if (order == MB_NUM_ORDERS(sb))
+               order--;
+       return order;
 }
 
-/*
- * Reinsert grpinfo into the avg_fragment_size tree with new average
- * fragment size.
- */
+/* Move group to appropriate avg_fragment_size list */
 static void
 mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
+       int new_order;
 
        if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
                return;
 
-       write_lock(&sbi->s_mb_rb_lock);
-       if (!RB_EMPTY_NODE(&grp->bb_avg_fragment_size_rb)) {
-               rb_erase(&grp->bb_avg_fragment_size_rb,
-                               &sbi->s_mb_avg_fragment_size_root);
-               RB_CLEAR_NODE(&grp->bb_avg_fragment_size_rb);
-       }
+       new_order = mb_avg_fragment_size_order(sb,
+                                       grp->bb_free / grp->bb_fragments);
+       if (new_order == grp->bb_avg_fragment_size_order)
+               return;
 
-       ext4_mb_rb_insert(&sbi->s_mb_avg_fragment_size_root,
-               &grp->bb_avg_fragment_size_rb,
-               ext4_mb_avg_fragment_size_cmp);
-       write_unlock(&sbi->s_mb_rb_lock);
+       if (grp->bb_avg_fragment_size_order != -1) {
+               write_lock(&sbi->s_mb_avg_fragment_size_locks[
+                                       grp->bb_avg_fragment_size_order]);
+               list_del(&grp->bb_avg_fragment_size_node);
+               write_unlock(&sbi->s_mb_avg_fragment_size_locks[
+                                       grp->bb_avg_fragment_size_order]);
+       }
+       grp->bb_avg_fragment_size_order = new_order;
+       write_lock(&sbi->s_mb_avg_fragment_size_locks[
+                                       grp->bb_avg_fragment_size_order]);
+       list_add_tail(&grp->bb_avg_fragment_size_node,
+               &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
+       write_unlock(&sbi->s_mb_avg_fragment_size_locks[
+                                       grp->bb_avg_fragment_size_order]);
 }
 
 /*
@@ -909,86 +898,55 @@ static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
                *new_cr = 1;
        } else {
                *group = grp->bb_group;
-               ac->ac_last_optimal_group = *group;
                ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
        }
 }
 
 /*
- * Choose next group by traversing average fragment size tree. Updates *new_cr
- * if cr lvel needs an update. Sets EXT4_MB_SEARCH_NEXT_LINEAR to indicate that
- * the linear search should continue for one iteration since there's lock
- * contention on the rb tree lock.
+ * Choose next group by traversing average fragment size list of suitable
+ * order. Updates *new_cr if cr level needs an update.
  */
 static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
                int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
 {
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
-       int avg_fragment_size, best_so_far;
-       struct rb_node *node, *found;
-       struct ext4_group_info *grp;
-
-       /*
-        * If there is contention on the lock, instead of waiting for the lock
-        * to become available, just continue searching lineraly. We'll resume
-        * our rb tree search later starting at ac->ac_last_optimal_group.
-        */
-       if (!read_trylock(&sbi->s_mb_rb_lock)) {
-               ac->ac_flags |= EXT4_MB_SEARCH_NEXT_LINEAR;
-               return;
-       }
+       struct ext4_group_info *grp = NULL, *iter;
+       int i;
 
        if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
                if (sbi->s_mb_stats)
                        atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
-               /* We have found something at CR 1 in the past */
-               grp = ext4_get_group_info(ac->ac_sb, ac->ac_last_optimal_group);
-               for (found = rb_next(&grp->bb_avg_fragment_size_rb); found != NULL;
-                    found = rb_next(found)) {
-                       grp = rb_entry(found, struct ext4_group_info,
-                                      bb_avg_fragment_size_rb);
+       }
+
+       for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
+            i < MB_NUM_ORDERS(ac->ac_sb); i++) {
+               if (list_empty(&sbi->s_mb_avg_fragment_size[i]))
+                       continue;
+               read_lock(&sbi->s_mb_avg_fragment_size_locks[i]);
+               if (list_empty(&sbi->s_mb_avg_fragment_size[i])) {
+                       read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
+                       continue;
+               }
+               list_for_each_entry(iter, &sbi->s_mb_avg_fragment_size[i],
+                                   bb_avg_fragment_size_node) {
                        if (sbi->s_mb_stats)
                                atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
-                       if (likely(ext4_mb_good_group(ac, grp->bb_group, 1)))
+                       if (likely(ext4_mb_good_group(ac, iter->bb_group, 1))) {
+                               grp = iter;
                                break;
-               }
-               goto done;
-       }
-
-       node = sbi->s_mb_avg_fragment_size_root.rb_node;
-       best_so_far = 0;
-       found = NULL;
-
-       while (node) {
-               grp = rb_entry(node, struct ext4_group_info,
-                              bb_avg_fragment_size_rb);
-               avg_fragment_size = 0;
-               if (ext4_mb_good_group(ac, grp->bb_group, 1)) {
-                       avg_fragment_size = grp->bb_fragments ?
-                               grp->bb_free / grp->bb_fragments : 0;
-                       if (!best_so_far || avg_fragment_size < best_so_far) {
-                               best_so_far = avg_fragment_size;
-                               found = node;
                        }
                }
-               if (avg_fragment_size > ac->ac_g_ex.fe_len)
-                       node = node->rb_right;
-               else
-                       node = node->rb_left;
+               read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
+               if (grp)
+                       break;
        }
 
-done:
-       if (found) {
-               grp = rb_entry(found, struct ext4_group_info,
-                              bb_avg_fragment_size_rb);
+       if (grp) {
                *group = grp->bb_group;
                ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
        } else {
                *new_cr = 2;
        }
-
-       read_unlock(&sbi->s_mb_rb_lock);
-       ac->ac_last_optimal_group = *group;
 }
 
 static inline int should_optimize_scan(struct ext4_allocation_context *ac)
@@ -1017,11 +975,6 @@ next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
                goto inc_and_return;
        }
 
-       if (ac->ac_flags & EXT4_MB_SEARCH_NEXT_LINEAR) {
-               ac->ac_flags &= ~EXT4_MB_SEARCH_NEXT_LINEAR;
-               goto inc_and_return;
-       }
-
        return group;
 inc_and_return:
        /*
@@ -1049,8 +1002,10 @@ static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
 {
        *new_cr = ac->ac_criteria;
 
-       if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining)
+       if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
+               *group = next_linear_group(ac, *group, ngroups);
                return;
+       }
 
        if (*new_cr == 0) {
                ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
@@ -1075,23 +1030,25 @@ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int i;
 
-       if (test_opt2(sb, MB_OPTIMIZE_SCAN) && grp->bb_largest_free_order >= 0) {
+       for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
+               if (grp->bb_counters[i] > 0)
+                       break;
+       /* No need to move between order lists? */
+       if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
+           i == grp->bb_largest_free_order) {
+               grp->bb_largest_free_order = i;
+               return;
+       }
+
+       if (grp->bb_largest_free_order >= 0) {
                write_lock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
                list_del_init(&grp->bb_largest_free_order_node);
                write_unlock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
        }
-       grp->bb_largest_free_order = -1; /* uninit */
-
-       for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) {
-               if (grp->bb_counters[i] > 0) {
-                       grp->bb_largest_free_order = i;
-                       break;
-               }
-       }
-       if (test_opt2(sb, MB_OPTIMIZE_SCAN) &&
-           grp->bb_largest_free_order >= 0 && grp->bb_free) {
+       grp->bb_largest_free_order = i;
+       if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
                write_lock(&sbi->s_mb_largest_free_orders_locks[
                                              grp->bb_largest_free_order]);
                list_add_tail(&grp->bb_largest_free_order_node,
@@ -1148,13 +1105,13 @@ void ext4_mb_generate_buddy(struct super_block *sb,
                                        EXT4_GROUP_INFO_BBITMAP_CORRUPT);
        }
        mb_set_largest_free_order(sb, grp);
+       mb_update_avg_fragment_size(sb, grp);
 
        clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
 
        period = get_cycles() - period;
        atomic_inc(&sbi->s_mb_buddies_generated);
        atomic64_add(period, &sbi->s_mb_generation_time);
-       mb_update_avg_fragment_size(sb, grp);
 }
 
 /* The buddy information is attached the buddy cache inode
@@ -2636,7 +2593,7 @@ static noinline_for_stack int
 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 {
        ext4_group_t prefetch_grp = 0, ngroups, group, i;
-       int cr = -1;
+       int cr = -1, new_cr;
        int err = 0, first_err = 0;
        unsigned int nr = 0, prefetch_ios = 0;
        struct ext4_sb_info *sbi;
@@ -2707,17 +2664,14 @@ repeat:
                 * from the goal value specified
                 */
                group = ac->ac_g_ex.fe_group;
-               ac->ac_last_optimal_group = group;
                ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
                prefetch_grp = group;
 
-               for (i = 0; i < ngroups; group = next_linear_group(ac, group, ngroups),
-                            i++) {
-                       int ret = 0, new_cr;
+               for (i = 0, new_cr = cr; i < ngroups; i++,
+                    ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
+                       int ret = 0;
 
                        cond_resched();
-
-                       ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups);
                        if (new_cr != cr) {
                                cr = new_cr;
                                goto repeat;
@@ -2991,9 +2945,7 @@ __acquires(&EXT4_SB(sb)->s_mb_rb_lock)
        struct super_block *sb = pde_data(file_inode(seq->file));
        unsigned long position;
 
-       read_lock(&EXT4_SB(sb)->s_mb_rb_lock);
-
-       if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1)
+       if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
                return NULL;
        position = *pos + 1;
        return (void *) ((unsigned long) position);
@@ -3005,7 +2957,7 @@ static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, lof
        unsigned long position;
 
        ++*pos;
-       if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1)
+       if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
                return NULL;
        position = *pos + 1;
        return (void *) ((unsigned long) position);
@@ -3017,29 +2969,22 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        unsigned long position = ((unsigned long) v);
        struct ext4_group_info *grp;
-       struct rb_node *n;
-       unsigned int count, min, max;
+       unsigned int count;
 
        position--;
        if (position >= MB_NUM_ORDERS(sb)) {
-               seq_puts(seq, "fragment_size_tree:\n");
-               n = rb_first(&sbi->s_mb_avg_fragment_size_root);
-               if (!n) {
-                       seq_puts(seq, "\ttree_min: 0\n\ttree_max: 0\n\ttree_nodes: 0\n");
-                       return 0;
-               }
-               grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb);
-               min = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0;
-               count = 1;
-               while (rb_next(n)) {
-                       count++;
-                       n = rb_next(n);
-               }
-               grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb);
-               max = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0;
+               position -= MB_NUM_ORDERS(sb);
+               if (position == 0)
+                       seq_puts(seq, "avg_fragment_size_lists:\n");
 
-               seq_printf(seq, "\ttree_min: %u\n\ttree_max: %u\n\ttree_nodes: %u\n",
-                          min, max, count);
+               count = 0;
+               read_lock(&sbi->s_mb_avg_fragment_size_locks[position]);
+               list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position],
+                                   bb_avg_fragment_size_node)
+                       count++;
+               read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]);
+               seq_printf(seq, "\tlist_order_%u_groups: %u\n",
+                                       (unsigned int)position, count);
                return 0;
        }
 
@@ -3049,9 +2994,11 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
                seq_puts(seq, "max_free_order_lists:\n");
        }
        count = 0;
+       read_lock(&sbi->s_mb_largest_free_orders_locks[position]);
        list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
                            bb_largest_free_order_node)
                count++;
+       read_unlock(&sbi->s_mb_largest_free_orders_locks[position]);
        seq_printf(seq, "\tlist_order_%u_groups: %u\n",
                   (unsigned int)position, count);
 
@@ -3059,11 +3006,7 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
 }
 
 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
-__releases(&EXT4_SB(sb)->s_mb_rb_lock)
 {
-       struct super_block *sb = pde_data(file_inode(seq->file));
-
-       read_unlock(&EXT4_SB(sb)->s_mb_rb_lock);
 }
 
 const struct seq_operations ext4_mb_seq_structs_summary_ops = {
@@ -3176,8 +3119,9 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
        init_rwsem(&meta_group_info[i]->alloc_sem);
        meta_group_info[i]->bb_free_root = RB_ROOT;
        INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
-       RB_CLEAR_NODE(&meta_group_info[i]->bb_avg_fragment_size_rb);
+       INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
        meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
+       meta_group_info[i]->bb_avg_fragment_size_order = -1;  /* uninit */
        meta_group_info[i]->bb_group = group;
 
        mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
@@ -3426,7 +3370,24 @@ int ext4_mb_init(struct super_block *sb)
                i++;
        } while (i < MB_NUM_ORDERS(sb));
 
-       sbi->s_mb_avg_fragment_size_root = RB_ROOT;
+       sbi->s_mb_avg_fragment_size =
+               kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
+                       GFP_KERNEL);
+       if (!sbi->s_mb_avg_fragment_size) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       sbi->s_mb_avg_fragment_size_locks =
+               kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
+                       GFP_KERNEL);
+       if (!sbi->s_mb_avg_fragment_size_locks) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
+               INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
+               rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
+       }
        sbi->s_mb_largest_free_orders =
                kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
                        GFP_KERNEL);
@@ -3445,7 +3406,6 @@ int ext4_mb_init(struct super_block *sb)
                INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
                rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
        }
-       rwlock_init(&sbi->s_mb_rb_lock);
 
        spin_lock_init(&sbi->s_md_lock);
        sbi->s_mb_free_pending = 0;
@@ -3516,6 +3476,8 @@ out_free_locality_groups:
        free_percpu(sbi->s_locality_groups);
        sbi->s_locality_groups = NULL;
 out:
+       kfree(sbi->s_mb_avg_fragment_size);
+       kfree(sbi->s_mb_avg_fragment_size_locks);
        kfree(sbi->s_mb_largest_free_orders);
        kfree(sbi->s_mb_largest_free_orders_locks);
        kfree(sbi->s_mb_offsets);
@@ -3582,6 +3544,8 @@ int ext4_mb_release(struct super_block *sb)
                kvfree(group_info);
                rcu_read_unlock();
        }
+       kfree(sbi->s_mb_avg_fragment_size);
+       kfree(sbi->s_mb_avg_fragment_size_locks);
        kfree(sbi->s_mb_largest_free_orders);
        kfree(sbi->s_mb_largest_free_orders_locks);
        kfree(sbi->s_mb_offsets);
@@ -5193,6 +5157,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        int bsbits = ac->ac_sb->s_blocksize_bits;
        loff_t size, isize;
+       bool inode_pa_eligible, group_pa_eligible;
 
        if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
                return;
@@ -5200,25 +5165,27 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
        if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
                return;
 
+       group_pa_eligible = sbi->s_mb_group_prealloc > 0;
+       inode_pa_eligible = true;
        size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
        isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
                >> bsbits;
 
+       /* No point in using inode preallocation for closed files */
        if ((size == isize) && !ext4_fs_is_busy(sbi) &&
-           !inode_is_open_for_write(ac->ac_inode)) {
-               ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
-               return;
-       }
+           !inode_is_open_for_write(ac->ac_inode))
+               inode_pa_eligible = false;
 
-       if (sbi->s_mb_group_prealloc <= 0) {
-               ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
-               return;
-       }
-
-       /* don't use group allocation for large files */
        size = max(size, isize);
-       if (size > sbi->s_mb_stream_request) {
-               ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
+       /* Don't use group allocation for large files */
+       if (size > sbi->s_mb_stream_request)
+               group_pa_eligible = false;
+
+       if (!group_pa_eligible) {
+               if (inode_pa_eligible)
+                       ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
+               else
+                       ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
                return;
        }
 
@@ -5565,6 +5532,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
        ext4_fsblk_t block = 0;
        unsigned int inquota = 0;
        unsigned int reserv_clstrs = 0;
+       int retries = 0;
        u64 seq;
 
        might_sleep();
@@ -5667,7 +5635,8 @@ repeat:
                        ar->len = ac->ac_b_ex.fe_len;
                }
        } else {
-               if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
+               if (++retries < 3 &&
+                   ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
                        goto repeat;
                /*
                 * If block allocation fails then the pa allocated above
index 39da92c..dcda2a9 100644 (file)
@@ -178,7 +178,6 @@ struct ext4_allocation_context {
        /* copy of the best found extent taken before preallocation efforts */
        struct ext4_free_extent ac_f_ex;
 
-       ext4_group_t ac_last_optimal_group;
        __u32 ac_groups_considered;
        __u32 ac_flags;         /* allocation hints */
        __u16 ac_groups_scanned;
index 27c720d..898dd95 100644 (file)
@@ -606,6 +606,31 @@ static inline gfp_t nfs_io_gfp_mask(void)
        return GFP_KERNEL;
 }
 
+/*
+ * Special version of should_remove_suid() that ignores capabilities.
+ */
+static inline int nfs_should_remove_suid(const struct inode *inode)
+{
+       umode_t mode = inode->i_mode;
+       int kill = 0;
+
+       /* suid always must be killed */
+       if (unlikely(mode & S_ISUID))
+               kill = ATTR_KILL_SUID;
+
+       /*
+        * sgid without any exec bits is just a mandatory locking mark; leave
+        * it alone.  If some exec bits are set, it's a real sgid; kill it.
+        */
+       if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
+               kill |= ATTR_KILL_SGID;
+
+       if (unlikely(kill && S_ISREG(mode)))
+               return kill;
+
+       return 0;
+}
+
 /* unlink.c */
 extern struct rpc_task *
 nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
index 068c45b..6dab9e4 100644 (file)
@@ -78,10 +78,15 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
 
        status = nfs4_call_sync(server->client, server, msg,
                                &args.seq_args, &res.seq_res, 0);
-       if (status == 0)
+       if (status == 0) {
+               if (nfs_should_remove_suid(inode)) {
+                       spin_lock(&inode->i_lock);
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
+                       spin_unlock(&inode->i_lock);
+               }
                status = nfs_post_op_update_inode_force_wcc(inode,
                                                            res.falloc_fattr);
-
+       }
        if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE])
                trace_nfs4_fallocate(inode, &args, status);
        else
index 82944e1..ee66ffd 100644 (file)
@@ -1051,22 +1051,31 @@ static void nfs_fill_super(struct super_block *sb, struct nfs_fs_context *ctx)
        if (ctx->bsize)
                sb->s_blocksize = nfs_block_size(ctx->bsize, &sb->s_blocksize_bits);
 
-       if (server->nfs_client->rpc_ops->version != 2) {
-               /* The VFS shouldn't apply the umask to mode bits. We will do
-                * so ourselves when necessary.
+       switch (server->nfs_client->rpc_ops->version) {
+       case 2:
+               sb->s_time_gran = 1000;
+               sb->s_time_min = 0;
+               sb->s_time_max = U32_MAX;
+               break;
+       case 3:
+               /*
+                * The VFS shouldn't apply the umask to mode bits.
+                * We will do so ourselves when necessary.
                 */
                sb->s_flags |= SB_POSIXACL;
                sb->s_time_gran = 1;
-               sb->s_export_op = &nfs_export_ops;
-       } else
-               sb->s_time_gran = 1000;
-
-       if (server->nfs_client->rpc_ops->version != 4) {
                sb->s_time_min = 0;
                sb->s_time_max = U32_MAX;
-       } else {
+               sb->s_export_op = &nfs_export_ops;
+               break;
+       case 4:
+               sb->s_flags |= SB_POSIXACL;
+               sb->s_time_gran = 1;
                sb->s_time_min = S64_MIN;
                sb->s_time_max = S64_MAX;
+               if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
+                       sb->s_export_op = &nfs_export_ops;
+               break;
        }
 
        sb->s_magic = NFS_SUPER_MAGIC;
index 1843fa2..f41d24b 100644 (file)
@@ -1496,31 +1496,6 @@ void nfs_commit_prepare(struct rpc_task *task, void *calldata)
        NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
 }
 
-/*
- * Special version of should_remove_suid() that ignores capabilities.
- */
-static int nfs_should_remove_suid(const struct inode *inode)
-{
-       umode_t mode = inode->i_mode;
-       int kill = 0;
-
-       /* suid always must be killed */
-       if (unlikely(mode & S_ISUID))
-               kill = ATTR_KILL_SUID;
-
-       /*
-        * sgid without any exec bits is just a mandatory locking mark; leave
-        * it alone.  If some exec bits are set, it's a real sgid; kill it.
-        */
-       if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
-               kill |= ATTR_KILL_SGID;
-
-       if (unlikely(kill && S_ISREG(mode)))
-               return kill;
-
-       return 0;
-}
-
 static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
                struct nfs_fattr *fattr)
 {
index 9f486b7..fc17b0a 100644 (file)
@@ -300,6 +300,10 @@ commit_metadata(struct svc_fh *fhp)
 static void
 nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
 {
+       /* Ignore mode updates on symlinks */
+       if (S_ISLNK(inode->i_mode))
+               iap->ia_valid &= ~ATTR_MODE;
+
        /* sanitize the mode change */
        if (iap->ia_valid & ATTR_MODE) {
                iap->ia_mode &= S_IALLUGO;
@@ -353,7 +357,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
        int             accmode = NFSD_MAY_SATTR;
        umode_t         ftype = 0;
        __be32          err;
-       int             host_err;
+       int             host_err = 0;
        bool            get_write_count;
        bool            size_change = (iap->ia_valid & ATTR_SIZE);
 
@@ -391,13 +395,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
        dentry = fhp->fh_dentry;
        inode = d_inode(dentry);
 
-       /* Ignore any mode updates on symlinks */
-       if (S_ISLNK(inode->i_mode))
-               iap->ia_valid &= ~ATTR_MODE;
-
-       if (!iap->ia_valid)
-               return 0;
-
        nfsd_sanitize_attrs(inode, iap);
 
        if (check_guard && guardtime != inode->i_ctime.tv_sec)
@@ -448,8 +445,10 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
                        goto out_unlock;
        }
 
-       iap->ia_valid |= ATTR_CTIME;
-       host_err = notify_change(&init_user_ns, dentry, iap, NULL);
+       if (iap->ia_valid) {
+               iap->ia_valid |= ATTR_CTIME;
+               host_err = notify_change(&init_user_ns, dentry, iap, NULL);
+       }
 
 out_unlock:
        if (attr->na_seclabel && attr->na_seclabel->len)
@@ -846,10 +845,14 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
                  struct splice_desc *sd)
 {
        struct svc_rqst *rqstp = sd->u.data;
-
-       svc_rqst_replace_page(rqstp, buf->page);
-       if (rqstp->rq_res.page_len == 0)
-               rqstp->rq_res.page_base = buf->offset;
+       struct page *page = buf->page;  // may be a compound one
+       unsigned offset = buf->offset;
+
+       page += offset / PAGE_SIZE;
+       for (int i = sd->len; i > 0; i -= PAGE_SIZE)
+               svc_rqst_replace_page(rqstp, page++);
+       if (rqstp->rq_res.page_len == 0)        // first call
+               rqstp->rq_res.page_base = offset % PAGE_SIZE;
        rqstp->rq_res.page_len += sd->len;
        return sd->len;
 }
index 5ae8de0..001f4e0 100644 (file)
@@ -2092,7 +2092,8 @@ get_ctx_vol_failed:
        // TODO: Initialize security.
        /* Get the extended system files' directory inode. */
        vol->extend_ino = ntfs_iget(sb, FILE_Extend);
-       if (IS_ERR(vol->extend_ino) || is_bad_inode(vol->extend_ino)) {
+       if (IS_ERR(vol->extend_ino) || is_bad_inode(vol->extend_ino) ||
+           !S_ISDIR(vol->extend_ino->i_mode)) {
                if (!IS_ERR(vol->extend_ino))
                        iput(vol->extend_ino);
                ntfs_error(sb, "Failed to load $Extend.");
index 8a813fa..cf7e5c3 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -716,6 +716,8 @@ int chown_common(const struct path *path, uid_t user, gid_t group)
        fs_userns = i_user_ns(inode);
 
 retry_deleg:
+       newattrs.ia_vfsuid = INVALID_VFSUID;
+       newattrs.ia_vfsgid = INVALID_VFSGID;
        newattrs.ia_valid =  ATTR_CTIME;
        if ((user != (uid_t)-1) && !setattr_vfsuid(&newattrs, uid))
                return -EINVAL;
index 69d9c83..5b1f9a2 100644 (file)
@@ -175,13 +175,13 @@ xfs_dax_notify_failure(
        u64                     ddev_start;
        u64                     ddev_end;
 
-       if (!(mp->m_sb.sb_flags & SB_BORN)) {
+       if (!(mp->m_super->s_flags & SB_BORN)) {
                xfs_warn(mp, "filesystem is not ready for notify_failure()!");
                return -EIO;
        }
 
        if (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_daxdev == dax_dev) {
-               xfs_warn(mp,
+               xfs_debug(mp,
                         "notify_failure() not supported on realtime device!");
                return -EOPNOTSUPP;
        }
@@ -194,7 +194,7 @@ xfs_dax_notify_failure(
        }
 
        if (!xfs_has_rmapbt(mp)) {
-               xfs_warn(mp, "notify_failure() needs rmapbt enabled!");
+               xfs_debug(mp, "notify_failure() needs rmapbt enabled!");
                return -EOPNOTSUPP;
        }
 
index 7515a46..7c90b1a 100644 (file)
  */
 #ifdef CONFIG_CFI_CLANG
 #define TEXT_CFI_JT                                                    \
-               . = ALIGN(PMD_SIZE);                                    \
+               ALIGN_FUNCTION();                                       \
                __cfi_jt_start = .;                                     \
                *(.text..L.cfi.jumptable .text..L.cfi.jumptable.*)      \
-               . = ALIGN(PMD_SIZE);                                    \
                __cfi_jt_end = .;
 #else
 #define TEXT_CFI_JT
index bd04786..e8ad12b 100644 (file)
@@ -1127,9 +1127,10 @@ cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
  * cover a worst-case of every other cpu being on one of two nodes for a
  * very large NR_CPUS.
  *
- *  Use PAGE_SIZE as a minimum for smaller configurations.
+ *  Use PAGE_SIZE as a minimum for smaller configurations while avoiding
+ *  unsigned comparison to -1.
  */
-#define CPUMAP_FILE_MAX_BYTES  ((((NR_CPUS * 9)/32 - 1) > PAGE_SIZE) \
+#define CPUMAP_FILE_MAX_BYTES  (((NR_CPUS * 9)/32 > PAGE_SIZE) \
                                        ? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
 #define CPULIST_FILE_MAX_BYTES  (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
 
index 8917a32..d81a519 100644 (file)
@@ -65,7 +65,6 @@ struct dmar_pci_notify_info {
 
 extern struct rw_semaphore dmar_global_lock;
 extern struct list_head dmar_drhd_units;
-extern int intel_iommu_enabled;
 
 #define for_each_drhd_unit(drhd)                                       \
        list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,           \
@@ -89,8 +88,7 @@ extern int intel_iommu_enabled;
 static inline bool dmar_rcu_check(void)
 {
        return rwsem_is_locked(&dmar_global_lock) ||
-              system_state == SYSTEM_BOOTING ||
-              (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled);
+              system_state == SYSTEM_BOOTING;
 }
 
 #define        dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check())
index 6f1dee7..9be8704 100644 (file)
@@ -180,7 +180,7 @@ switch (val) {                                              \
 
 #define HP_SDC_CMD_SET_IM      0x40    /* 010xxxxx == set irq mask */
 
-/* The documents provided do not explicitly state that all registers betwee
+/* The documents provided do not explicitly state that all registers between
  * 0x01 and 0x1f inclusive can be read by sending their register index as a 
  * command, but this is implied and appears to be the case.
  */
index 6257867..567f123 100644 (file)
@@ -1788,42 +1788,6 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
        rcu_read_unlock();
 }
 
-/**
- * get_mem_cgroup_from_obj - get a memcg associated with passed kernel object.
- * @p: pointer to object from which memcg should be extracted. It can be NULL.
- *
- * Retrieves the memory group into which the memory of the pointed kernel
- * object is accounted. If memcg is found, its reference is taken.
- * If a passed kernel object is uncharged, or if proper memcg cannot be found,
- * as well as if mem_cgroup is disabled, NULL is returned.
- *
- * Return: valid memcg pointer with taken reference or NULL.
- */
-static inline struct mem_cgroup *get_mem_cgroup_from_obj(void *p)
-{
-       struct mem_cgroup *memcg;
-
-       rcu_read_lock();
-       do {
-               memcg = mem_cgroup_from_obj(p);
-       } while (memcg && !css_tryget(&memcg->css));
-       rcu_read_unlock();
-       return memcg;
-}
-
-/**
- * mem_cgroup_or_root - always returns a pointer to a valid memory cgroup.
- * @memcg: pointer to a valid memory cgroup or NULL.
- *
- * If passed argument is not NULL, returns it without any additional checks
- * and changes. Otherwise, root_mem_cgroup is returned.
- *
- * NOTE: root_mem_cgroup can be NULL during early boot.
- */
-static inline struct mem_cgroup *mem_cgroup_or_root(struct mem_cgroup *memcg)
-{
-       return memcg ? memcg : root_mem_cgroup;
-}
 #else
 static inline bool mem_cgroup_kmem_disabled(void)
 {
@@ -1880,15 +1844,6 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
 {
 }
 
-static inline struct mem_cgroup *get_mem_cgroup_from_obj(void *p)
-{
-       return NULL;
-}
-
-static inline struct mem_cgroup *mem_cgroup_or_root(struct mem_cgroup *memcg)
-{
-       return NULL;
-}
 #endif /* CONFIG_MEMCG_KMEM */
 
 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
index 1901049..c3b4cc8 100644 (file)
@@ -139,6 +139,11 @@ struct dev_pagemap {
        };
 };
 
+static inline bool pgmap_has_memory_failure(struct dev_pagemap *pgmap)
+{
+       return pgmap->ops && pgmap->ops->memory_failure;
+}
+
 static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
 {
        if (pgmap->flags & PGMAP_ALTMAP_VALID)
index 1d7992a..1a803e4 100644 (file)
@@ -101,8 +101,9 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
 }
 
 static inline int of_dma_configure_id(struct device *dev,
-                                  struct device_node *np,
-                                  bool force_dma)
+                                     struct device_node *np,
+                                     bool force_dma,
+                                     const u32 *id)
 {
        return 0;
 }
index 6feade6..15b49e6 100644 (file)
 #define PCI_DEVICE_ID_ICE_1712         0x1712
 #define PCI_DEVICE_ID_VT1724           0x1724
 
+#define PCI_VENDOR_ID_MICROSOFT                0x1414
+#define PCI_DEVICE_ID_HYPERV_VIDEO     0x5353
+
 #define PCI_VENDOR_ID_OXSEMI           0x1415
 #define PCI_DEVICE_ID_OXSEMI_12PCI840  0x8403
 #define PCI_DEVICE_ID_OXSEMI_PCIe840           0xC000
index a193884..4f765bc 100644 (file)
@@ -84,7 +84,7 @@ struct scmi_protocol_handle;
 struct scmi_clk_proto_ops {
        int (*count_get)(const struct scmi_protocol_handle *ph);
 
-       const struct scmi_clock_info *(*info_get)
+       const struct scmi_clock_info __must_check *(*info_get)
                (const struct scmi_protocol_handle *ph, u32 clk_id);
        int (*rate_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
                        u64 *rate);
@@ -466,7 +466,7 @@ enum scmi_sensor_class {
  */
 struct scmi_sensor_proto_ops {
        int (*count_get)(const struct scmi_protocol_handle *ph);
-       const struct scmi_sensor_info *(*info_get)
+       const struct scmi_sensor_info __must_check *(*info_get)
                (const struct scmi_protocol_handle *ph, u32 sensor_id);
        int (*trip_point_config)(const struct scmi_protocol_handle *ph,
                                 u32 sensor_id, u8 trip_id, u64 trip_value);
index 6e4f476..1eaea9f 100644 (file)
@@ -624,6 +624,23 @@ struct uart_state {
 /* number of characters left in xmit buffer before we ask for more */
 #define WAKEUP_CHARS           256
 
+/**
+ * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars
+ * @up: uart_port structure describing the port
+ * @chars: number of characters sent
+ *
+ * This function advances the tail of the circular xmit buffer by the number of
+ * @chars transmitted and handles accounting of transmitted bytes (into
+ * @up's icount.tx).
+ */
+static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars)
+{
+       struct circ_buf *xmit = &up->state->xmit;
+
+       xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1);
+       up->icount.tx += chars;
+}
+
 struct module;
 struct tty_driver;
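
A minimal usage sketch of the new helper (not part of this series; the burst size, register access, and function name below are illustrative assumptions): a driver TX path writes characters from the circular buffer and then lets uart_xmit_advance() move the tail and update icount.tx in one place.

static void example_tx_chars(struct uart_port *port)
{
        struct circ_buf *xmit = &port->state->xmit;
        unsigned int count = 0;

        /* Drain a small burst from the circular xmit buffer */
        while (!uart_circ_empty(xmit) && count < 16) {
                writeb(xmit->buf[(xmit->tail + count) & (UART_XMIT_SIZE - 1)],
                       port->membase);  /* hypothetical TX FIFO register */
                count++;
        }

        /* Advance the tail and account the transmitted bytes */
        uart_xmit_advance(port, count);
}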
 
index 0520e21..9949870 100644 (file)
@@ -124,8 +124,6 @@ struct hci_dev_info {
        __u16 acl_pkts;
        __u16 sco_mtu;
        __u16 sco_pkts;
-       __u16 iso_mtu;
-       __u16 iso_pkts;
 
        struct hci_dev_stats stat;
 };
index be2992e..a016f27 100644 (file)
@@ -15,8 +15,6 @@
 #define PKT_TYPE_LACPDU         cpu_to_be16(ETH_P_SLOW)
 #define AD_TIMER_INTERVAL       100 /*msec*/
 
-#define MULTICAST_LACPDU_ADDR    {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02}
-
 #define AD_LACP_SLOW 0
 #define AD_LACP_FAST 1
 
index afd606d..e999f85 100644 (file)
@@ -786,6 +786,9 @@ extern struct rtnl_link_ops bond_link_ops;
 /* exported from bond_sysfs_slave.c */
 extern const struct sysfs_ops slave_sysfs_ops;
 
+/* exported from bond_3ad.c */
+extern const u8 lacpdu_mcast_addr[];
+
 static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
 {
        dev_core_stats_tx_dropped_inc(dev);
index d0d188c..a8994f3 100644 (file)
 #ifndef IEEE802154_NETDEVICE_H
 #define IEEE802154_NETDEVICE_H
 
+#define IEEE802154_REQUIRED_SIZE(struct_type, member) \
+       (offsetof(typeof(struct_type), member) + \
+       sizeof(((typeof(struct_type) *)(NULL))->member))
+
+#define IEEE802154_ADDR_OFFSET \
+       offsetof(typeof(struct sockaddr_ieee802154), addr)
+
+#define IEEE802154_MIN_NAMELEN (IEEE802154_ADDR_OFFSET + \
+       IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, addr_type))
+
+#define IEEE802154_NAMELEN_SHORT (IEEE802154_ADDR_OFFSET + \
+       IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, short_addr))
+
+#define IEEE802154_NAMELEN_LONG (IEEE802154_ADDR_OFFSET + \
+       IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, hwaddr))
+
 #include <net/af_ieee802154.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
@@ -165,6 +181,27 @@ static inline void ieee802154_devaddr_to_raw(void *raw, __le64 addr)
        memcpy(raw, &temp, IEEE802154_ADDR_LEN);
 }
 
+static inline int
+ieee802154_sockaddr_check_size(struct sockaddr_ieee802154 *daddr, int len)
+{
+       struct ieee802154_addr_sa *sa;
+
+       sa = &daddr->addr;
+       if (len < IEEE802154_MIN_NAMELEN)
+               return -EINVAL;
+       switch (sa->addr_type) {
+       case IEEE802154_ADDR_SHORT:
+               if (len < IEEE802154_NAMELEN_SHORT)
+                       return -EINVAL;
+               break;
+       case IEEE802154_ADDR_LONG:
+               if (len < IEEE802154_NAMELEN_LONG)
+                       return -EINVAL;
+               break;
+       }
+       return 0;
+}
+
 static inline void ieee802154_addr_from_sa(struct ieee802154_addr *a,
                                           const struct ieee802154_addr_sa *sa)
 {
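
A hedged caller sketch (the function name and surrounding socket-layer details are assumptions, not part of this patch): bind/connect/sendmsg paths would be expected to validate the user-supplied length with the new helper before reading any addr_type-dependent members.

static int example_dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
{
        struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
        int err;

        /* Reject sockaddrs too short for the requested addr_type */
        err = ieee802154_sockaddr_check_size(addr, len);
        if (err < 0)
                return err;

        /* Safe to use addr->addr.short_addr or addr->addr.hwaddr here */
        return 0;
}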
index 65016a7..f160d68 100644 (file)
@@ -27,9 +27,9 @@ TRACE_EVENT(scmi_fc_call,
                __entry->val2 = val2;
        ),
 
-       TP_printk("[0x%02X]:[0x%02X]:[%08X]:%u:%u",
-                 __entry->protocol_id, __entry->msg_id,
-                 __entry->res_id, __entry->val1, __entry->val2)
+       TP_printk("pt=%02X msg_id=%02X res_id:%u vals=%u:%u",
+               __entry->protocol_id, __entry->msg_id,
+               __entry->res_id, __entry->val1, __entry->val2)
 );
 
 TRACE_EVENT(scmi_xfer_begin,
@@ -53,9 +53,9 @@ TRACE_EVENT(scmi_xfer_begin,
                __entry->poll = poll;
        ),
 
-       TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u poll=%u",
-               __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
-               __entry->seq, __entry->poll)
+       TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X poll=%u",
+               __entry->protocol_id, __entry->msg_id, __entry->seq,
+               __entry->transfer_id, __entry->poll)
 );
 
 TRACE_EVENT(scmi_xfer_response_wait,
@@ -81,9 +81,9 @@ TRACE_EVENT(scmi_xfer_response_wait,
                __entry->poll = poll;
        ),
 
-       TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u tmo_ms=%u poll=%u",
-               __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
-               __entry->seq, __entry->timeout, __entry->poll)
+       TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X tmo_ms=%u poll=%u",
+               __entry->protocol_id, __entry->msg_id, __entry->seq,
+               __entry->transfer_id, __entry->timeout, __entry->poll)
 );
 
 TRACE_EVENT(scmi_xfer_end,
@@ -107,9 +107,9 @@ TRACE_EVENT(scmi_xfer_end,
                __entry->status = status;
        ),
 
-       TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u status=%d",
-               __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
-               __entry->seq, __entry->status)
+       TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X s=%d",
+               __entry->protocol_id, __entry->msg_id, __entry->seq,
+               __entry->transfer_id, __entry->status)
 );
 
 TRACE_EVENT(scmi_rx_done,
@@ -133,9 +133,9 @@ TRACE_EVENT(scmi_rx_done,
                __entry->msg_type = msg_type;
        ),
 
-       TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u msg_type=%u",
-               __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
-               __entry->seq, __entry->msg_type)
+       TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X msg_type=%u",
+               __entry->protocol_id, __entry->msg_id, __entry->seq,
+               __entry->transfer_id, __entry->msg_type)
 );
 
 TRACE_EVENT(scmi_msg_dump,
index b9640ad..2965b35 100644 (file)
@@ -2648,6 +2648,9 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
                io_kill_timeouts(ctx, NULL, true);
                /* if we failed setting up the ctx, we might not have any rings */
                io_iopoll_try_reap_events(ctx);
+               /* drop cached put refs after potentially doing completions */
+               if (current->io_uring)
+                       io_uring_drop_tctx_refs(current);
        }
 
        INIT_WORK(&ctx->exit_work, io_ring_exit_work);
index 976c4ba..4a7e5d0 100644 (file)
@@ -165,7 +165,8 @@ done:
                req_set_fail(req);
        io_req_set_res(req, ret, 0);
        /* put file to avoid an attempt to IOPOLL the req */
-       io_put_file(req->file);
+       if (!(req->flags & REQ_F_FIXED_FILE))
+               io_put_file(req->file);
        req->file = NULL;
        return IOU_OK;
 }
index e9efed4..60e392f 100644 (file)
@@ -905,6 +905,13 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
                          IORING_RECVSEND_FIXED_BUF))
                return -EINVAL;
+       notif = zc->notif = io_alloc_notif(ctx);
+       if (!notif)
+               return -ENOMEM;
+       notif->cqe.user_data = req->cqe.user_data;
+       notif->cqe.res = 0;
+       notif->cqe.flags = IORING_CQE_F_NOTIF;
+       req->flags |= REQ_F_NEED_CLEANUP;
        if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
                unsigned idx = READ_ONCE(sqe->buf_index);
 
@@ -912,15 +919,8 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                        return -EFAULT;
                idx = array_index_nospec(idx, ctx->nr_user_bufs);
                req->imu = READ_ONCE(ctx->user_bufs[idx]);
-               io_req_set_rsrc_node(req, ctx, 0);
+               io_req_set_rsrc_node(notif, ctx, 0);
        }
-       notif = zc->notif = io_alloc_notif(ctx);
-       if (!notif)
-               return -ENOMEM;
-       notif->cqe.user_data = req->cqe.user_data;
-       notif->cqe.res = 0;
-       notif->cqe.flags = IORING_CQE_F_NOTIF;
-       req->flags |= REQ_F_NEED_CLEANUP;
 
        zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
        zc->len = READ_ONCE(sqe->len);
index c61494e..c4dddd0 100644 (file)
@@ -471,7 +471,7 @@ const struct io_op_def io_op_defs[] = {
                .prep_async             = io_uring_cmd_prep_async,
        },
        [IORING_OP_SEND_ZC] = {
-               .name                   = "SENDZC_NOTIF",
+               .name                   = "SEND_ZC",
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
index 1e18a44..76ebcfe 100644 (file)
@@ -206,7 +206,7 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
        return false;
 }
 
-static inline unsigned io_fixup_rw_res(struct io_kiocb *req, unsigned res)
+static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
 {
        struct io_async_rw *io = req->async_data;
 
index e4bb5d5..5f2090d 100644 (file)
@@ -6049,6 +6049,9 @@ struct cgroup *cgroup_get_from_id(u64 id)
        if (!kn)
                goto out;
 
+       if (kernfs_type(kn) != KERNFS_DIR)
+               goto put;
+
        rcu_read_lock();
 
        cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
@@ -6056,7 +6059,7 @@ struct cgroup *cgroup_get_from_id(u64 id)
                cgrp = NULL;
 
        rcu_read_unlock();
-
+put:
        kernfs_put(kn);
 out:
        return cgrp;
index 8a9e920..2b6bd51 100644 (file)
@@ -2047,11 +2047,8 @@ static __latent_entropy struct task_struct *copy_process(
        /*
         * If the new process will be in a different time namespace
         * do not allow it to share VM or a thread group with the forking task.
-        *
-        * On vfork, the child process enters the target time namespace only
-        * after exec.
         */
-       if ((clone_flags & (CLONE_VM | CLONE_VFORK)) == CLONE_VM) {
+       if (clone_flags & (CLONE_THREAD | CLONE_VM)) {
                if (nsp->time_ns != nsp->time_ns_for_children)
                        return ERR_PTR(-EINVAL);
        }
index b4cbb40..eec72ca 100644 (file)
@@ -179,8 +179,7 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
        if (IS_ERR(new_ns))
                return  PTR_ERR(new_ns);
 
-       if ((flags & CLONE_VM) == 0)
-               timens_on_fork(new_ns, tsk);
+       timens_on_fork(new_ns, tsk);
 
        tsk->nsproxy = new_ns;
        return 0;
index aeea973..39060a5 100644 (file)
@@ -3066,10 +3066,8 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
        if (WARN_ON(!work->func))
                return false;
 
-       if (!from_cancel) {
-               lock_map_acquire(&work->lockdep_map);
-               lock_map_release(&work->lockdep_map);
-       }
+       lock_map_acquire(&work->lockdep_map);
+       lock_map_release(&work->lockdep_map);
 
        if (start_flush_work(work, &barr, from_cancel)) {
                wait_for_completion(&barr.done);
index bcbe60d..d3e5f36 100644 (file)
@@ -264,8 +264,10 @@ config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
 config DEBUG_INFO_DWARF4
        bool "Generate DWARF Version 4 debuginfo"
        select DEBUG_INFO
+       depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
        help
-         Generate DWARF v4 debug info. This requires gcc 4.5+ and gdb 7.0+.
+         Generate DWARF v4 debug info. This requires gcc 4.5+, binutils 2.35.2
+         if using clang without clang's integrated assembler, and gdb 7.0+.
 
          If you have consumers of DWARF debug info that are not ready for
          newer revisions of DWARF, you may wish to choose this or have your
index cfdf631..4e51466 100644 (file)
@@ -884,6 +884,7 @@ static int dbgfs_rm_context(char *name)
        struct dentry *root, *dir, **new_dirs;
        struct damon_ctx **new_ctxs;
        int i, j;
+       int ret = 0;
 
        if (damon_nr_running_ctxs())
                return -EBUSY;
@@ -898,14 +899,16 @@ static int dbgfs_rm_context(char *name)
 
        new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs),
                        GFP_KERNEL);
-       if (!new_dirs)
-               return -ENOMEM;
+       if (!new_dirs) {
+               ret = -ENOMEM;
+               goto out_dput;
+       }
 
        new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs),
                        GFP_KERNEL);
        if (!new_ctxs) {
-               kfree(new_dirs);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_new_dirs;
        }
 
        for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) {
@@ -925,7 +928,13 @@ static int dbgfs_rm_context(char *name)
        dbgfs_ctxs = new_ctxs;
        dbgfs_nr_ctxs--;
 
-       return 0;
+       goto out_dput;
+
+out_new_dirs:
+       kfree(new_dirs);
+out_dput:
+       dput(dir);
+       return ret;
 }
 
 static ssize_t dbgfs_rm_context_write(struct file *file,
index 1a97610..279e55b 100644 (file)
@@ -125,6 +125,9 @@ void frontswap_init(unsigned type, unsigned long *map)
         * p->frontswap set to something valid to work properly.
         */
        frontswap_map_set(sis, map);
+
+       if (!frontswap_enabled())
+               return;
        frontswap_ops->init(type);
 }
 
index 5abdaf4..00926ab 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2345,8 +2345,28 @@ static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
 }
 
 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
-static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
-                        unsigned int flags, struct page **pages, int *nr)
+/*
+ * Fast-gup relies on pte change detection to avoid concurrent pgtable
+ * operations.
+ *
+ * To pin the page, fast-gup needs to do below in order:
+ * (1) pin the page (by prefetching pte), then (2) check pte not changed.
+ *
+ * For the rest of pgtable operations where pgtable updates can be racy
+ * with fast-gup, we need to do (1) clear pte, then (2) check whether page
+ * is pinned.
+ *
+ * Above will work for all pte-level operations, including THP split.
+ *
+ * For THP collapse, it's a bit more complicated because fast-gup may be
+ * walking a pgtable page that is being freed (pte is still valid but pmd
+ * can be cleared already).  To avoid race in such condition, we need to
+ * also check pmd here to make sure pmd doesn't change (corresponds to
+ * pmdp_collapse_flush() in the THP collapse code path).
+ */
+static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
+                        unsigned long end, unsigned int flags,
+                        struct page **pages, int *nr)
 {
        struct dev_pagemap *pgmap = NULL;
        int nr_start = *nr, ret = 0;
@@ -2392,7 +2412,8 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
                        goto pte_unmap;
                }
 
-               if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+               if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
+                   unlikely(pte_val(pte) != pte_val(*ptep))) {
                        gup_put_folio(folio, 1, flags);
                        goto pte_unmap;
                }
@@ -2439,8 +2460,9 @@ pte_unmap:
  * get_user_pages_fast_only implementation that can pin pages. Thus it's still
  * useful to have gup_huge_pmd even if we can't operate on ptes.
  */
-static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
-                        unsigned int flags, struct page **pages, int *nr)
+static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
+                        unsigned long end, unsigned int flags,
+                        struct page **pages, int *nr)
 {
        return 0;
 }
@@ -2764,7 +2786,7 @@ static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned lo
                        if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
                                         PMD_SHIFT, next, flags, pages, nr))
                                return 0;
-               } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
+               } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr))
                        return 0;
        } while (pmdp++, addr = next, addr != end);
 
index e9414ee..f42bb51 100644 (file)
@@ -2894,11 +2894,9 @@ static void split_huge_pages_all(void)
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
                        int nr_pages;
-                       if (!pfn_valid(pfn))
-                               continue;
 
-                       page = pfn_to_page(pfn);
-                       if (!get_page_unless_zero(page))
+                       page = pfn_to_online_page(pfn);
+                       if (!page || !get_page_unless_zero(page))
                                continue;
 
                        if (zone != page_zone(page))
index e070b85..0bdfc7e 100644 (file)
@@ -3420,6 +3420,7 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
 {
        int i, nid = page_to_nid(page);
        struct hstate *target_hstate;
+       struct page *subpage;
        int rc = 0;
 
        target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
@@ -3453,15 +3454,16 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
        mutex_lock(&target_hstate->resize_lock);
        for (i = 0; i < pages_per_huge_page(h);
                                i += pages_per_huge_page(target_hstate)) {
+               subpage = nth_page(page, i);
                if (hstate_is_gigantic(target_hstate))
-                       prep_compound_gigantic_page_for_demote(page + i,
+                       prep_compound_gigantic_page_for_demote(subpage,
                                                        target_hstate->order);
                else
-                       prep_compound_page(page + i, target_hstate->order);
-               set_page_private(page + i, 0);
-               set_page_refcounted(page + i);
-               prep_new_huge_page(target_hstate, page + i, nid);
-               put_page(page + i);
+                       prep_compound_page(subpage, target_hstate->order);
+               set_page_private(subpage, 0);
+               set_page_refcounted(subpage);
+               prep_new_huge_page(target_hstate, subpage, nid);
+               put_page(subpage);
        }
        mutex_unlock(&target_hstate->resize_lock);
 
index 01f7178..70b7ac6 100644 (file)
@@ -1083,10 +1083,12 @@ static void collapse_huge_page(struct mm_struct *mm,
 
        pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
        /*
-        * After this gup_fast can't run anymore. This also removes
-        * any huge TLB entry from the CPU so we won't allow
-        * huge and small TLB entries for the same virtual address
-        * to avoid the risk of CPU bugs in that area.
+        * This removes any huge TLB entry from the CPU so we won't allow
+        * huge and small TLB entries for the same virtual address to
+        * avoid the risk of CPU bugs in that area.
+        *
+        * Parallel fast GUP is fine since fast GUP will back off when
+        * it detects PMD is changed.
         */
        _pmd = pmdp_collapse_flush(vma, address, pmd);
        spin_unlock(pmd_ptl);
index 5f0f094..9ff5165 100644 (file)
@@ -451,8 +451,11 @@ regular_page:
                        continue;
                }
 
-               /* Do not interfere with other mappings of this page */
-               if (page_mapcount(page) != 1)
+               /*
+                * Do not interfere with other mappings of this page and
+                * skip non-LRU pages.
+                */
+               if (!PageLRU(page) || page_mapcount(page) != 1)
                        continue;
 
                VM_BUG_ON_PAGE(PageTransCompound(page), page);
index 1443980..e7ac570 100644 (file)
@@ -345,13 +345,17 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
  * not much we can do. We just print a message and ignore otherwise.
  */
 
+#define FSDAX_INVALID_PGOFF ULONG_MAX
+
 /*
  * Schedule a process for later kill.
  * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
  *
- * Notice: @fsdax_pgoff is used only when @p is a fsdax page.
- *   In other cases, such as anonymous and file-backend page, the address to be
- *   killed can be caculated by @p itself.
+ * Note: @fsdax_pgoff is used only when @p is a fsdax page and a
+ * filesystem with a memory failure handler has claimed the
+ * memory_failure event. In all other cases, page->index and
+ * page->mapping are sufficient for mapping the page back to its
+ * corresponding user virtual address.
  */
 static void add_to_kill(struct task_struct *tsk, struct page *p,
                        pgoff_t fsdax_pgoff, struct vm_area_struct *vma,
@@ -367,11 +371,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
 
        tk->addr = page_address_in_vma(p, vma);
        if (is_zone_device_page(p)) {
-               /*
-                * Since page->mapping is not used for fsdax, we need
-                * calculate the address based on the vma.
-                */
-               if (p->pgmap->type == MEMORY_DEVICE_FS_DAX)
+               if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
                        tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
                tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
        } else
@@ -523,7 +523,8 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
                        if (!page_mapped_in_vma(page, vma))
                                continue;
                        if (vma->vm_mm == t->mm)
-                               add_to_kill(t, page, 0, vma, to_kill);
+                               add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma,
+                                           to_kill);
                }
        }
        read_unlock(&tasklist_lock);
@@ -559,7 +560,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
                         * to be informed of all such data corruptions.
                         */
                        if (vma->vm_mm == t->mm)
-                               add_to_kill(t, page, 0, vma, to_kill);
+                               add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma,
+                                           to_kill);
                }
        }
        read_unlock(&tasklist_lock);
@@ -743,6 +745,9 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
        };
        priv.tk.tsk = p;
 
+       if (!p->mm)
+               return -EFAULT;
+
        mmap_read_lock(p->mm);
        ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops,
                              (void *)&priv);
@@ -1928,7 +1933,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
         * Call driver's implementation to handle the memory failure, otherwise
         * fall back to generic handler.
         */
-       if (pgmap->ops->memory_failure) {
+       if (pgmap_has_memory_failure(pgmap)) {
                rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
                /*
                 * Fall back to generic handler too if operation is not
index 4ba73f5..a788144 100644 (file)
@@ -4386,14 +4386,20 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 
        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
                                      vmf->address, &vmf->ptl);
-       ret = 0;
+
        /* Re-check under ptl */
-       if (likely(!vmf_pte_changed(vmf)))
+       if (likely(!vmf_pte_changed(vmf))) {
                do_set_pte(vmf, page, vmf->address);
-       else
+
+               /* no need to invalidate: a not-present page won't be cached */
+               update_mmu_cache(vma, vmf->address, vmf->pte);
+
+               ret = 0;
+       } else {
+               update_mmu_tlb(vma, vmf->address, vmf->pte);
                ret = VM_FAULT_NOPAGE;
+       }
 
-       update_mmu_tlb(vma, vmf->address, vmf->pte);
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        return ret;
 }
index 27fb37d..dbf6c7a 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/export.h>
 #include <linux/memremap.h>
 #include <linux/migrate.h>
+#include <linux/mm.h>
 #include <linux/mm_inline.h>
 #include <linux/mmu_notifier.h>
 #include <linux/oom.h>
@@ -193,10 +194,10 @@ again:
                        bool anon_exclusive;
                        pte_t swp_pte;
 
+                       flush_cache_page(vma, addr, pte_pfn(*ptep));
                        anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
                        if (anon_exclusive) {
-                               flush_cache_page(vma, addr, pte_pfn(*ptep));
-                               ptep_clear_flush(vma, addr, ptep);
+                               pte = ptep_clear_flush(vma, addr, ptep);
 
                                if (page_try_share_anon_rmap(page)) {
                                        set_pte_at(mm, addr, ptep, pte);
@@ -206,11 +207,15 @@ again:
                                        goto next;
                                }
                        } else {
-                               ptep_get_and_clear(mm, addr, ptep);
+                               pte = ptep_get_and_clear(mm, addr, ptep);
                        }
 
                        migrate->cpages++;
 
+                       /* Set the dirty flag on the folio now the pte is gone. */
+                       if (pte_dirty(pte))
+                               folio_mark_dirty(page_folio(page));
+
                        /* Setup special migration page table entry */
                        if (mpfn & MIGRATE_PFN_WRITE)
                                entry = make_writable_migration_entry(
@@ -254,13 +259,14 @@ next:
                migrate->dst[migrate->npages] = 0;
                migrate->src[migrate->npages++] = mpfn;
        }
-       arch_leave_lazy_mmu_mode();
-       pte_unmap_unlock(ptep - 1, ptl);
 
        /* Only flush the TLB if we actually modified any entries */
        if (unmapped)
                flush_tlb_range(walk->vma, start, end);
 
+       arch_leave_lazy_mmu_mode();
+       pte_unmap_unlock(ptep - 1, ptl);
+
        return 0;
 }
 
index e5486d4..d04211f 100644 (file)
@@ -4708,6 +4708,30 @@ void fs_reclaim_release(gfp_t gfp_mask)
 EXPORT_SYMBOL_GPL(fs_reclaim_release);
 #endif
 
+/*
+ * Zonelists may change due to hotplug during allocation. Detect when zonelists
+ * have been rebuilt so allocation retries. Reader side does not lock and
+ * retries the allocation if zonelist changes. Writer side is protected by the
+ * embedded spin_lock.
+ */
+static DEFINE_SEQLOCK(zonelist_update_seq);
+
+static unsigned int zonelist_iter_begin(void)
+{
+       if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
+               return read_seqbegin(&zonelist_update_seq);
+
+       return 0;
+}
+
+static unsigned int check_retry_zonelist(unsigned int seq)
+{
+       if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
+               return read_seqretry(&zonelist_update_seq, seq);
+
+       return seq;
+}
+
 /* Perform direct synchronous page reclaim */
 static unsigned long
 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
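
A condensed sketch of the read-side retry pattern these helpers provide (try_alloc_once() is a placeholder standing in for the allocator slowpath changed in the hunks below):

static struct page *example_alloc_with_zonelist_retry(gfp_t gfp_mask,
                                                      unsigned int order)
{
        struct page *page;
        unsigned int cookie;

restart:
        /* Snapshot the zonelist generation before walking the zonelists */
        cookie = zonelist_iter_begin();
        page = try_alloc_once(gfp_mask, order);  /* placeholder helper */
        /* If hotplug rebuilt the zonelists underneath us, start over */
        if (!page && check_retry_zonelist(cookie))
                goto restart;
        return page;
}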
@@ -5001,6 +5025,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        int compaction_retries;
        int no_progress_loops;
        unsigned int cpuset_mems_cookie;
+       unsigned int zonelist_iter_cookie;
        int reserve_flags;
 
        /*
@@ -5011,11 +5036,12 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                                (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
                gfp_mask &= ~__GFP_ATOMIC;
 
-retry_cpuset:
+restart:
        compaction_retries = 0;
        no_progress_loops = 0;
        compact_priority = DEF_COMPACT_PRIORITY;
        cpuset_mems_cookie = read_mems_allowed_begin();
+       zonelist_iter_cookie = zonelist_iter_begin();
 
        /*
         * The fast path uses conservative alloc_flags to succeed only until
@@ -5187,9 +5213,13 @@ retry:
                goto retry;
 
 
-       /* Deal with possible cpuset update races before we start OOM killing */
-       if (check_retry_cpuset(cpuset_mems_cookie, ac))
-               goto retry_cpuset;
+       /*
+        * Deal with possible cpuset update races or zonelist updates to avoid
+        * an unnecessary OOM kill.
+        */
+       if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
+           check_retry_zonelist(zonelist_iter_cookie))
+               goto restart;
 
        /* Reclaim has failed us, start killing things */
        page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
@@ -5209,9 +5239,13 @@ retry:
        }
 
 nopage:
-       /* Deal with possible cpuset update races before we fail */
-       if (check_retry_cpuset(cpuset_mems_cookie, ac))
-               goto retry_cpuset;
+       /*
+        * Deal with possible cpuset update races or zonelist updates to avoid
+        * an unnecessary OOM kill.
+        */
+       if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
+           check_retry_zonelist(zonelist_iter_cookie))
+               goto restart;
 
        /*
         * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
@@ -5706,6 +5740,18 @@ refill:
                /* reset page count bias and offset to start of new frag */
                nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
                offset = size - fragsz;
+               if (unlikely(offset < 0)) {
+                       /*
+                        * The caller is trying to allocate a fragment
+                        * with fragsz > PAGE_SIZE but the cache isn't big
+                        * enough to satisfy the request; this may
+                        * happen in low memory conditions.
+                        * We don't release the cache page because
+                        * it could make memory pressure worse,
+                        * so we simply return NULL here.
+                        */
+                       return NULL;
+               }
        }
 
        nc->pagecnt_bias--;
@@ -6514,9 +6560,8 @@ static void __build_all_zonelists(void *data)
        int nid;
        int __maybe_unused cpu;
        pg_data_t *self = data;
-       static DEFINE_SPINLOCK(lock);
 
-       spin_lock(&lock);
+       write_seqlock(&zonelist_update_seq);
 
 #ifdef CONFIG_NUMA
        memset(node_load, 0, sizeof(node_load));
@@ -6553,7 +6598,7 @@ static void __build_all_zonelists(void *data)
 #endif
        }
 
-       spin_unlock(&lock);
+       write_sequnlock(&zonelist_update_seq);
 }
 
 static noinline void __init
index 9d73dc3..eb3a68c 100644 (file)
@@ -288,6 +288,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  * @isolate_before:    isolate the pageblock before the boundary_pfn
  * @skip_isolation:    the flag to skip the pageblock isolation in second
  *                     isolate_single_pageblock()
+ * @migratetype:       migrate type to set in error recovery.
  *
  * Free and in-use pages can be as big as MAX_ORDER-1 and contain more than one
  * pageblock. When not all pageblocks within a page are isolated at the same
@@ -302,9 +303,9 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  * the in-use page then splitting the free page.
  */
 static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
-                       gfp_t gfp_flags, bool isolate_before, bool skip_isolation)
+                       gfp_t gfp_flags, bool isolate_before, bool skip_isolation,
+                       int migratetype)
 {
-       unsigned char saved_mt;
        unsigned long start_pfn;
        unsigned long isolate_pageblock;
        unsigned long pfn;
@@ -328,13 +329,13 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
        start_pfn  = max(ALIGN_DOWN(isolate_pageblock, MAX_ORDER_NR_PAGES),
                                      zone->zone_start_pfn);
 
-       saved_mt = get_pageblock_migratetype(pfn_to_page(isolate_pageblock));
+       if (skip_isolation) {
+               int mt = get_pageblock_migratetype(pfn_to_page(isolate_pageblock));
 
-       if (skip_isolation)
-               VM_BUG_ON(!is_migrate_isolate(saved_mt));
-       else {
-               ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock), saved_mt, flags,
-                               isolate_pageblock, isolate_pageblock + pageblock_nr_pages);
+               VM_BUG_ON(!is_migrate_isolate(mt));
+       } else {
+               ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock), migratetype,
+                               flags, isolate_pageblock, isolate_pageblock + pageblock_nr_pages);
 
                if (ret)
                        return ret;
@@ -475,7 +476,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
 failed:
        /* restore the original migratetype */
        if (!skip_isolation)
-               unset_migratetype_isolate(pfn_to_page(isolate_pageblock), saved_mt);
+               unset_migratetype_isolate(pfn_to_page(isolate_pageblock), migratetype);
        return -EBUSY;
 }
 
@@ -537,7 +538,8 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
        bool skip_isolation = false;
 
        /* isolate [isolate_start, isolate_start + pageblock_nr_pages) pageblock */
-       ret = isolate_single_pageblock(isolate_start, flags, gfp_flags, false, skip_isolation);
+       ret = isolate_single_pageblock(isolate_start, flags, gfp_flags, false,
+                       skip_isolation, migratetype);
        if (ret)
                return ret;
 
@@ -545,7 +547,8 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                skip_isolation = true;
 
        /* isolate [isolate_end - pageblock_nr_pages, isolate_end) pageblock */
-       ret = isolate_single_pageblock(isolate_end, flags, gfp_flags, true, skip_isolation);
+       ret = isolate_single_pageblock(isolate_end, flags, gfp_flags, true,
+                       skip_isolation, migratetype);
        if (ret) {
                unset_migratetype_isolate(pfn_to_page(isolate_start), migratetype);
                return ret;
index e3e9590..3f71540 100644 (file)
@@ -285,7 +285,7 @@ static int secretmem_init(void)
 
        secretmem_mnt = kern_mount(&secretmem_fs);
        if (IS_ERR(secretmem_mnt))
-               ret = PTR_ERR(secretmem_mnt);
+               return PTR_ERR(secretmem_mnt);
 
        /* prevent secretmem mappings from ever getting PROT_EXEC */
        secretmem_mnt->mnt_flags |= MNT_NOEXEC;
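
The mm/secretmem.c one-liner above matters because kern_mount() returns an ERR_PTR()-encoded pointer on failure; the old code merely stored the error code and then went on to dereference the bogus pointer. A hedged reminder of the usual shape of this error handling (example_fs_type is an illustrative filesystem type, not a real one):

static int example_init(void)
{
	struct vfsmount *mnt;

	mnt = kern_mount(&example_fs_type);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);		/* bail out before touching mnt */

	mnt->mnt_flags |= MNT_NOEXEC;		/* safe only after the IS_ERR() check */
	return 0;
}
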
index 07b9482..ccc0257 100644 (file)
@@ -475,6 +475,7 @@ void slab_kmem_cache_release(struct kmem_cache *s)
 void kmem_cache_destroy(struct kmem_cache *s)
 {
        int refcnt;
+       bool rcu_set;
 
        if (unlikely(!s) || !kasan_check_byte(s))
                return;
@@ -482,6 +483,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
        cpus_read_lock();
        mutex_lock(&slab_mutex);
 
+       rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
+
        refcnt = --s->refcount;
        if (refcnt)
                goto out_unlock;
@@ -492,7 +495,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 out_unlock:
        mutex_unlock(&slab_mutex);
        cpus_read_unlock();
-       if (!refcnt && !(s->flags & SLAB_TYPESAFE_BY_RCU))
+       if (!refcnt && !rcu_set)
                kmem_cache_release(s);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
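
The rcu_set change above snapshots SLAB_TYPESAFE_BY_RCU while slab_mutex still pins the cache: for RCU-typesafe caches the final release is deferred, so by the time the old code re-read s->flags after the unlock, *s could already have been freed. A minimal sketch of the general pattern, with illustrative names (obj, OBJ_FLAG_ASYNC_FREE and the helper calls are not real kernel APIs):

	bool deferred;
	int refs;

	mutex_lock(&obj_lock);
	deferred = obj->flags & OBJ_FLAG_ASYNC_FREE;	/* snapshot while obj is pinned */
	refs = --obj->refcount;
	if (!refs)
		start_shutdown(obj);	/* may schedule an asynchronous free */
	mutex_unlock(&obj_lock);

	/*
	 * If the free was deferred, obj may already be gone here, so decide
	 * based on the snapshot rather than re-reading obj->flags.
	 */
	if (!refs && !deferred)
		final_release(obj);
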
index 862dbd9..4b98dff 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -310,6 +310,11 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
  */
 static nodemask_t slab_nodes;
 
+/*
+ * Workqueue used for flush_cpu_slab().
+ */
+static struct workqueue_struct *flushwq;
+
 /********************************************************************
  *                     Core slab cache functions
  *******************************************************************/
@@ -2730,7 +2735,7 @@ static void flush_all_cpus_locked(struct kmem_cache *s)
                INIT_WORK(&sfw->work, flush_cpu_slab);
                sfw->skip = false;
                sfw->s = s;
-               schedule_work_on(cpu, &sfw->work);
+               queue_work_on(cpu, flushwq, &sfw->work);
        }
 
        for_each_online_cpu(cpu) {
@@ -4858,6 +4863,8 @@ void __init kmem_cache_init(void)
 
 void __init kmem_cache_init_late(void)
 {
+       flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
+       WARN_ON(!flushwq);
 }
 
 struct kmem_cache *
@@ -4926,6 +4933,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
        /* Honor the call site pointer we received. */
        trace_kmalloc(caller, ret, s, size, s->size, gfpflags);
 
+       ret = kasan_kmalloc(s, ret, size, gfpflags);
+
        return ret;
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
@@ -4957,6 +4966,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
        /* Honor the call site pointer we received. */
        trace_kmalloc_node(caller, ret, s, size, s->size, gfpflags, node);
 
+       ret = kasan_kmalloc(s, ret, size, gfpflags);
+
        return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
@@ -5890,7 +5901,8 @@ static char *create_unique_id(struct kmem_cache *s)
        char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
        char *p = name;
 
-       BUG_ON(!name);
+       if (!name)
+               return ERR_PTR(-ENOMEM);
 
        *p++ = ':';
        /*
@@ -5948,6 +5960,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
                 * for the symlinks.
                 */
                name = create_unique_id(s);
+               if (IS_ERR(name))
+                       return PTR_ERR(name);
        }
 
        s->kobj.kset = kset;
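
Two of the mm/slub.c hunks above move the flush_cpu_slab() work items from the system workqueue onto a dedicated WQ_MEM_RECLAIM workqueue, so slab flushing can still make forward progress when the system workqueue is starved during reclaim. A hedged sketch of that allocate-and-queue pattern (all names are illustrative):

#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_fn(struct work_struct *work)
{
	/* ... flush per-CPU state ... */
}

static int __init example_init(void)
{
	/* WQ_MEM_RECLAIM reserves a rescuer thread for forward progress */
	example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&example_work, example_fn);
	queue_work(example_wq, &example_work);
	return 0;
}
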
index e166051..41afa6d 100644 (file)
@@ -151,7 +151,7 @@ void __delete_from_swap_cache(struct folio *folio,
 
        for (i = 0; i < nr; i++) {
                void *entry = xas_store(&xas, shadow);
-               VM_BUG_ON_FOLIO(entry != folio, folio);
+               VM_BUG_ON_PAGE(entry != folio, entry);
                set_page_private(folio_page(folio, i), 0);
                xas_next(&xas);
        }
index b2b1431..382dbe9 100644 (file)
@@ -2550,8 +2550,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
                }
 
                if (unlikely(buffer_heads_over_limit)) {
-                       if (folio_get_private(folio) && folio_trylock(folio)) {
-                               if (folio_get_private(folio))
+                       if (folio_test_private(folio) && folio_trylock(folio)) {
+                               if (folio_test_private(folio))
                                        filemap_release_folio(folio, 0);
                                folio_unlock(folio);
                        }
index b8f8da7..41c1ad3 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/atomic.h>
 #include <linux/byteorder/generic.h>
 #include <linux/container_of.h>
+#include <linux/errno.h>
 #include <linux/gfp.h>
 #include <linux/if.h>
 #include <linux/if_arp.h>
@@ -700,6 +701,9 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
        int max_header_len = batadv_max_header_len();
        int ret;
 
+       if (hard_iface->net_dev->mtu < ETH_MIN_MTU + max_header_len)
+               return -EINVAL;
+
        if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
                goto out;
 
index 9a0ae59..4f385d5 100644 (file)
@@ -1040,8 +1040,10 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
                goto free_iterate;
        }
 
-       if (repl->valid_hooks != t->valid_hooks)
+       if (repl->valid_hooks != t->valid_hooks) {
+               ret = -EINVAL;
                goto free_unlock;
+       }
 
        if (repl->num_counters && repl->num_counters != t->private->nentries) {
                ret = -EINVAL;
index fe9be3c..385f04a 100644 (file)
@@ -52,6 +52,7 @@ int __get_compat_msghdr(struct msghdr *kmsg,
                kmsg->msg_namelen = sizeof(struct sockaddr_storage);
 
        kmsg->msg_control_is_user = true;
+       kmsg->msg_get_inq = 0;
        kmsg->msg_control_user = compat_ptr(msg->msg_control);
        kmsg->msg_controllen = msg->msg_controllen;
 
index 764c4cb..5dc3860 100644 (file)
@@ -1611,9 +1611,8 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
 
        switch (keys->control.addr_type) {
        case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
-               addr_diff = (__force u32)keys->addrs.v4addrs.dst -
-                           (__force u32)keys->addrs.v4addrs.src;
-               if (addr_diff < 0)
+               if ((__force u32)keys->addrs.v4addrs.dst <
+                   (__force u32)keys->addrs.v4addrs.src)
                        swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
 
                if ((__force u16)keys->ports.dst <
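
The flow-dissector hunk above fixes a classic unsigned-arithmetic bug: the difference of two u32 addresses is itself computed in u32, so it wraps instead of going negative and the old addr_diff < 0 test could never fire. A tiny self-contained illustration of the fix (the buggy form is described in the comment):

#include <linux/types.h>

static void order_pair(u32 *src, u32 *dst)
{
	/*
	 * Buggy form: "u32 diff = *dst - *src; if (diff < 0) ..." wraps
	 * around and the test is always false. Compare the operands
	 * directly instead of their difference.
	 */
	if (*dst < *src) {
		u32 tmp = *src;

		*src = *dst;
		*dst = tmp;
	}
}
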
index 6b9f191..0ec2f59 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/user_namespace.h>
 #include <linux/net_namespace.h>
 #include <linux/sched/task.h>
-#include <linux/sched/mm.h>
 #include <linux/uidgid.h>
 #include <linux/cookie.h>
 
@@ -1144,13 +1143,7 @@ static int __register_pernet_operations(struct list_head *list,
                 * setup_net() and cleanup_net() are not possible.
                 */
                for_each_net(net) {
-                       struct mem_cgroup *old, *memcg;
-
-                       memcg = mem_cgroup_or_root(get_mem_cgroup_from_obj(net));
-                       old = set_active_memcg(memcg);
                        error = ops_init(ops, net);
-                       set_active_memcg(old);
-                       mem_cgroup_put(memcg);
                        if (error)
                                goto out_undo;
                        list_add_tail(&net->exit_list, &net_exit_list);
index 718fb77..7889e1e 100644 (file)
@@ -200,8 +200,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len)
        int err = 0;
        struct net_device *dev = NULL;
 
-       if (len < sizeof(*uaddr))
-               return -EINVAL;
+       err = ieee802154_sockaddr_check_size(uaddr, len);
+       if (err < 0)
+               return err;
 
        uaddr = (struct sockaddr_ieee802154 *)_uaddr;
        if (uaddr->family != AF_IEEE802154)
@@ -493,7 +494,8 @@ static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
 
        ro->bound = 0;
 
-       if (len < sizeof(*addr))
+       err = ieee802154_sockaddr_check_size(addr, len);
+       if (err < 0)
                goto out;
 
        if (addr->family != AF_IEEE802154)
@@ -564,8 +566,9 @@ static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
        struct dgram_sock *ro = dgram_sk(sk);
        int err = 0;
 
-       if (len < sizeof(*addr))
-               return -EINVAL;
+       err = ieee802154_sockaddr_check_size(addr, len);
+       if (err < 0)
+               return err;
 
        if (addr->family != AF_IEEE802154)
                return -EINVAL;
@@ -604,6 +607,7 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        struct ieee802154_mac_cb *cb;
        struct dgram_sock *ro = dgram_sk(sk);
        struct ieee802154_addr dst_addr;
+       DECLARE_SOCKADDR(struct sockaddr_ieee802154*, daddr, msg->msg_name);
        int hlen, tlen;
        int err;
 
@@ -612,10 +616,20 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
                return -EOPNOTSUPP;
        }
 
-       if (!ro->connected && !msg->msg_name)
-               return -EDESTADDRREQ;
-       else if (ro->connected && msg->msg_name)
-               return -EISCONN;
+       if (msg->msg_name) {
+               if (ro->connected)
+                       return -EISCONN;
+               if (msg->msg_namelen < IEEE802154_MIN_NAMELEN)
+                       return -EINVAL;
+               err = ieee802154_sockaddr_check_size(daddr, msg->msg_namelen);
+               if (err < 0)
+                       return err;
+               ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
+       } else {
+               if (!ro->connected)
+                       return -EDESTADDRREQ;
+               dst_addr = ro->dst_addr;
+       }
 
        if (!ro->bound)
                dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
@@ -651,16 +665,6 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        cb = mac_cb_init(skb);
        cb->type = IEEE802154_FC_TYPE_DATA;
        cb->ackreq = ro->want_ack;
-
-       if (msg->msg_name) {
-               DECLARE_SOCKADDR(struct sockaddr_ieee802154*,
-                                daddr, msg->msg_name);
-
-               ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
-       } else {
-               dst_addr = ro->dst_addr;
-       }
-
        cb->secen = ro->secen;
        cb->secen_override = ro->secen_override;
        cb->seclevel = ro->seclevel;
index 73651d1..e11d6b0 100644 (file)
@@ -1004,7 +1004,9 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
 
                        rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else {
+                       rcu_read_lock();
                        ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
+                       rcu_read_unlock();
                }
        }
 }
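
The ipmr hunk above takes the RCU read lock around ip_mr_forward() because the forwarding path dereferences RCU-protected state and this particular caller does not otherwise run inside a read-side critical section. The basic idiom, with illustrative names:

#include <linux/printk.h>
#include <linux/rcupdate.h>

struct example { int val; };
static struct example __rcu *example_ptr;

static void example_reader(void)
{
	struct example *p;

	rcu_read_lock();			/* begin read-side critical section */
	p = rcu_dereference(example_ptr);	/* valid only until rcu_read_unlock() */
	if (p)
		pr_info("val=%d\n", p->val);
	rcu_read_unlock();
}
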
index 6cdfce6..e373dde 100644 (file)
@@ -1761,19 +1761,28 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
        if (sk->sk_state == TCP_LISTEN)
                return -ENOTCONN;
 
-       skb = tcp_recv_skb(sk, seq, &offset);
-       if (!skb)
-               return 0;
+       while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
+               u8 tcp_flags;
+               int used;
 
-       __skb_unlink(skb, &sk->sk_receive_queue);
-       WARN_ON(!skb_set_owner_sk_safe(skb, sk));
-       copied = recv_actor(sk, skb);
-       if (copied >= 0) {
-               seq += copied;
-               if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+               __skb_unlink(skb, &sk->sk_receive_queue);
+               WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
+               tcp_flags = TCP_SKB_CB(skb)->tcp_flags;
+               used = recv_actor(sk, skb);
+               consume_skb(skb);
+               if (used < 0) {
+                       if (!copied)
+                               copied = used;
+                       break;
+               }
+               seq += used;
+               copied += used;
+
+               if (tcp_flags & TCPHDR_FIN) {
                        ++seq;
+                       break;
+               }
        }
-       consume_skb(skb);
        WRITE_ONCE(tp->copied_seq, seq);
 
        tcp_rcv_space_adjust(sk);
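
The rewritten tcp_read_skb() above now drains the receive queue one skb at a time, caches the TCP flags before recv_actor() (which may end up consuming the skb), and only reports an actor error when nothing has been copied yet. A small self-contained model of that return-value convention (plain C, not a kernel API):

/*
 * Sum per-item results; a negative result stops the loop and is only
 * propagated when nothing was consumed before it.
 */
static int drain(const int *results, int n)
{
	int copied = 0;
	int i;

	for (i = 0; i < n; i++) {
		int used = results[i];

		if (used < 0)
			return copied ? copied : used;
		copied += used;
	}
	return copied;
}
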
index cd72158..560d9ea 100644 (file)
@@ -1821,7 +1821,7 @@ int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
                        continue;
                }
 
-               WARN_ON(!skb_set_owner_sk_safe(skb, sk));
+               WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
                used = recv_actor(sk, skb);
                if (used <= 0) {
                        if (!copied)
index 2ce0c44..dbb1430 100644 (file)
@@ -1070,13 +1070,13 @@ static int __init inet6_init(void)
        for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
                INIT_LIST_HEAD(r);
 
+       raw_hashinfo_init(&raw_v6_hashinfo);
+
        if (disable_ipv6_mod) {
                pr_info("Loaded, but administratively disabled, reboot required to enable\n");
                goto out;
        }
 
-       raw_hashinfo_init(&raw_v6_hashinfo);
-
        err = proto_register(&tcpv6_prot, 1);
        if (err)
                goto out;
index a9ba416..858fd8a 100644 (file)
@@ -1028,8 +1028,11 @@ static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
                                ((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
                        }
                        rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
-               } else
+               } else {
+                       rcu_read_lock();
                        ip6_mr_forward(net, mrt, skb->dev, skb, c);
+                       rcu_read_unlock();
+               }
        }
 }
 
index d398f38..969b33a 100644 (file)
@@ -150,9 +150,15 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
                 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
                 to->len, MPTCP_SKB_CB(from)->end_seq);
        MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
-       kfree_skb_partial(from, fragstolen);
+
+       /* note the fwd memory can reach a negative value after accounting
+        * for the delta, but the later skb free will restore a
+        * non-negative one
+        */
        atomic_add(delta, &sk->sk_rmem_alloc);
        mptcp_rmem_charge(sk, delta);
+       kfree_skb_partial(from, fragstolen);
+
        return true;
 }
 
index 0d9332e..617f744 100644 (file)
@@ -33,6 +33,7 @@ MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
 MODULE_DESCRIPTION("ftp connection tracking helper");
 MODULE_ALIAS("ip_conntrack_ftp");
 MODULE_ALIAS_NFCT_HELPER(HELPER_NAME);
+static DEFINE_SPINLOCK(nf_ftp_lock);
 
 #define MAX_PORTS 8
 static u_int16_t ports[MAX_PORTS];
@@ -409,7 +410,8 @@ static int help(struct sk_buff *skb,
        }
        datalen = skb->len - dataoff;
 
-       spin_lock_bh(&ct->lock);
+       /* seqadj (nat) uses ct->lock internally; calling nf_nat_ftp under it would deadlock */
+       spin_lock_bh(&nf_ftp_lock);
        fb_ptr = skb->data + dataoff;
 
        ends_in_nl = (fb_ptr[datalen - 1] == '\n');
@@ -538,7 +540,7 @@ out_update_nl:
        if (ends_in_nl)
                update_nl_seq(ct, seq, ct_ftp_info, dir, skb);
  out:
-       spin_unlock_bh(&ct->lock);
+       spin_unlock_bh(&nf_ftp_lock);
        return ret;
 }
 
index 992decb..5703846 100644 (file)
@@ -157,15 +157,37 @@ static int help(struct sk_buff *skb, unsigned int protoff,
        data = ib_ptr;
        data_limit = ib_ptr + datalen;
 
-       /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
-        * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
-       while (data < data_limit - (19 + MINMATCHLEN)) {
-               if (memcmp(data, "\1DCC ", 5)) {
+       /* Skip any whitespace */
+       while (data < data_limit - 10) {
+               if (*data == ' ' || *data == '\r' || *data == '\n')
+                       data++;
+               else
+                       break;
+       }
+
+       /* strlen("PRIVMSG x ")=10 */
+       if (data < data_limit - 10) {
+               if (strncasecmp("PRIVMSG ", data, 8))
+                       goto out;
+               data += 8;
+       }
+
+       /* strlen(" :\1DCC SENT t AAAAAAAA P\1\n")=26
+        * 7+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=26
+        */
+       while (data < data_limit - (21 + MINMATCHLEN)) {
+               /* Find first " :", the start of message */
+               if (memcmp(data, " :", 2)) {
                        data++;
                        continue;
                }
+               data += 2;
+
+               /* then check that place only for the DCC command */
+               if (memcmp(data, "\1DCC ", 5))
+                       goto out;
                data += 5;
-               /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */
+               /* we have at least (21+MINMATCHLEN)-(2+5) bytes valid data left */
 
                iph = ip_hdr(skb);
                pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
@@ -181,7 +203,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
                        pr_debug("DCC %s detected\n", dccprotos[i]);
 
                        /* we have at least
-                        * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
+                        * (21+MINMATCHLEN)-7-dccprotos[i].matchlen bytes valid
                         * data left (== 14/13 bytes) */
                        if (parse_dcc(data, data_limit, &dcc_ip,
                                       &dcc_port, &addr_beg_p, &addr_end_p)) {
index daf06f7..77f5e82 100644 (file)
@@ -477,7 +477,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
                                return ret;
                        if (ret == 0)
                                break;
-                       dataoff += *matchoff;
+                       dataoff = *matchoff;
                }
                *in_header = 0;
        }
@@ -489,7 +489,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr,
                        break;
                if (ret == 0)
                        return ret;
-               dataoff += *matchoff;
+               dataoff = *matchoff;
        }
 
        if (in_header)
index 8160520..63c7014 100644 (file)
@@ -2197,7 +2197,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
                              struct netlink_ext_ack *extack)
 {
        const struct nlattr * const *nla = ctx->nla;
-       struct nft_stats __percpu *stats = NULL;
        struct nft_table *table = ctx->table;
        struct nft_base_chain *basechain;
        struct net *net = ctx->net;
@@ -2212,6 +2211,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
                return -EOVERFLOW;
 
        if (nla[NFTA_CHAIN_HOOK]) {
+               struct nft_stats __percpu *stats = NULL;
                struct nft_chain_hook hook;
 
                if (flags & NFT_CHAIN_BINDING)
@@ -2243,8 +2243,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
                if (err < 0) {
                        nft_chain_release_hook(&hook);
                        kfree(basechain);
+                       free_percpu(stats);
                        return err;
                }
+               if (stats)
+                       static_branch_inc(&nft_counters_enabled);
        } else {
                if (flags & NFT_CHAIN_BASE)
                        return -EINVAL;
@@ -2319,9 +2322,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
                goto err_unregister_hook;
        }
 
-       if (stats)
-               static_branch_inc(&nft_counters_enabled);
-
        table->use++;
 
        return 0;
index 0fa2e20..ee6840b 100644 (file)
@@ -269,6 +269,7 @@ bool nf_osf_find(const struct sk_buff *skb,
        struct nf_osf_hdr_ctx ctx;
        const struct tcphdr *tcp;
        struct tcphdr _tcph;
+       bool found = false;
 
        memset(&ctx, 0, sizeof(ctx));
 
@@ -283,10 +284,11 @@ bool nf_osf_find(const struct sk_buff *skb,
 
                data->genre = f->genre;
                data->version = f->version;
+               found = true;
                break;
        }
 
-       return true;
+       return found;
 }
 EXPORT_SYMBOL_GPL(nf_osf_find);
 
index 790d680..51d175f 100644 (file)
@@ -2137,6 +2137,7 @@ replay:
        }
 
        if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
+               tfilter_put(tp, fh);
                NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
                err = -EINVAL;
                goto errout;
index 0b941dd..86675a7 100644 (file)
@@ -67,6 +67,7 @@ struct taprio_sched {
        u32 flags;
        enum tk_offsets tk_offset;
        int clockid;
+       bool offloaded;
        atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
                                    * speeds it's sub-nanoseconds per byte
                                    */
@@ -1279,6 +1280,8 @@ static int taprio_enable_offload(struct net_device *dev,
                goto done;
        }
 
+       q->offloaded = true;
+
 done:
        taprio_offload_free(offload);
 
@@ -1293,12 +1296,9 @@ static int taprio_disable_offload(struct net_device *dev,
        struct tc_taprio_qopt_offload *offload;
        int err;
 
-       if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
+       if (!q->offloaded)
                return 0;
 
-       if (!ops->ndo_setup_tc)
-               return -EOPNOTSUPP;
-
        offload = taprio_offload_alloc(0);
        if (!offload) {
                NL_SET_ERR_MSG(extack,
@@ -1314,6 +1314,8 @@ static int taprio_disable_offload(struct net_device *dev,
                goto out;
        }
 
+       q->offloaded = false;
+
 out:
        taprio_offload_free(offload);
 
@@ -1949,12 +1951,14 @@ start_error:
 
 static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
 {
-       struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
+       struct taprio_sched *q = qdisc_priv(sch);
+       struct net_device *dev = qdisc_dev(sch);
+       unsigned int ntx = cl - 1;
 
-       if (!dev_queue)
+       if (ntx >= dev->num_tx_queues)
                return NULL;
 
-       return dev_queue->qdisc_sleeping;
+       return q->qdiscs[ntx];
 }
 
 static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
index ebf56cd..df89c2e 100644 (file)
@@ -2239,7 +2239,7 @@ out:
 static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
                                     struct smc_buf_desc *buf_desc, bool is_rmb)
 {
-       int i, rc = 0;
+       int i, rc = 0, cnt = 0;
 
        /* protect against parallel link reconfiguration */
        mutex_lock(&lgr->llc_conf_mutex);
@@ -2252,9 +2252,12 @@ static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
                        rc = -ENOMEM;
                        goto out;
                }
+               cnt++;
        }
 out:
        mutex_unlock(&lgr->llc_conf_mutex);
+       if (!rc && !cnt)
+               rc = -EINVAL;
        return rc;
 }
 
index 7d268a2..c284efa 100644 (file)
@@ -2873,6 +2873,9 @@ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
 
        task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC,
                        &rpc_cb_add_xprt_call_ops, data);
+       if (IS_ERR(task))
+               return PTR_ERR(task);
+
        data->xps->xps_nunique_destaddr_xprts++;
        rpc_put_task(task);
 success:
index d71eec4..f8fae78 100644 (file)
@@ -1179,11 +1179,8 @@ xprt_request_dequeue_receive_locked(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
 
-       if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
+       if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
                xprt_request_rb_remove(req->rq_xprt, req);
-               xdr_free_bvec(&req->rq_rcv_buf);
-               req->rq_private_buf.bvec = NULL;
-       }
 }
 
 /**
@@ -1221,6 +1218,8 @@ void xprt_complete_rqst(struct rpc_task *task, int copied)
 
        xprt->stat.recvs++;
 
+       xdr_free_bvec(&req->rq_rcv_buf);
+       req->rq_private_buf.bvec = NULL;
        req->rq_private_buf.len = copied;
        /* Ensure all writes are done before we update */
        /* req->rq_reply_bytes_recvd */
@@ -1453,6 +1452,7 @@ xprt_request_dequeue_xprt(struct rpc_task *task)
                xprt_request_dequeue_transmit_locked(task);
                xprt_request_dequeue_receive_locked(task);
                spin_unlock(&xprt->queue_lock);
+               xdr_free_bvec(&req->rq_rcv_buf);
        }
 }
 
index 9f39b01..8cf1cb2 100644 (file)
@@ -1,20 +1,19 @@
 DEBUG_CFLAGS   :=
+debug-flags-y  := -g
 
 ifdef CONFIG_DEBUG_INFO_SPLIT
 DEBUG_CFLAGS   += -gsplit-dwarf
-else
-DEBUG_CFLAGS   += -g
 endif
 
-ifndef CONFIG_AS_IS_LLVM
-KBUILD_AFLAGS  += -Wa,-gdwarf-2
-endif
-
-ifndef CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
-dwarf-version-$(CONFIG_DEBUG_INFO_DWARF4) := 4
-dwarf-version-$(CONFIG_DEBUG_INFO_DWARF5) := 5
-DEBUG_CFLAGS   += -gdwarf-$(dwarf-version-y)
+debug-flags-$(CONFIG_DEBUG_INFO_DWARF4)        += -gdwarf-4
+debug-flags-$(CONFIG_DEBUG_INFO_DWARF5)        += -gdwarf-5
+ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_AS_IS_GNU),yy)
+# Clang does not pass the -g or -gdwarf-* options down to GAS.
+# Add the -Wa, prefix to specify the flags explicitly.
+KBUILD_AFLAGS  += $(addprefix -Wa$(comma), $(debug-flags-y))
 endif
+DEBUG_CFLAGS   += $(debug-flags-y)
+KBUILD_AFLAGS  += $(debug-flags-y)
 
 ifdef CONFIG_DEBUG_INFO_REDUCED
 DEBUG_CFLAGS   += -fno-var-tracking
@@ -29,5 +28,5 @@ KBUILD_AFLAGS += -gz=zlib
 KBUILD_LDFLAGS += --compress-debug-sections=zlib
 endif
 
-KBUILD_CFLAGS += $(DEBUG_CFLAGS)
+KBUILD_CFLAGS  += $(DEBUG_CFLAGS)
 export DEBUG_CFLAGS
index 1337ced..bb78c9b 100755 (executable)
@@ -12,7 +12,6 @@ compile_commands.json.
 import argparse
 import json
 import multiprocessing
-import os
 import subprocess
 import sys
 
index fa8c010..c396aa1 100644 (file)
@@ -98,7 +98,6 @@ bool menu_is_empty(struct menu *menu);
 bool menu_is_visible(struct menu *menu);
 bool menu_has_prompt(struct menu *menu);
 const char *menu_get_prompt(struct menu *menu);
-struct menu *menu_get_root_menu(struct menu *menu);
 struct menu *menu_get_parent_menu(struct menu *menu);
 bool menu_has_help(struct menu *menu);
 const char *menu_get_help(struct menu *menu);
index 3d6f7cb..62b6313 100644 (file)
@@ -661,11 +661,6 @@ const char *menu_get_prompt(struct menu *menu)
        return NULL;
 }
 
-struct menu *menu_get_root_menu(struct menu *menu)
-{
-       return &rootmenu;
-}
-
 struct menu *menu_get_parent_menu(struct menu *menu)
 {
        enum prop_type type;
index 193dae3..5377f94 100644 (file)
@@ -178,10 +178,8 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
                return -ENOMEM;
 
        err = snd_card_init(card, parent, idx, xid, module, extra_size);
-       if (err < 0) {
-               kfree(card);
-               return err;
-       }
+       if (err < 0)
+               return err; /* card is freed by error handler */
 
        *card_ret = card;
        return 0;
@@ -233,7 +231,7 @@ int snd_devm_card_new(struct device *parent, int idx, const char *xid,
        card->managed = true;
        err = snd_card_init(card, parent, idx, xid, module, extra_size);
        if (err < 0) {
-               devres_free(card);
+               devres_free(card); /* in managed mode, we need to free manually */
                return err;
        }
 
@@ -297,6 +295,8 @@ static int snd_card_init(struct snd_card *card, struct device *parent,
                mutex_unlock(&snd_card_mutex);
                dev_err(parent, "cannot find the slot for index %d (range 0-%i), error: %d\n",
                         idx, snd_ecards_limit - 1, err);
+               if (!card->managed)
+                       kfree(card); /* manually free here, as no destructor called */
                return err;
        }
        set_bit(idx, snd_cards_lock);           /* lock it */
index d84ffdf..5a47864 100644 (file)
@@ -450,6 +450,16 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x51cb,
        },
+       /* RaptorLake-M */
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x51ce,
+       },
+       /* RaptorLake-PX */
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x51cf,
+       },
 #endif
 
 };
index cae9a97..1a868dd 100644 (file)
@@ -157,10 +157,10 @@ static int hda_codec_driver_remove(struct device *dev)
                return codec->bus->core.ext_ops->hdev_detach(&codec->core);
        }
 
-       refcount_dec(&codec->pcm_ref);
        snd_hda_codec_disconnect_pcms(codec);
        snd_hda_jack_tbl_disconnect(codec);
-       wait_event(codec->remove_sleep, !refcount_read(&codec->pcm_ref));
+       if (!refcount_dec_and_test(&codec->pcm_ref))
+               wait_event(codec->remove_sleep, !refcount_read(&codec->pcm_ref));
        snd_power_sync_ref(codec->bus->card);
 
        if (codec->patch_ops.free)
index b20694f..6f30c37 100644 (file)
@@ -2550,6 +2550,8 @@ static const struct pci_device_id azx_ids[] = {
        /* 5 Series/3400 */
        { PCI_DEVICE(0x8086, 0x3b56),
          .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
+       { PCI_DEVICE(0x8086, 0x3b57),
+         .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
        /* Poulsbo */
        { PCI_DEVICE(0x8086, 0x811b),
          .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE },
index 6c209cd..c239d9d 100644 (file)
@@ -170,6 +170,8 @@ struct hdmi_spec {
        bool dyn_pcm_no_legacy;
        /* hdmi interrupt trigger control flag for Nvidia codec */
        bool hdmi_intr_trig_ctrl;
+       bool nv_dp_workaround; /* workaround DP audio infoframe for Nvidia */
+
        bool intel_hsw_fixup;   /* apply Intel platform-specific fixups */
        /*
         * Non-generic VIA/NVIDIA specific
@@ -679,15 +681,24 @@ static void hdmi_pin_setup_infoframe(struct hda_codec *codec,
                                     int ca, int active_channels,
                                     int conn_type)
 {
+       struct hdmi_spec *spec = codec->spec;
        union audio_infoframe ai;
 
        memset(&ai, 0, sizeof(ai));
-       if (conn_type == 0) { /* HDMI */
+       if ((conn_type == 0) || /* HDMI */
+               /* Nvidia DisplayPort: Nvidia HW expects same layout as HDMI */
+               (conn_type == 1 && spec->nv_dp_workaround)) {
                struct hdmi_audio_infoframe *hdmi_ai = &ai.hdmi;
 
-               hdmi_ai->type           = 0x84;
-               hdmi_ai->ver            = 0x01;
-               hdmi_ai->len            = 0x0a;
+               if (conn_type == 0) { /* HDMI */
+                       hdmi_ai->type           = 0x84;
+                       hdmi_ai->ver            = 0x01;
+                       hdmi_ai->len            = 0x0a;
+               } else {/* Nvidia DP */
+                       hdmi_ai->type           = 0x84;
+                       hdmi_ai->ver            = 0x1b;
+                       hdmi_ai->len            = 0x11 << 2;
+               }
                hdmi_ai->CC02_CT47      = active_channels - 1;
                hdmi_ai->CA             = ca;
                hdmi_checksum_audio_infoframe(hdmi_ai);
@@ -1267,6 +1278,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
        set_bit(pcm_idx, &spec->pcm_in_use);
        per_pin = get_pin(spec, pin_idx);
        per_pin->cvt_nid = per_cvt->cvt_nid;
+       per_pin->silent_stream = false;
        hinfo->nid = per_cvt->cvt_nid;
 
        /* flip stripe flag for the assigned stream if supported */
@@ -3617,6 +3629,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
        spec->pcm_playback.rates = SUPPORTED_RATES;
        spec->pcm_playback.maxbps = SUPPORTED_MAXBPS;
        spec->pcm_playback.formats = SUPPORTED_FORMATS;
+       spec->nv_dp_workaround = true;
        return 0;
 }
 
@@ -3756,6 +3769,7 @@ static int patch_nvhdmi(struct hda_codec *codec)
        spec->chmap.ops.chmap_cea_alloc_validate_get_type =
                nvhdmi_chmap_cea_alloc_validate_get_type;
        spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
+       spec->nv_dp_workaround = true;
 
        codec->link_down_at_suspend = 1;
 
@@ -3779,6 +3793,7 @@ static int patch_nvhdmi_legacy(struct hda_codec *codec)
        spec->chmap.ops.chmap_cea_alloc_validate_get_type =
                nvhdmi_chmap_cea_alloc_validate_get_type;
        spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
+       spec->nv_dp_workaround = true;
 
        codec->link_down_at_suspend = 1;
 
@@ -3984,6 +3999,7 @@ static int tegra_hdmi_init(struct hda_codec *codec)
 
        generic_hdmi_init_per_pins(codec);
 
+       codec->depop_delay = 10;
        codec->patch_ops.build_pcms = tegra_hdmi_build_pcms;
        spec->chmap.ops.chmap_cea_alloc_validate_get_type =
                nvhdmi_chmap_cea_alloc_validate_get_type;
@@ -3992,6 +4008,7 @@ static int tegra_hdmi_init(struct hda_codec *codec)
        spec->chmap.ops.chmap_cea_alloc_validate_get_type =
                nvhdmi_chmap_cea_alloc_validate_get_type;
        spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
+       spec->nv_dp_workaround = true;
 
        return 0;
 }
index 38930cf..f9d46ae 100644 (file)
@@ -7067,6 +7067,8 @@ enum {
        ALC294_FIXUP_ASUS_GU502_HP,
        ALC294_FIXUP_ASUS_GU502_PINS,
        ALC294_FIXUP_ASUS_GU502_VERBS,
+       ALC294_FIXUP_ASUS_G513_PINS,
+       ALC285_FIXUP_ASUS_G533Z_PINS,
        ALC285_FIXUP_HP_GPIO_LED,
        ALC285_FIXUP_HP_MUTE_LED,
        ALC236_FIXUP_HP_GPIO_LED,
@@ -8406,6 +8408,24 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc294_fixup_gu502_hp,
        },
+        [ALC294_FIXUP_ASUS_G513_PINS] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                               { 0x19, 0x03a11050 }, /* front HP mic */
+                               { 0x1a, 0x03a11c30 }, /* rear external mic */
+                               { 0x21, 0x03211420 }, /* front HP out */
+                               { }
+               },
+       },
+       [ALC285_FIXUP_ASUS_G533Z_PINS] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x14, 0x90170120 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC294_FIXUP_ASUS_G513_PINS,
+       },
        [ALC294_FIXUP_ASUS_COEF_1B] = {
                .type = HDA_FIXUP_VERBS,
                .v.verbs = (const struct hda_verb[]) {
@@ -9149,6 +9169,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
+       SND_PCI_QUIRK(0x1028, 0x087d, "Dell Precision 5530", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
@@ -9165,6 +9186,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK),
+       SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -9292,6 +9314,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8902, "HP OMEN 16", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8971, "HP EliteBook 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8972, "HP EliteBook 840 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
@@ -9339,10 +9362,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
        SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+       SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+       SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
        SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
        SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
-       SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
        SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
        SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -9358,14 +9382,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
        SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+       SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
        SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
        SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
        SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
+       SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
        SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
+       SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401),
        SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
-       SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
-       SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
        SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
        SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
@@ -9569,6 +9595,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
        SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
        SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
+       SND_PCI_QUIRK(0x19e5, 0x320f, "Huawei WRT-WX9 ", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
        SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
        SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
index 1559645..4f19fd9 100644 (file)
@@ -901,7 +901,10 @@ static void nau8824_jdet_work(struct work_struct *work)
                NAU8824_IRQ_KEY_RELEASE_DIS |
                NAU8824_IRQ_KEY_SHORT_PRESS_DIS, 0);
 
-       nau8824_sema_release(nau8824);
+       if (nau8824->resume_lock) {
+               nau8824_sema_release(nau8824);
+               nau8824->resume_lock = false;
+       }
 }
 
 static void nau8824_setup_auto_irq(struct nau8824 *nau8824)
@@ -966,7 +969,10 @@ static irqreturn_t nau8824_interrupt(int irq, void *data)
                /* release semaphore held after resume,
                 * and cancel jack detection
                 */
-               nau8824_sema_release(nau8824);
+               if (nau8824->resume_lock) {
+                       nau8824_sema_release(nau8824);
+                       nau8824->resume_lock = false;
+               }
                cancel_work_sync(&nau8824->jdet_work);
        } else if (active_irq & NAU8824_KEY_SHORT_PRESS_IRQ) {
                int key_status, button_pressed;
@@ -1524,6 +1530,7 @@ static int __maybe_unused nau8824_suspend(struct snd_soc_component *component)
 static int __maybe_unused nau8824_resume(struct snd_soc_component *component)
 {
        struct nau8824 *nau8824 = snd_soc_component_get_drvdata(component);
+       int ret;
 
        regcache_cache_only(nau8824->regmap, false);
        regcache_sync(nau8824->regmap);
@@ -1531,7 +1538,10 @@ static int __maybe_unused nau8824_resume(struct snd_soc_component *component)
                /* Hold semaphore to postpone playback happening
                 * until jack detection done.
                 */
-               nau8824_sema_acquire(nau8824, 0);
+               nau8824->resume_lock = true;
+               ret = nau8824_sema_acquire(nau8824, 0);
+               if (ret)
+                       nau8824->resume_lock = false;
                enable_irq(nau8824->irq);
        }
 
@@ -1940,6 +1950,7 @@ static int nau8824_i2c_probe(struct i2c_client *i2c)
        nau8824->regmap = devm_regmap_init_i2c(i2c, &nau8824_regmap_config);
        if (IS_ERR(nau8824->regmap))
                return PTR_ERR(nau8824->regmap);
+       nau8824->resume_lock = false;
        nau8824->dev = dev;
        nau8824->irq = i2c->irq;
        sema_init(&nau8824->jd_sem, 1);
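
The nau8824 hunks above guard every semaphore release with the new resume_lock flag, so the jack-detect paths release the semaphore exactly once and only when the resume path actually acquired it. A hedged sketch of a balanced acquire/release using the generic kernel semaphore API (illustrative names; the driver itself goes through its nau8824_sema_acquire/_release wrappers):

#include <linux/jiffies.h>
#include <linux/semaphore.h>

static struct semaphore example_sem;
static bool example_held;

static void example_setup(void)
{
	sema_init(&example_sem, 1);
}

static void example_resume(void)
{
	/* down_timeout() returns 0 on success; remember whether we hold it */
	if (!down_timeout(&example_sem, msecs_to_jiffies(2000)))
		example_held = true;
}

static void example_jack_done(void)
{
	if (example_held) {		/* release only what we actually hold */
		up(&example_sem);
		example_held = false;
	}
}
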
index de4bae8..5fcfc43 100644 (file)
@@ -436,6 +436,7 @@ struct nau8824 {
        struct semaphore jd_sem;
        int fs;
        int irq;
+       int resume_lock;
        int micbias_voltage;
        int vref_impedance;
        int jkdet_polarity;
index 5a84432..0f8e6dd 100644 (file)
@@ -2494,7 +2494,7 @@ static void rt5640_enable_jack_detect(struct snd_soc_component *component,
 
        /* Select JD-source */
        snd_soc_component_update_bits(component, RT5640_JD_CTRL,
-               RT5640_JD_MASK, rt5640->jd_src);
+               RT5640_JD_MASK, rt5640->jd_src << RT5640_JD_SFT);
 
        /* Selecting GPIO01 as an interrupt */
        snd_soc_component_update_bits(component, RT5640_GPIO_CTRL1,
@@ -2504,12 +2504,8 @@ static void rt5640_enable_jack_detect(struct snd_soc_component *component,
        snd_soc_component_update_bits(component, RT5640_GPIO_CTRL3,
                RT5640_GP1_PF_MASK, RT5640_GP1_PF_OUT);
 
-       /* Enabling jd2 in general control 1 */
        snd_soc_component_write(component, RT5640_DUMMY1, 0x3f41);
 
-       /* Enabling jd2 in general control 2 */
-       snd_soc_component_write(component, RT5640_DUMMY2, 0x4001);
-
        rt5640_set_ovcd_params(component);
 
        /*
@@ -2518,12 +2514,25 @@ static void rt5640_enable_jack_detect(struct snd_soc_component *component,
         * pin 0/1 instead of it being stuck to 1. So we invert the JD polarity
         * on systems where the hardware does not already do this.
         */
-       if (rt5640->jd_inverted)
-               snd_soc_component_write(component, RT5640_IRQ_CTRL1,
-                                       RT5640_IRQ_JD_NOR);
-       else
-               snd_soc_component_write(component, RT5640_IRQ_CTRL1,
-                                       RT5640_IRQ_JD_NOR | RT5640_JD_P_INV);
+       if (rt5640->jd_inverted) {
+               if (rt5640->jd_src == RT5640_JD_SRC_JD1_IN4P)
+                       snd_soc_component_write(component, RT5640_IRQ_CTRL1,
+                               RT5640_IRQ_JD_NOR);
+               else if (rt5640->jd_src == RT5640_JD_SRC_JD2_IN4N)
+                       snd_soc_component_update_bits(component, RT5640_DUMMY2,
+                               RT5640_IRQ_JD2_MASK | RT5640_JD2_MASK,
+                               RT5640_IRQ_JD2_NOR | RT5640_JD2_EN);
+       } else {
+               if (rt5640->jd_src == RT5640_JD_SRC_JD1_IN4P)
+                       snd_soc_component_write(component, RT5640_IRQ_CTRL1,
+                               RT5640_IRQ_JD_NOR | RT5640_JD_P_INV);
+               else if (rt5640->jd_src == RT5640_JD_SRC_JD2_IN4N)
+                       snd_soc_component_update_bits(component, RT5640_DUMMY2,
+                               RT5640_IRQ_JD2_MASK | RT5640_JD2_P_MASK |
+                               RT5640_JD2_MASK,
+                               RT5640_IRQ_JD2_NOR | RT5640_JD2_P_INV |
+                               RT5640_JD2_EN);
+       }
 
        rt5640->jack = jack;
        if (rt5640->jack->status & SND_JACK_MICROPHONE) {
@@ -2725,10 +2734,8 @@ static int rt5640_probe(struct snd_soc_component *component)
 
        if (device_property_read_u32(component->dev,
                                     "realtek,jack-detect-source", &val) == 0) {
-               if (val <= RT5640_JD_SRC_GPIO4)
-                       rt5640->jd_src = val << RT5640_JD_SFT;
-               else if (val == RT5640_JD_SRC_HDA_HEADER)
-                       rt5640->jd_src = RT5640_JD_SRC_HDA_HEADER;
+               if (val <= RT5640_JD_SRC_HDA_HEADER)
+                       rt5640->jd_src = val;
                else
                        dev_warn(component->dev, "Warning: Invalid jack-detect-source value: %d, leaving jack-detect disabled\n",
                                 val);
@@ -2809,12 +2816,31 @@ static int rt5640_resume(struct snd_soc_component *component)
        regcache_sync(rt5640->regmap);
 
        if (rt5640->jack) {
-               if (rt5640->jd_src == RT5640_JD_SRC_HDA_HEADER)
+               if (rt5640->jd_src == RT5640_JD_SRC_HDA_HEADER) {
                        snd_soc_component_update_bits(component,
                                RT5640_DUMMY2, 0x1100, 0x1100);
-               else
-                       snd_soc_component_write(component, RT5640_DUMMY2,
-                               0x4001);
+               } else {
+                       if (rt5640->jd_inverted) {
+                               if (rt5640->jd_src == RT5640_JD_SRC_JD2_IN4N)
+                                       snd_soc_component_update_bits(
+                                               component, RT5640_DUMMY2,
+                                               RT5640_IRQ_JD2_MASK |
+                                               RT5640_JD2_MASK,
+                                               RT5640_IRQ_JD2_NOR |
+                                               RT5640_JD2_EN);
+
+                       } else {
+                               if (rt5640->jd_src == RT5640_JD_SRC_JD2_IN4N)
+                                       snd_soc_component_update_bits(
+                                               component, RT5640_DUMMY2,
+                                               RT5640_IRQ_JD2_MASK |
+                                               RT5640_JD2_P_MASK |
+                                               RT5640_JD2_MASK,
+                                               RT5640_IRQ_JD2_NOR |
+                                               RT5640_JD2_P_INV |
+                                               RT5640_JD2_EN);
+                       }
+               }
 
                queue_delayed_work(system_long_wq, &rt5640->jack_work, 0);
        }
index 505c935..f58b88e 100644 (file)
 #define RT5640_M_MONO_ADC_R_SFT                        12
 #define RT5640_MCLK_DET                                (0x1 << 11)
 
+/* General Control 1 (0xfb) */
+#define RT5640_IRQ_JD2_MASK                    (0x1 << 12)
+#define RT5640_IRQ_JD2_SFT                     12
+#define RT5640_IRQ_JD2_BP                      (0x0 << 12)
+#define RT5640_IRQ_JD2_NOR                     (0x1 << 12)
+#define RT5640_JD2_P_MASK                      (0x1 << 10)
+#define RT5640_JD2_P_SFT                       10
+#define RT5640_JD2_P_NOR                       (0x0 << 10)
+#define RT5640_JD2_P_INV                       (0x1 << 10)
+#define RT5640_JD2_MASK                                (0x1 << 8)
+#define RT5640_JD2_SFT                         8
+#define RT5640_JD2_DIS                         (0x0 << 8)
+#define RT5640_JD2_EN                          (0x1 << 8)
+
 /* Codec Private Register definition */
 
 /* MIC Over current threshold scale factor (0x15) */
index bb653b6..b676523 100644 (file)
@@ -495,6 +495,8 @@ static struct snd_soc_dai_driver tas2770_dai_driver[] = {
        },
 };
 
+static const struct regmap_config tas2770_i2c_regmap;
+
 static int tas2770_codec_probe(struct snd_soc_component *component)
 {
        struct tas2770_priv *tas2770 =
@@ -508,6 +510,7 @@ static int tas2770_codec_probe(struct snd_soc_component *component)
        }
 
        tas2770_reset(tas2770);
+       regmap_reinit_cache(tas2770->regmap, &tas2770_i2c_regmap);
 
        return 0;
 }
index 14be295..3f128ce 100644 (file)
@@ -698,6 +698,10 @@ static int imx_card_parse_of(struct imx_card_data *data)
                of_node_put(cpu);
                of_node_put(codec);
                of_node_put(platform);
+
+               cpu = NULL;
+               codec = NULL;
+               platform = NULL;
        }
 
        return 0;
index a49bfaa..2ff30b4 100644 (file)
@@ -270,6 +270,16 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
                .callback = sof_sdw_quirk_cb,
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0AFF")
+               },
+               .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+                                       RT711_JD2 |
+                                       SOF_SDW_FOUR_SPK),
+       },
+       {
+               .callback = sof_sdw_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
                        DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B00")
                },
                .driver_data = (void *)(SOF_SDW_TGL_HDMI |
index 8c8f9a8..eb71df9 100644 (file)
@@ -758,8 +758,7 @@ bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
  * The endpoint needs to be closed via snd_usb_endpoint_close() later.
  *
  * Note that this function doesn't configure the endpoint.  The substream
- * needs to set it up later via snd_usb_endpoint_set_params() and
- * snd_usb_endpoint_prepare().
+ * needs to set it up later via snd_usb_endpoint_configure().
  */
 struct snd_usb_endpoint *
 snd_usb_endpoint_open(struct snd_usb_audio *chip,
@@ -1293,13 +1292,12 @@ out_of_memory:
 /*
  * snd_usb_endpoint_set_params: configure an snd_usb_endpoint
  *
- * It's called either from hw_params callback.
  * Determine the number of URBs to be used on this endpoint.
  * An endpoint must be configured before it can be started.
  * An endpoint that is already running can not be reconfigured.
  */
-int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
-                               struct snd_usb_endpoint *ep)
+static int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+                                      struct snd_usb_endpoint *ep)
 {
        const struct audioformat *fmt = ep->cur_audiofmt;
        int err;
@@ -1382,18 +1380,18 @@ static int init_sample_rate(struct snd_usb_audio *chip,
 }
 
 /*
- * snd_usb_endpoint_prepare: Prepare the endpoint
+ * snd_usb_endpoint_configure: Configure the endpoint
  *
  * This function sets up the EP to be fully usable state.
- * It's called either from prepare callback.
+ * It's called either from hw_params or prepare callback.
  * The function checks need_setup flag, and performs nothing unless needed,
  * so it's safe to call this multiple times.
  *
  * This returns zero if unchanged, 1 if the configuration has changed,
  * or a negative error code.
  */
-int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
-                            struct snd_usb_endpoint *ep)
+int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
+                              struct snd_usb_endpoint *ep)
 {
        bool iface_first;
        int err = 0;
@@ -1414,6 +1412,9 @@ int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
                        if (err < 0)
                                goto unlock;
                }
+               err = snd_usb_endpoint_set_params(chip, ep);
+               if (err < 0)
+                       goto unlock;
                goto done;
        }
 
@@ -1441,6 +1442,10 @@ int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
        if (err < 0)
                goto unlock;
 
+       err = snd_usb_endpoint_set_params(chip, ep);
+       if (err < 0)
+               goto unlock;
+
        err = snd_usb_select_mode_quirk(chip, ep->cur_audiofmt);
        if (err < 0)
                goto unlock;
index e67ea28..6a9af04 100644 (file)
@@ -17,10 +17,8 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
                      bool is_sync_ep);
 void snd_usb_endpoint_close(struct snd_usb_audio *chip,
                            struct snd_usb_endpoint *ep);
-int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
-                               struct snd_usb_endpoint *ep);
-int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
-                            struct snd_usb_endpoint *ep);
+int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
+                              struct snd_usb_endpoint *ep);
 int snd_usb_endpoint_get_clock_rate(struct snd_usb_audio *chip, int clock);
 
 bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
index b604f7e..d45d1d7 100644 (file)
@@ -443,17 +443,17 @@ static int configure_endpoints(struct snd_usb_audio *chip,
                if (stop_endpoints(subs, false))
                        sync_pending_stops(subs);
                if (subs->sync_endpoint) {
-                       err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
+                       err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
                        if (err < 0)
                                return err;
                }
-               err = snd_usb_endpoint_prepare(chip, subs->data_endpoint);
+               err = snd_usb_endpoint_configure(chip, subs->data_endpoint);
                if (err < 0)
                        return err;
                snd_usb_set_format_quirk(subs, subs->cur_audiofmt);
        } else {
                if (subs->sync_endpoint) {
-                       err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
+                       err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
                        if (err < 0)
                                return err;
                }
@@ -551,13 +551,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
        subs->cur_audiofmt = fmt;
        mutex_unlock(&chip->mutex);
 
-       if (subs->sync_endpoint) {
-               ret = snd_usb_endpoint_set_params(chip, subs->sync_endpoint);
-               if (ret < 0)
-                       goto unlock;
-       }
-
-       ret = snd_usb_endpoint_set_params(chip, subs->data_endpoint);
+       ret = configure_endpoints(chip, subs);
 
  unlock:
        if (ret < 0)
index 235dc85..ef4775c 100644 (file)
 #define X86_BUG_ITLB_MULTIHIT          X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
 #define X86_BUG_SRBDS                  X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
 #define X86_BUG_MMIO_STALE_DATA                X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
-#define X86_BUG_RETBLEED               X86_BUG(26) /* CPU is affected by RETBleed */
-#define X86_BUG_EIBRS_PBRSB            X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
+#define X86_BUG_MMIO_UNKNOWN           X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
+#define X86_BUG_RETBLEED               X86_BUG(27) /* CPU is affected by RETBleed */
+#define X86_BUG_EIBRS_PBRSB            X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
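
The renumbering above is needed because each X86_BUG(n) is just a bit index past the regular feature words: inserting MMIO_UNKNOWN at 26 pushes the later entries up by one. A hypothetical sketch of that encoding follows; the NCAPINTS value here is an assumption for illustration, not taken from this tree.

#include <stdio.h>

#define NCAPINTS	21			/* assumed number of feature words */
#define X86_BUG(x)	(NCAPINTS * 32 + (x))	/* bug bits live past the feature words */

int main(void)
{
	printf("X86_BUG_MMIO_UNKNOWN -> bit %d\n", X86_BUG(26));
	printf("X86_BUG_RETBLEED     -> bit %d\n", X86_BUG(27));
	printf("X86_BUG_EIBRS_PBRSB  -> bit %d\n", X86_BUG(28));
	return 0;
}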
index 1e6fd6c..27f5e7d 100644 (file)
@@ -44,7 +44,7 @@
 
 /*
  * KVP protocol: The user mode component first registers with the
- * the kernel component. Subsequently, the kernel component requests, data
+ * kernel component. Subsequently, the kernel component requests data
  * for the specified keys. In response to this message the user mode component
  * fills in the value corresponding to the specified key. We overload the
  * sequence field in the cn_msg header to define our KVP message types.
@@ -772,11 +772,11 @@ static int kvp_process_ip_address(void *addrp,
        const char *str;
 
        if (family == AF_INET) {
-               addr = (struct sockaddr_in *)addrp;
+               addr = addrp;
                str = inet_ntop(family, &addr->sin_addr, tmp, 50);
                addr_length = INET_ADDRSTRLEN;
        } else {
-               addr6 = (struct sockaddr_in6 *)addrp;
+               addr6 = addrp;
                str = inet_ntop(family, &addr6->sin6_addr.s6_addr, tmp, 50);
                addr_length = INET6_ADDRSTRLEN;
        }
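
The casts dropped above are legal because a void * assigns to any object pointer type in C without a cast. A standalone illustration of the same inet_ntop() pattern, using a fixed example address:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

static void print_addr(void *addrp, int family)
{
	char tmp[INET6_ADDRSTRLEN];
	const char *str;

	if (family == AF_INET) {
		struct sockaddr_in *addr = addrp;	/* no cast needed */

		str = inet_ntop(family, &addr->sin_addr, tmp, sizeof(tmp));
	} else {
		struct sockaddr_in6 *addr6 = addrp;

		str = inet_ntop(family, &addr6->sin6_addr, tmp, sizeof(tmp));
	}
	if (str)
		printf("%s\n", str);
}

int main(void)
{
	struct sockaddr_in v4 = { .sin_family = AF_INET };

	inet_pton(AF_INET, "192.0.2.1", &v4.sin_addr);
	print_addr(&v4, AF_INET);
	return 0;
}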
index b238dbc..6a10ff5 100644 (file)
@@ -3,26 +3,7 @@
 #define _TOOLS_INCLUDE_LINUX_GFP_H
 
 #include <linux/types.h>
-
-#define __GFP_BITS_SHIFT 26
-#define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
-
-#define __GFP_HIGH             0x20u
-#define __GFP_IO               0x40u
-#define __GFP_FS               0x80u
-#define __GFP_NOWARN           0x200u
-#define __GFP_ZERO             0x8000u
-#define __GFP_ATOMIC           0x80000u
-#define __GFP_ACCOUNT          0x100000u
-#define __GFP_DIRECT_RECLAIM   0x400000u
-#define __GFP_KSWAPD_RECLAIM   0x2000000u
-
-#define __GFP_RECLAIM  (__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)
-
-#define GFP_ZONEMASK   0x0fu
-#define GFP_ATOMIC     (__GFP_HIGH | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM)
-#define GFP_KERNEL     (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
-#define GFP_NOWAIT     (__GFP_KSWAPD_RECLAIM)
+#include <linux/gfp_types.h>
 
 static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
 {
diff --git a/tools/include/linux/gfp_types.h b/tools/include/linux/gfp_types.h
new file mode 100644 (file)
index 0000000..5f9f1ed
--- /dev/null
@@ -0,0 +1 @@
+#include "../../../include/linux/gfp_types.h"
index d30439b..869379f 100644 (file)
@@ -9,8 +9,8 @@
 #include "../../../arch/alpha/include/uapi/asm/errno.h"
 #elif defined(__mips__)
 #include "../../../arch/mips/include/uapi/asm/errno.h"
-#elif defined(__xtensa__)
-#include "../../../arch/xtensa/include/uapi/asm/errno.h"
+#elif defined(__hppa__)
+#include "../../../arch/parisc/include/uapi/asm/errno.h"
 #else
 #include <asm-generic/errno.h>
 #endif
index 6b1bafe..8ec5b9f 100644 (file)
@@ -441,6 +441,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
 
        perf_evlist__for_each_entry(evlist, evsel) {
                bool overwrite = evsel->attr.write_backward;
+               enum fdarray_flags flgs;
                struct perf_mmap *map;
                int *output, fd, cpu;
 
@@ -504,8 +505,8 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
 
                revent = !overwrite ? POLLIN : 0;
 
-               if (!evsel->system_wide &&
-                   perf_evlist__add_pollfd(evlist, fd, map, revent, fdarray_flag__default) < 0) {
+               flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default;
+               if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
                        perf_mmap__put(map);
                        return -1;
                }
index f87ef43..0f711f8 100644 (file)
@@ -3371,6 +3371,8 @@ static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cp
                return 0;
 
        perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
+               if (cpu.cpu == -1)
+                       continue;
                /* Return ENODEV if input cpu is greater than max cpu */
                if ((unsigned long)cpu.cpu > mask->nbits)
                        return -ENODEV;
diff --git a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
new file mode 100755 (executable)
index 0000000..d724855
--- /dev/null
@@ -0,0 +1,83 @@
+#!/bin/sh
+# perf stat --bpf-counters --for-each-cgroup test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+test_cgroups=
+if [ "$1" = "-v" ]; then
+       verbose="1"
+fi
+
+# skip if --bpf-counters --for-each-cgroup is not supported
+check_bpf_counter()
+{
+       if ! perf stat -a --bpf-counters --for-each-cgroup / true > /dev/null 2>&1; then
+               if [ "${verbose}" = "1" ]; then
+                       echo "Skipping: --bpf-counters --for-each-cgroup not supported"
+                       perf --no-pager stat -a --bpf-counters --for-each-cgroup / true || true
+               fi
+               exit 2
+       fi
+}
+
+# find two cgroups to measure
+find_cgroups()
+{
+       # try usual systemd slices first
+       if [ -d /sys/fs/cgroup/system.slice -a -d /sys/fs/cgroup/user.slice ]; then
+               test_cgroups="system.slice,user.slice"
+               return
+       fi
+
+       # try root and self cgroups
+       local self_cgrp=$(grep perf_event /proc/self/cgroup | cut -d: -f3)
+       if [ -z ${self_cgrp} ]; then
+               # cgroup v2 doesn't specify perf_event
+               self_cgrp=$(grep ^0: /proc/self/cgroup | cut -d: -f3)
+       fi
+
+       if [ -z ${self_cgrp} ]; then
+               test_cgroups="/"
+       else
+               test_cgroups="/,${self_cgrp}"
+       fi
+}
+
+# As cgroup events are cpu-wide, we cannot simply compare the result.
+# Just check if it runs without failure and has non-zero results.
+check_system_wide_counted()
+{
+       local output
+
+       output=$(perf stat -a --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, sleep 1  2>&1)
+       if echo ${output} | grep -q -F "<not "; then
+               echo "Some system-wide events are not counted"
+               if [ "${verbose}" = "1" ]; then
+                       echo ${output}
+               fi
+               exit 1
+       fi
+}
+
+check_cpu_list_counted()
+{
+       local output
+
+       output=$(perf stat -C 1 --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, taskset -c 1 sleep 1  2>&1)
+       if echo ${output} | grep -q -F "<not "; then
+               echo "Some CPU events are not counted"
+               if [ "${verbose}" = "1" ]; then
+                       echo ${output}
+               fi
+               exit 1
+       fi
+}
+
+check_bpf_counter
+find_cgroups
+
+check_system_wide_counted
+check_cpu_list_counted
+
+exit 0
index 9d4c451..56455da 100644 (file)
@@ -2,7 +2,9 @@
 #include <stdlib.h>
 #include <string.h>
 #include <unistd.h>
+#include <errno.h>
 #include <sys/ioctl.h>
+#include <linux/compiler.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/kernel.h>
 #include "tests.h"
@@ -137,8 +139,7 @@ static int test__wp_rw(struct test_suite *test __maybe_unused,
 #endif
 }
 
-static int test__wp_modify(struct test_suite *test __maybe_unused,
-                          int subtest __maybe_unused)
+static int test__wp_modify(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
 {
 #if defined(__s390x__)
        return TEST_SKIP;
@@ -160,6 +161,11 @@ static int test__wp_modify(struct test_suite *test __maybe_unused,
        new_attr.disabled = 1;
        ret = ioctl(fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &new_attr);
        if (ret < 0) {
+               if (errno == ENOTTY) {
+                       test->test_cases[subtest].skip_reason = "missing kernel support";
+                       ret = TEST_SKIP;
+               }
+
                pr_debug("ioctl(PERF_EVENT_IOC_MODIFY_ATTRIBUTES) failed\n");
                close(fd);
                return ret;
index 63b9db6..3c2df75 100644 (file)
@@ -95,7 +95,7 @@ static int bperf_load_program(struct evlist *evlist)
 
        perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
                link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
-                                                     FD(cgrp_switch, cpu.cpu));
+                                                     FD(cgrp_switch, i));
                if (IS_ERR(link)) {
                        pr_err("Failed to attach cgroup program\n");
                        err = PTR_ERR(link);
@@ -115,15 +115,15 @@ static int bperf_load_program(struct evlist *evlist)
                        evsel->cgrp = NULL;
 
                        /* open single copy of the events w/o cgroup */
-                       err = evsel__open_per_cpu(evsel, evlist->core.all_cpus, -1);
+                       err = evsel__open_per_cpu(evsel, evsel->core.cpus, -1);
                        if (err) {
                                pr_err("Failed to open first cgroup events\n");
                                goto out;
                        }
 
                        map_fd = bpf_map__fd(skel->maps.events);
-                       perf_cpu_map__for_each_cpu(cpu, j, evlist->core.all_cpus) {
-                               int fd = FD(evsel, cpu.cpu);
+                       perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) {
+                               int fd = FD(evsel, j);
                                __u32 idx = evsel->core.idx * total_cpus + cpu.cpu;
 
                                err = bpf_map_update_elem(map_fd, &idx, &fd,
@@ -269,7 +269,7 @@ static int bperf_cgrp__read(struct evsel *evsel)
                        goto out;
                }
 
-               perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
+               perf_cpu_map__for_each_cpu(cpu, i, evsel->core.cpus) {
                        counts = perf_counts(evsel->counts, i, 0);
                        counts->val = values[cpu.cpu].counter;
                        counts->ena = values[cpu.cpu].enabled;
index 292c430..c72f8ad 100644 (file)
@@ -176,7 +176,7 @@ static int bperf_cgroup_count(void)
 }
 
 // This will be attached to cgroup-switches event for each cpu
-SEC("perf_events")
+SEC("perf_event")
 int BPF_PROG(on_cgrp_switch)
 {
        return bperf_cgroup_count();
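
The section rename matters because libbpf derives the BPF program type from the ELF section name prefix, and "perf_event" (singular) is the recognized prefix for perf-event programs; recent libbpf rejects unknown names such as "perf_events" at load time. A minimal skeleton of that shape, assuming the usual libbpf helper header and a clang -target bpf build:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Attached from userspace via bpf_program__attach_perf_event(). */
SEC("perf_event")
int on_sample(void *ctx)
{
	return 0;
}

char LICENSE[] SEC("license") = "GPL";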
index ed28a0d..d81b545 100644 (file)
@@ -253,6 +253,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
        Elf_Data *d;
        Elf_Scn *scn;
        Elf_Ehdr *ehdr;
+       Elf_Phdr *phdr;
        Elf_Shdr *shdr;
        uint64_t eh_frame_base_offset;
        char *strsym = NULL;
@@ -288,6 +289,19 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
        ehdr->e_shstrndx= unwinding ? 4 : 2; /* shdr index for section name */
 
        /*
+        * setup program header
+        */
+       phdr = elf_newphdr(e, 1);
+       phdr[0].p_type = PT_LOAD;
+       phdr[0].p_offset = 0;
+       phdr[0].p_vaddr = 0;
+       phdr[0].p_paddr = 0;
+       phdr[0].p_filesz = csize;
+       phdr[0].p_memsz = csize;
+       phdr[0].p_flags = PF_X | PF_R;
+       phdr[0].p_align = 8;
+
+       /*
         * setup text section
         */
        scn = elf_newscn(e);
index ae138af..b5c9095 100644 (file)
@@ -53,8 +53,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
 
 #if GEN_ELF_CLASS == ELFCLASS64
 #define elf_newehdr    elf64_newehdr
+#define elf_newphdr    elf64_newphdr
 #define elf_getshdr    elf64_getshdr
 #define Elf_Ehdr       Elf64_Ehdr
+#define Elf_Phdr       Elf64_Phdr
 #define Elf_Shdr       Elf64_Shdr
 #define Elf_Sym                Elf64_Sym
 #define ELF_ST_TYPE(a) ELF64_ST_TYPE(a)
@@ -62,8 +64,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
 #define ELF_ST_VIS(a)  ELF64_ST_VISIBILITY(a)
 #else
 #define elf_newehdr    elf32_newehdr
+#define elf_newphdr    elf32_newphdr
 #define elf_getshdr    elf32_getshdr
 #define Elf_Ehdr       Elf32_Ehdr
+#define Elf_Phdr       Elf32_Phdr
 #define Elf_Shdr       Elf32_Shdr
 #define Elf_Sym                Elf32_Sym
 #define ELF_ST_TYPE(a) ELF32_ST_TYPE(a)
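
elf_newphdr() above resolves to elf64_newphdr()/elf32_newphdr() via the new macros. The following is a rough standalone libelf sketch of reserving one PT_LOAD program header in the same way; the file name, machine type and size are placeholders, error handling is trimmed, and it links with -lelf.

#include <elf.h>
#include <fcntl.h>
#include <libelf.h>
#include <unistd.h>

int main(void)
{
	size_t csize = 4096;	/* placeholder code size */
	int fd = open("jit.elf", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	Elf *e;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;

	if (fd < 0 || elf_version(EV_CURRENT) == EV_NONE)
		return 1;

	e = elf_begin(fd, ELF_C_WRITE, NULL);
	ehdr = elf64_newehdr(e);		/* ELF header must exist first */
	ehdr->e_machine = EM_X86_64;		/* placeholder machine */
	ehdr->e_type = ET_DYN;

	phdr = elf64_newphdr(e, 1);		/* reserve one program header */
	phdr[0].p_type = PT_LOAD;
	phdr[0].p_offset = 0;
	phdr[0].p_vaddr = 0;
	phdr[0].p_paddr = 0;
	phdr[0].p_filesz = csize;
	phdr[0].p_memsz = csize;
	phdr[0].p_flags = PF_X | PF_R;
	phdr[0].p_align = 8;

	elf_update(e, ELF_C_WRITE);		/* compute layout and write the file */
	elf_end(e);
	close(fd);
	return 0;
}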
index 75bec32..647b7df 100644 (file)
@@ -2102,8 +2102,8 @@ static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
  * unusual.  One significant peculiarity is that the mapping (start -> pgoff)
  * is not the same for the kernel map and the modules map.  That happens because
  * the data is copied adjacently whereas the original kcore has gaps.  Finally,
- * kallsyms and modules files are compared with their copies to check that
- * modules have not been loaded or unloaded while the copies were taking place.
+ * kallsyms file is compared with its copy to check that modules have not been
+ * loaded or unloaded while the copies were taking place.
  *
  * Return: %0 on success, %-1 on failure.
  */
@@ -2166,9 +2166,6 @@ int kcore_copy(const char *from_dir, const char *to_dir)
                        goto out_extract_close;
        }
 
-       if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
-               goto out_extract_close;
-
        if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
                goto out_extract_close;
 
index 812424d..5387907 100644 (file)
@@ -367,13 +367,24 @@ static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
                                             bool is_kernel)
 {
        struct build_id bid;
+       struct nsinfo *nsi;
+       struct nscookie nc;
        int rc;
 
-       if (is_kernel)
+       if (is_kernel) {
                rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
-       else
-               rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
+               goto out;
+       }
+
+       nsi = nsinfo__new(event->pid);
+       nsinfo__mountns_enter(nsi, &nc);
 
+       rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
+
+       nsinfo__mountns_exit(&nc);
+       nsinfo__put(nsi);
+
+out:
        if (rc == 0) {
                memcpy(event->build_id, bid.data, sizeof(bid.data));
                event->build_id_size = (u8) bid.size;
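
nsinfo__mountns_enter()/exit() presumably switch into the target task's mount namespace so that event->filename is resolved the way that process sees it. A rough standalone sketch of that mechanism using the plain setns(2) interface (requires CAP_SYS_ADMIN; the helper name is made up):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Enter the mount namespace of @pid before opening namespace-relative paths. */
static int enter_mntns(pid_t pid)
{
	char path[64];
	int fd;

	snprintf(path, sizeof(path), "/proc/%d/ns/mnt", (int)pid);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	if (setns(fd, CLONE_NEWNS) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(int argc, char **argv)
{
	pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : getpid();

	if (enter_mntns(pid))
		perror("enter_mntns");
	else
		printf("now in the mount namespace of pid %d\n", (int)pid);
	return 0;
}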
index 4d1a947..01ceb98 100644 (file)
@@ -134,39 +134,6 @@ static struct ndtest_mapping region1_mapping[] = {
        },
 };
 
-static struct ndtest_mapping region2_mapping[] = {
-       {
-               .dimm = 0,
-               .position = 0,
-               .start = 0,
-               .size = DIMM_SIZE,
-       },
-};
-
-static struct ndtest_mapping region3_mapping[] = {
-       {
-               .dimm = 1,
-               .start = 0,
-               .size = DIMM_SIZE,
-       }
-};
-
-static struct ndtest_mapping region4_mapping[] = {
-       {
-               .dimm = 2,
-               .start = 0,
-               .size = DIMM_SIZE,
-       }
-};
-
-static struct ndtest_mapping region5_mapping[] = {
-       {
-               .dimm = 3,
-               .start = 0,
-               .size = DIMM_SIZE,
-       }
-};
-
 static struct ndtest_region bus0_regions[] = {
        {
                .type = ND_DEVICE_NAMESPACE_PMEM,
@@ -182,34 +149,6 @@ static struct ndtest_region bus0_regions[] = {
                .size = DIMM_SIZE * 2,
                .range_index = 2,
        },
-       {
-               .type = ND_DEVICE_NAMESPACE_BLK,
-               .num_mappings = ARRAY_SIZE(region2_mapping),
-               .mapping = region2_mapping,
-               .size = DIMM_SIZE,
-               .range_index = 3,
-       },
-       {
-               .type = ND_DEVICE_NAMESPACE_BLK,
-               .num_mappings = ARRAY_SIZE(region3_mapping),
-               .mapping = region3_mapping,
-               .size = DIMM_SIZE,
-               .range_index = 4,
-       },
-       {
-               .type = ND_DEVICE_NAMESPACE_BLK,
-               .num_mappings = ARRAY_SIZE(region4_mapping),
-               .mapping = region4_mapping,
-               .size = DIMM_SIZE,
-               .range_index = 5,
-       },
-       {
-               .type = ND_DEVICE_NAMESPACE_BLK,
-               .num_mappings = ARRAY_SIZE(region5_mapping),
-               .mapping = region5_mapping,
-               .size = DIMM_SIZE,
-               .range_index = 6,
-       },
 };
 
 static struct ndtest_mapping region6_mapping[] = {
@@ -501,21 +440,6 @@ static int ndtest_create_region(struct ndtest_priv *p,
        nd_set->altcookie = nd_set->cookie1;
        ndr_desc->nd_set = nd_set;
 
-       if (region->type == ND_DEVICE_NAMESPACE_BLK) {
-               mappings[0].start = 0;
-               mappings[0].size = DIMM_SIZE;
-               mappings[0].nvdimm = p->config->dimms[ndimm].nvdimm;
-
-               ndr_desc->mapping = &mappings[0];
-               ndr_desc->num_mappings = 1;
-               ndr_desc->num_lanes = 1;
-               ndbr_desc.enable = ndtest_blk_region_enable;
-               ndbr_desc.do_io = ndtest_blk_do_io;
-               region->region = nvdimm_blk_region_create(p->bus, ndr_desc);
-
-               goto done;
-       }
-
        for (i = 0; i < region->num_mappings; i++) {
                ndimm = region->mapping[i].dimm;
                mappings[i].start = region->mapping[i].start;
@@ -527,7 +451,6 @@ static int ndtest_create_region(struct ndtest_priv *p,
        ndr_desc->num_mappings = region->num_mappings;
        region->region = nvdimm_pmem_region_create(p->bus, ndr_desc);
 
-done:
        if (!region->region) {
                dev_err(&p->pdev.dev, "Error registering region %pR\n",
                        ndr_desc->res);
index c2064a3..1fc89b8 100644 (file)
@@ -13,6 +13,7 @@ TARGETS += damon
 TARGETS += drivers/dma-buf
 TARGETS += drivers/s390x/uvdevice
 TARGETS += drivers/net/bonding
+TARGETS += drivers/net/team
 TARGETS += efivarfs
 TARGETS += exec
 TARGETS += filesystems
index ab6c54b..1d86665 100644 (file)
@@ -1,6 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for net selftests
 
-TEST_PROGS := bond-break-lacpdu-tx.sh
+TEST_PROGS := bond-break-lacpdu-tx.sh \
+             dev_addr_lists.sh \
+             bond-arp-interval-causes-panic.sh
+
+TEST_FILES := lag_lib.sh
 
 include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh b/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh
new file mode 100755 (executable)
index 0000000..71c00bf
--- /dev/null
@@ -0,0 +1,49 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# cause kernel oops in bond_rr_gen_slave_id
+DEBUG=${DEBUG:-0}
+
+set -e
+test ${DEBUG} -ne 0 && set -x
+
+finish()
+{
+       ip netns delete server || true
+       ip netns delete client || true
+       ip link del link1_1 || true
+}
+
+trap finish EXIT
+
+client_ip4=192.168.1.198
+server_ip4=192.168.1.254
+
+# setup kernel so it reboots after causing the panic
+echo 180 >/proc/sys/kernel/panic
+
+# build namespaces
+ip link add dev link1_1 type veth peer name link1_2
+
+ip netns add "server"
+ip link set dev link1_2 netns server up name eth0
+ip netns exec server ip addr add ${server_ip4}/24 dev eth0
+
+ip netns add "client"
+ip link set dev link1_1 netns client down name eth0
+ip netns exec client ip link add dev bond0 down type bond mode 1 \
+       miimon 100 all_slaves_active 1
+ip netns exec client ip link set dev eth0 down master bond0
+ip netns exec client ip link set dev bond0 up
+ip netns exec client ip addr add ${client_ip4}/24 dev bond0
+ip netns exec client ping -c 5 $server_ip4 >/dev/null
+
+ip netns exec client ip link set dev eth0 down nomaster
+ip netns exec client ip link set dev bond0 down
+ip netns exec client ip link set dev bond0 type bond mode 0 \
+       arp_interval 1000 arp_ip_target "+${server_ip4}"
+ip netns exec client ip link set dev eth0 down master bond0
+ip netns exec client ip link set dev bond0 up
+ip netns exec client ping -c 5 $server_ip4 >/dev/null
+
+exit 0
diff --git a/tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh b/tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh
new file mode 100755 (executable)
index 0000000..e6fa24e
--- /dev/null
@@ -0,0 +1,109 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test bond device handling of addr lists (dev->uc, mc)
+#
+
+ALL_TESTS="
+       bond_cleanup_mode1
+       bond_cleanup_mode4
+       bond_listen_lacpdu_multicast_case_down
+       bond_listen_lacpdu_multicast_case_up
+"
+
+REQUIRE_MZ=no
+NUM_NETIFS=0
+lib_dir=$(dirname "$0")
+source "$lib_dir"/../../../net/forwarding/lib.sh
+
+source "$lib_dir"/lag_lib.sh
+
+
+destroy()
+{
+       local ifnames=(dummy1 dummy2 bond1 mv0)
+       local ifname
+
+       for ifname in "${ifnames[@]}"; do
+               ip link del "$ifname" &>/dev/null
+       done
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       destroy
+}
+
+
+# bond driver control paths vary between modes that have a primary slave
+# (bond_uses_primary()) and others. Test both kinds of modes.
+
+bond_cleanup_mode1()
+{
+       RET=0
+
+       test_LAG_cleanup "bonding" "active-backup"
+}
+
+bond_cleanup_mode4() {
+       RET=0
+
+       test_LAG_cleanup "bonding" "802.3ad"
+}
+
+bond_listen_lacpdu_multicast()
+{
+       # Initial state of bond device, up | down
+       local init_state=$1
+       local lacpdu_mc="01:80:c2:00:00:02"
+
+       ip link add dummy1 type dummy
+       ip link add bond1 "$init_state" type bond mode 802.3ad
+       ip link set dev dummy1 master bond1
+       if [ "$init_state" = "down" ]; then
+               ip link set dev bond1 up
+       fi
+
+       grep_bridge_fdb "$lacpdu_mc" bridge fdb show brport dummy1 >/dev/null
+       check_err $? "LACPDU multicast address not present on slave (1)"
+
+       ip link set dev bond1 down
+
+       not grep_bridge_fdb "$lacpdu_mc" bridge fdb show brport dummy1 >/dev/null
+       check_err $? "LACPDU multicast address still present on slave"
+
+       ip link set dev bond1 up
+
+       grep_bridge_fdb "$lacpdu_mc" bridge fdb show brport dummy1 >/dev/null
+       check_err $? "LACPDU multicast address not present on slave (2)"
+
+       cleanup
+
+       log_test "bonding LACPDU multicast address to slave (from bond $init_state)"
+}
+
+# The LACPDU mc addr is added by different paths depending on the initial state
+# of the bond when enslaving a device. Test both cases.
+
+bond_listen_lacpdu_multicast_case_down()
+{
+       RET=0
+
+       bond_listen_lacpdu_multicast "down"
+}
+
+bond_listen_lacpdu_multicast_case_up()
+{
+       RET=0
+
+       bond_listen_lacpdu_multicast "up"
+}
+
+
+trap cleanup EXIT
+
+tests_run
+
+exit "$EXIT_STATUS"
diff --git a/tools/testing/selftests/drivers/net/bonding/lag_lib.sh b/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
new file mode 100644 (file)
index 0000000..16c7fb8
--- /dev/null
@@ -0,0 +1,61 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test that a link aggregation device (bonding, team) removes the hardware
+# addresses that it adds on its underlying devices.
+test_LAG_cleanup()
+{
+       local driver=$1
+       local mode=$2
+       local ucaddr="02:00:00:12:34:56"
+       local addr6="fe80::78:9abc/64"
+       local mcaddr="33:33:ff:78:9a:bc"
+       local name
+
+       ip link add dummy1 type dummy
+       ip link add dummy2 type dummy
+       if [ "$driver" = "bonding" ]; then
+               name="bond1"
+               ip link add "$name" up type bond mode "$mode"
+               ip link set dev dummy1 master "$name"
+               ip link set dev dummy2 master "$name"
+       elif [ "$driver" = "team" ]; then
+               name="team0"
+               teamd -d -c '
+                       {
+                               "device": "'"$name"'",
+                               "runner": {
+                                       "name": "'"$mode"'"
+                               },
+                               "ports": {
+                                       "dummy1":
+                                               {},
+                                       "dummy2":
+                                               {}
+                               }
+                       }
+               '
+               ip link set dev "$name" up
+       else
+               check_err 1
+               log_test test_LAG_cleanup ": unknown driver \"$driver\""
+               return
+       fi
+
+       # Used to test dev->uc handling
+       ip link add mv0 link "$name" up address "$ucaddr" type macvlan
+       # Used to test dev->mc handling
+       ip address add "$addr6" dev "$name"
+       ip link set dev "$name" down
+       ip link del "$name"
+
+       not grep_bridge_fdb "$ucaddr" bridge fdb show >/dev/null
+       check_err $? "macvlan unicast address still present on a slave"
+
+       not grep_bridge_fdb "$mcaddr" bridge fdb show >/dev/null
+       check_err $? "IPv6 solicited-node multicast mac address still present on a slave"
+
+       cleanup
+
+       log_test "$driver cleanup mode $mode"
+}
diff --git a/tools/testing/selftests/drivers/net/team/Makefile b/tools/testing/selftests/drivers/net/team/Makefile
new file mode 100644 (file)
index 0000000..642d8df
--- /dev/null
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for net selftests
+
+TEST_PROGS := dev_addr_lists.sh
+
+include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/team/config b/tools/testing/selftests/drivers/net/team/config
new file mode 100644 (file)
index 0000000..265b688
--- /dev/null
@@ -0,0 +1,3 @@
+CONFIG_NET_TEAM=y
+CONFIG_NET_TEAM_MODE_LOADBALANCE=y
+CONFIG_MACVLAN=y
diff --git a/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh b/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh
new file mode 100755 (executable)
index 0000000..debda72
--- /dev/null
@@ -0,0 +1,51 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test team device handling of addr lists (dev->uc, mc)
+#
+
+ALL_TESTS="
+       team_cleanup
+"
+
+REQUIRE_MZ=no
+NUM_NETIFS=0
+lib_dir=$(dirname "$0")
+source "$lib_dir"/../../../net/forwarding/lib.sh
+
+source "$lib_dir"/../bonding/lag_lib.sh
+
+
+destroy()
+{
+       local ifnames=(dummy0 dummy1 team0 mv0)
+       local ifname
+
+       for ifname in "${ifnames[@]}"; do
+               ip link del "$ifname" &>/dev/null
+       done
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       destroy
+}
+
+
+team_cleanup()
+{
+       RET=0
+
+       test_LAG_cleanup "team" "lacp"
+}
+
+
+require_command teamd
+
+trap cleanup EXIT
+
+tests_run
+
+exit "$EXIT_STATUS"
index fac248a..6f88da7 100644 (file)
@@ -227,7 +227,7 @@ int main(int argc, char *argv[])
        ucall_init(vm, NULL);
 
        pthread_create(&migration_thread, NULL, migration_worker,
-                      (void *)(unsigned long)gettid());
+                      (void *)(unsigned long)syscall(SYS_gettid));
 
        for (i = 0; !done; i++) {
                vcpu_run(vcpu);
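
The switch to syscall(SYS_gettid) is presumably for portability: glibc only exports a gettid() wrapper from version 2.30 on, while the raw syscall works everywhere. A minimal standalone equivalent:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* No glibc wrapper needed; the raw syscall returns the thread id. */
	long tid = syscall(SYS_gettid);

	printf("tid=%ld\n", tid);
	return 0;
}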
index 02868ac..6632bff 100644 (file)
@@ -1,6 +1,11 @@
 # SPDX-License-Identifier: GPL-2.0
+#
+# First run: make -C ../../../.. headers_install
 
 CFLAGS += -Wall -O2 $(KHDR_INCLUDES)
+LDLIBS += -lcap
+
+LOCAL_HDRS += common.h
 
 src_test := $(wildcard *_test.c)
 
@@ -8,14 +13,10 @@ TEST_GEN_PROGS := $(src_test:.c=)
 
 TEST_GEN_PROGS_EXTENDED := true
 
-OVERRIDE_TARGETS := 1
-top_srcdir := ../../../..
-include ../lib.mk
-
-khdr_dir = $(top_srcdir)/usr/include
+# Static linking for short targets:
+$(TEST_GEN_PROGS_EXTENDED): LDFLAGS += -static
 
-$(OUTPUT)/true: true.c
-       $(LINK.c) $< $(LDLIBS) -o $@ -static
+include ../lib.mk
 
-$(OUTPUT)/%_test: %_test.c $(khdr_dir)/linux/landlock.h ../kselftest_harness.h common.h
-       $(LINK.c) $< $(LDLIBS) -o $@ -lcap -I$(khdr_dir)
+# Static linking for targets with $(OUTPUT)/ prefix:
+$(OUTPUT)/$(TEST_GEN_PROGS_EXTENDED): LDFLAGS += -static
index d44c72b..9d4cb94 100644 (file)
@@ -42,6 +42,10 @@ endif
 selfdir = $(realpath $(dir $(filter %/lib.mk,$(MAKEFILE_LIST))))
 top_srcdir = $(selfdir)/../../..
 
+ifeq ($(KHDR_INCLUDES),)
+KHDR_INCLUDES := -isystem $(top_srcdir)/usr/include
+endif
+
 # The following are built by lib.mk common compile rules.
 # TEST_CUSTOM_PROGS should be used by tests that require
 # custom build rule and prevent common build rule use.
index 57e90c8..5a58b1e 100755 (executable)
@@ -28,7 +28,7 @@
 # +------------------+       +------------------+
 #
 
-ALL_TESTS="mcast_v4 mcast_v6 rpf_v4 rpf_v6"
+ALL_TESTS="mcast_v4 mcast_v6 rpf_v4 rpf_v6 unres_v4 unres_v6"
 NUM_NETIFS=6
 source lib.sh
 source tc_common.sh
@@ -406,6 +406,96 @@ rpf_v6()
        log_test "RPF IPv6"
 }
 
+unres_v4()
+{
+       # Send a multicast packet not corresponding to an installed route,
+       # causing the kernel to queue the packet for resolution and emit an
+       # IGMPMSG_NOCACHE notification. smcrouted will react to this
+       # notification by consulting its (*, G) list and installing an (S, G)
+       # route, which will be used to forward the queued packet.
+
+       RET=0
+
+       tc filter add dev $h2 ingress protocol ip pref 1 handle 1 flower \
+               dst_ip 225.1.2.3 ip_proto udp dst_port 12345 action drop
+       tc filter add dev $h3 ingress protocol ip pref 1 handle 1 flower \
+               dst_ip 225.1.2.3 ip_proto udp dst_port 12345 action drop
+
+       # Forwarding should fail before installing a matching (*, G).
+       $MZ $h1 -c 1 -p 128 -t udp "ttl=10,sp=54321,dp=12345" \
+               -a 00:11:22:33:44:55 -b 01:00:5e:01:02:03 \
+               -A 198.51.100.2 -B 225.1.2.3 -q
+
+       tc_check_packets "dev $h2 ingress" 1 0
+       check_err $? "Multicast received on first host when should not"
+       tc_check_packets "dev $h3 ingress" 1 0
+       check_err $? "Multicast received on second host when should not"
+
+       # Create (*, G). Will not be installed in the kernel.
+       create_mcast_sg $rp1 0.0.0.0 225.1.2.3 $rp2 $rp3
+
+       $MZ $h1 -c 1 -p 128 -t udp "ttl=10,sp=54321,dp=12345" \
+               -a 00:11:22:33:44:55 -b 01:00:5e:01:02:03 \
+               -A 198.51.100.2 -B 225.1.2.3 -q
+
+       tc_check_packets "dev $h2 ingress" 1 1
+       check_err $? "Multicast not received on first host"
+       tc_check_packets "dev $h3 ingress" 1 1
+       check_err $? "Multicast not received on second host"
+
+       delete_mcast_sg $rp1 0.0.0.0 225.1.2.3 $rp2 $rp3
+
+       tc filter del dev $h3 ingress protocol ip pref 1 handle 1 flower
+       tc filter del dev $h2 ingress protocol ip pref 1 handle 1 flower
+
+       log_test "Unresolved queue IPv4"
+}
+
+unres_v6()
+{
+       # Send a multicast packet not corresponding to an installed route,
+       # causing the kernel to queue the packet for resolution and emit an
+       # MRT6MSG_NOCACHE notification. smcrouted will react to this
+       # notification by consulting its (*, G) list and installing an (S, G)
+       # route, which will be used to forward the queued packet.
+
+       RET=0
+
+       tc filter add dev $h2 ingress protocol ipv6 pref 1 handle 1 flower \
+               dst_ip ff0e::3 ip_proto udp dst_port 12345 action drop
+       tc filter add dev $h3 ingress protocol ipv6 pref 1 handle 1 flower \
+               dst_ip ff0e::3 ip_proto udp dst_port 12345 action drop
+
+       # Forwarding should fail before installing a matching (*, G).
+       $MZ $h1 -6 -c 1 -p 128 -t udp "ttl=10,sp=54321,dp=12345" \
+               -a 00:11:22:33:44:55 -b 33:33:00:00:00:03 \
+               -A 2001:db8:1::2 -B ff0e::3 -q
+
+       tc_check_packets "dev $h2 ingress" 1 0
+       check_err $? "Multicast received on first host when should not"
+       tc_check_packets "dev $h3 ingress" 1 0
+       check_err $? "Multicast received on second host when should not"
+
+       # Create (*, G). Will not be installed in the kernel.
+       create_mcast_sg $rp1 :: ff0e::3 $rp2 $rp3
+
+       $MZ $h1 -6 -c 1 -p 128 -t udp "ttl=10,sp=54321,dp=12345" \
+               -a 00:11:22:33:44:55 -b 33:33:00:00:00:03 \
+               -A 2001:db8:1::2 -B ff0e::3 -q
+
+       tc_check_packets "dev $h2 ingress" 1 1
+       check_err $? "Multicast not received on first host"
+       tc_check_packets "dev $h3 ingress" 1 1
+       check_err $? "Multicast not received on second host"
+
+       delete_mcast_sg $rp1 :: ff0e::3 $rp2 $rp3
+
+       tc filter del dev $h3 ingress protocol ipv6 pref 1 handle 1 flower
+       tc filter del dev $h2 ingress protocol ipv6 pref 1 handle 1 flower
+
+       log_test "Unresolved queue IPv6"
+}
+
 trap cleanup EXIT
 
 setup_prepare
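
The unres_v4/unres_v6 comments above rely on the kernel's multicast-routing upcall: a packet with no matching route is queued for resolution and the routing daemon is notified so it can install an (S, G) entry. The sketch below shows, roughly, how a daemon such as smcrouted receives the IPv4 notification, assuming the classic mroute socket API (must run as root; details simplified).

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/mroute.h>

int main(void)
{
	int one = 1;
	char buf[2048];
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);

	/* Register as the multicast routing daemon for this namespace. */
	if (fd < 0 || setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one)) < 0) {
		perror("MRT_INIT");
		return 1;
	}

	for (;;) {
		ssize_t n = read(fd, buf, sizeof(buf));
		struct igmpmsg *msg = (struct igmpmsg *)buf;

		if (n < (ssize_t)sizeof(*msg))
			continue;
		/* im_mbz == 0 marks a kernel upcall rather than a real IGMP packet. */
		if (msg->im_mbz == 0 && msg->im_msgtype == IGMPMSG_NOCACHE)
			printf("unresolved group on vif %u\n", msg->im_vif);
	}
}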
index e714bae..81f3117 100755 (executable)
@@ -1,3 +1,4 @@
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
 # This test sends one stream of traffic from H1 through a TBF shaper, to a RED
index a699187..e908009 100755 (executable)
@@ -91,7 +91,7 @@ src
 start          1
 count          5
 src_delta      2000
-tools          sendip nc bash
+tools          sendip socat nc bash
 proto          udp
 
 race_repeat    3
@@ -116,7 +116,7 @@ src
 start          10
 count          5
 src_delta      2000
-tools          sendip nc bash
+tools          sendip socat nc bash
 proto          udp6
 
 race_repeat    3
@@ -141,7 +141,7 @@ src
 start          1
 count          5
 src_delta      2000
-tools          sendip nc bash
+tools          sendip socat nc bash
 proto          udp
 
 race_repeat    0
@@ -163,7 +163,7 @@ src         mac
 start          10
 count          5
 src_delta      2000
-tools          sendip nc bash
+tools          sendip socat nc bash
 proto          udp6
 
 race_repeat    0
@@ -185,7 +185,7 @@ src         mac proto
 start          10
 count          5
 src_delta      2000
-tools          sendip nc bash
+tools          sendip socat nc bash
 proto          udp6
 
 race_repeat    0
@@ -207,7 +207,7 @@ src         addr4
 start          1
 count          5
 src_delta      2000
-tools          sendip nc bash
+tools          sendip socat nc bash
 proto          udp
 
 race_repeat    3
@@ -227,7 +227,7 @@ src         addr6 port
 start          10
 count          5
 src_delta      2000
-tools          sendip nc
+tools          sendip socat nc
 proto          udp6
 
 race_repeat    3
@@ -247,7 +247,7 @@ src         mac proto addr4
 start          1
 count          5
 src_delta      2000
-tools          sendip nc bash
+tools          sendip socat nc bash
 proto          udp
 
 race_repeat    0
@@ -264,7 +264,7 @@ src         mac
 start          1
 count          5
 src_delta      2000
-tools          sendip nc bash
+tools          sendip socat nc bash
 proto          udp
 
 race_repeat    0
@@ -286,7 +286,7 @@ src         mac addr4
 start          1
 count          5
 src_delta      2000
-tools          sendip nc bash
+tools          sendip socat nc bash
 proto          udp
 
 race_repeat    0
@@ -337,7 +337,7 @@ src         addr4
 start          1
 count          5
 src_delta      2000
-tools          sendip nc
+tools          sendip socat nc
 proto          udp
 
 race_repeat    3
@@ -363,7 +363,7 @@ src         mac
 start          1
 count          1
 src_delta      2000
-tools          sendip nc bash
+tools          sendip socat nc bash
 proto          udp
 
 race_repeat    0
@@ -541,6 +541,24 @@ setup_send_udp() {
                        dst_port=
                        src_addr4=
                }
+       elif command -v socat >/dev/null; then
+               send_udp() {
+                       if [ -n "${src_addr4}" ]; then
+                               B ip addr add "${src_addr4}" dev veth_b
+                               __socatbind=",bind=${src_addr4}"
+                               if [ -n "${src_port}" ];then
+                                       __socatbind="${__socatbind}:${src_port}"
+                               fi
+                       fi
+
+                       ip addr add "${dst_addr4}" dev veth_a 2>/dev/null
+                       [ -z "${dst_port}" ] && dst_port=12345
+
+                       echo "test4" | B socat -t 0.01 STDIN UDP4-DATAGRAM:${dst_addr4}:${dst_port}"${__socatbind}"
+
+                       src_addr4=
+                       src_port=
+               }
        elif command -v nc >/dev/null; then
                if nc -u -w0 1.1.1.1 1 2>/dev/null; then
                        # OpenBSD netcat
@@ -606,6 +624,29 @@ setup_send_udp6() {
                        dst_port=
                        src_addr6=
                }
+       elif command -v socat >/dev/null; then
+               send_udp6() {
+                       ip -6 addr add "${dst_addr6}" dev veth_a nodad \
+                               2>/dev/null
+
+                       __socatbind6=
+
+                       if [ -n "${src_addr6}" ]; then
+                               if [ "${src_addr6}" != "${src_addr6_added}" ]; then
+                                       B ip addr add "${src_addr6}" dev veth_b nodad
+
+                                       src_addr6_added=${src_addr6}
+                               fi
+
+                               __socatbind6=",bind=[${src_addr6}]"
+
+                               if [ -n "${src_port}" ] ;then
+                                       __socatbind6="${__socatbind6}:${src_port}"
+                               fi
+                       fi
+
+                       echo "test6" | B socat -t 0.01 STDIN UDP6-DATAGRAM:[${dst_addr6}]:${dst_port}"${__socatbind6}"
+               }
        elif command -v nc >/dev/null && nc -u -w0 1.1.1.1 1 2>/dev/null; then
                # GNU netcat might not work with IPv6, try next tool
                send_udp6() {
index f0d51d4..3a5936c 100644 (file)
@@ -1,4 +1,4 @@
-TEST_GEN_PROGS := timens timerfd timer clock_nanosleep procfs exec futex vfork_exec
+TEST_GEN_PROGS := timens timerfd timer clock_nanosleep procfs exec futex
 TEST_GEN_PROGS_EXTENDED := gettime_perf
 
 CFLAGS := -Wall -Werror -pthread
diff --git a/tools/testing/selftests/timens/vfork_exec.c b/tools/testing/selftests/timens/vfork_exec.c
deleted file mode 100644 (file)
index e6ccd90..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define _GNU_SOURCE
-#include <errno.h>
-#include <fcntl.h>
-#include <sched.h>
-#include <stdio.h>
-#include <stdbool.h>
-#include <sys/stat.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <time.h>
-#include <unistd.h>
-#include <string.h>
-
-#include "log.h"
-#include "timens.h"
-
-#define OFFSET (36000)
-
-int main(int argc, char *argv[])
-{
-       struct timespec now, tst;
-       int status, i;
-       pid_t pid;
-
-       if (argc > 1) {
-               if (sscanf(argv[1], "%ld", &now.tv_sec) != 1)
-                       return pr_perror("sscanf");
-
-               for (i = 0; i < 2; i++) {
-                       _gettime(CLOCK_MONOTONIC, &tst, i);
-                       if (abs(tst.tv_sec - now.tv_sec) > 5)
-                               return pr_fail("%ld %ld\n", now.tv_sec, tst.tv_sec);
-               }
-               return 0;
-       }
-
-       nscheck();
-
-       ksft_set_plan(1);
-
-       clock_gettime(CLOCK_MONOTONIC, &now);
-
-       if (unshare_timens())
-               return 1;
-
-       if (_settime(CLOCK_MONOTONIC, OFFSET))
-               return 1;
-
-       for (i = 0; i < 2; i++) {
-               _gettime(CLOCK_MONOTONIC, &tst, i);
-               if (abs(tst.tv_sec - now.tv_sec) > 5)
-                       return pr_fail("%ld %ld\n",
-                                       now.tv_sec, tst.tv_sec);
-       }
-
-       pid = vfork();
-       if (pid < 0)
-               return pr_perror("fork");
-
-       if (pid == 0) {
-               char now_str[64];
-               char *cargv[] = {"exec", now_str, NULL};
-               char *cenv[] = {NULL};
-
-               // Check that we are still in the source timens.
-               for (i = 0; i < 2; i++) {
-                       _gettime(CLOCK_MONOTONIC, &tst, i);
-                       if (abs(tst.tv_sec - now.tv_sec) > 5)
-                               return pr_fail("%ld %ld\n",
-                                               now.tv_sec, tst.tv_sec);
-               }
-
-               /* Check for proper vvar offsets after execve. */
-               snprintf(now_str, sizeof(now_str), "%ld", now.tv_sec + OFFSET);
-               execve("/proc/self/exe", cargv, cenv);
-               return pr_perror("execve");
-       }
-
-       if (waitpid(pid, &status, 0) != pid)
-               return pr_perror("waitpid");
-
-       if (status)
-               ksft_exit_fail();
-
-       ksft_test_result_pass("exec\n");
-       ksft_exit_pass();
-       return 0;
-}
index fda7628..e95bd56 100644 (file)
@@ -343,8 +343,10 @@ $(KERNEL_BZIMAGE): $(TOOLCHAIN_PATH)/.installed $(KERNEL_BUILD_PATH)/.config $(B
 .PHONY: $(KERNEL_BZIMAGE)
 
 $(TOOLCHAIN_PATH)/$(CHOST)/include/linux/.installed: | $(KERNEL_BUILD_PATH)/.config $(TOOLCHAIN_PATH)/.installed
+ifneq ($(ARCH),um)
        rm -rf $(TOOLCHAIN_PATH)/$(CHOST)/include/linux
        $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) INSTALL_HDR_PATH=$(TOOLCHAIN_PATH)/$(CHOST) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) headers_install
+endif
        touch $@
 
 $(TOOLCHAIN_PATH)/.installed: $(TOOLCHAIN_TAR)