Merge tag 'objtool_urgent_for_v6.1_rc7' of git://git.kernel.org/pub/scm/linux/kernel...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sun, 27 Nov 2022 20:08:17 +0000 (12:08 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sun, 27 Nov 2022 20:08:17 +0000 (12:08 -0800)
Pull objtool fix from Borislav Petkov:

 - Handle different output of readelf on different distros running
   ppc64le which confuses faddr2line's function offsets conversion

* tag 'objtool_urgent_for_v6.1_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  scripts/faddr2line: Fix regression in name resolution on ppc64le

623 files changed:
.mailmap
CREDITS
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/pm/amd-pstate.rst
Documentation/devicetree/bindings/input/goodix,gt7375p.yaml
Documentation/devicetree/bindings/media/allwinner,sun50i-h6-vpu-g2.yaml
Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
Documentation/devicetree/bindings/sound/realtek,rt1015p.yaml
Documentation/driver-api/miscellaneous.rst
Documentation/networking/generic_netlink.rst
Documentation/process/code-of-conduct-interpretation.rst
Documentation/translations/zh_CN/loongarch/introduction.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/am335x-pcm-953.dtsi
arch/arm/boot/dts/at91sam9g20ek_common.dtsi
arch/arm/boot/dts/imx6q-prti6q.dts
arch/arm/boot/dts/imx7s.dtsi
arch/arm/boot/dts/lan966x-pcb8291.dts
arch/arm/boot/dts/rk3036-evb.dts
arch/arm/boot/dts/rk3066a-mk808.dts
arch/arm/boot/dts/rk3188-radxarock.dts
arch/arm/boot/dts/rk3188.dtsi
arch/arm/boot/dts/rk3288-evb-act8846.dts
arch/arm/boot/dts/rk3288-evb.dtsi
arch/arm/boot/dts/rk3288-firefly.dtsi
arch/arm/boot/dts/rk3288-miqi.dts
arch/arm/boot/dts/rk3288-rock2-square.dts
arch/arm/boot/dts/rk3288-vmarc-som.dtsi
arch/arm/boot/dts/rk3xxx.dtsi
arch/arm/boot/dts/sama7g5-pinfunc.h
arch/arm/include/asm/perf_event.h
arch/arm/include/asm/pgtable-nommu.h
arch/arm/include/asm/pgtable.h
arch/arm/mach-at91/pm_suspend.S
arch/arm/mach-mxs/mach-mxs.c
arch/arm/mm/nommu.c
arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts
arch/arm64/boot/dts/freescale/imx8mm.dtsi
arch/arm64/boot/dts/freescale/imx8mn.dtsi
arch/arm64/boot/dts/freescale/imx8mp-evk.dts
arch/arm64/boot/dts/freescale/imx93-pinfunc.h [changed mode: 0755->0644]
arch/arm64/boot/dts/qcom/ipq8074.dtsi
arch/arm64/boot/dts/qcom/msm8996.dtsi
arch/arm64/boot/dts/qcom/sa8155p-adp.dts
arch/arm64/boot/dts/qcom/sa8295p-adp.dts
arch/arm64/boot/dts/qcom/sc7280.dtsi
arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
arch/arm64/boot/dts/qcom/sc8280xp.dtsi
arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi
arch/arm64/boot/dts/qcom/sm8250.dtsi
arch/arm64/boot/dts/qcom/sm8350-hdk.dts
arch/arm64/boot/dts/rockchip/px30-evb.dts
arch/arm64/boot/dts/rockchip/rk3308-evb.dts
arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts
arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
arch/arm64/boot/dts/rockchip/rk3368-r88.dts
arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
arch/arm64/boot/dts/rockchip/rk3399-nanopi-m4b.dts
arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
arch/arm64/boot/dts/rockchip/rk3399-roc-pc-plus.dts
arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi
arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi
arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts
arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
arch/arm64/include/asm/pgtable.h
arch/arm64/kernel/entry-ftrace.S
arch/loongarch/Makefile
arch/loongarch/include/asm/irq.h
arch/loongarch/include/asm/pgtable.h
arch/loongarch/include/asm/smp.h
arch/loongarch/kernel/acpi.c
arch/loongarch/kernel/irq.c
arch/loongarch/kernel/process.c
arch/loongarch/kernel/setup.c
arch/loongarch/kernel/smp.c
arch/loongarch/kernel/unwind_prologue.c
arch/microblaze/Makefile
arch/nios2/boot/Makefile
arch/powerpc/kernel/vmlinux.lds.S
arch/s390/include/asm/processor.h
arch/s390/kernel/crash_dump.c
arch/x86/boot/Makefile
arch/x86/events/amd/core.c
arch/x86/events/amd/uncore.c
arch/x86/events/intel/pt.c
arch/x86/hyperv/hv_init.c
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/qspinlock_paravirt.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/hygon.c
arch/x86/kernel/cpu/sgx/ioctl.c
arch/x86/kernel/cpu/tsx.c
arch/x86/kernel/fpu/core.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/x86.c
arch/x86/kvm/xen.c
arch/x86/mm/ioremap.c
arch/x86/net/bpf_jit_comp.c
arch/x86/power/cpu.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/setup.c
block/blk-cgroup.c
block/blk-core.c
block/blk-mq.c
block/blk-settings.c
block/blk.h
drivers/accessibility/speakup/main.c
drivers/accessibility/speakup/utils.h
drivers/android/binder_alloc.c
drivers/block/drbd/drbd_main.c
drivers/block/ublk_drv.c
drivers/bus/intel-ixp4xx-eb.c
drivers/bus/sunxi-rsb.c
drivers/cpufreq/Kconfig.x86
drivers/cpufreq/amd-pstate.c
drivers/dma-buf/dma-buf.c
drivers/dma-buf/dma-heap.c
drivers/extcon/extcon-usbc-tusb320.c
drivers/firmware/google/coreboot_table.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h
drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
drivers/gpu/drm/display/drm_dp_mst_topology.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_internal.h
drivers/gpu/drm/drm_mode_config.c
drivers/gpu/drm/i915/display/intel_display_power.c
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/lima/lima_devfreq.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/host1x/dev.c
drivers/hv/channel_mgmt.c
drivers/hv/vmbus_drv.c
drivers/iio/accel/bma400_core.c
drivers/iio/adc/at91-sama5d2_adc.c
drivers/iio/adc/at91_adc.c
drivers/iio/adc/mp2629_adc.c
drivers/iio/imu/bno055/bno055.c
drivers/iio/pressure/ms5611.h
drivers/iio/pressure/ms5611_core.c
drivers/iio/pressure/ms5611_spi.c
drivers/iio/trigger/iio-trig-sysfs.c
drivers/input/joystick/iforce/iforce-main.c
drivers/input/misc/soc_button_array.c
drivers/input/mouse/synaptics.c
drivers/input/serio/i8042-acpipnpio.h
drivers/input/serio/i8042.c
drivers/input/touchscreen/goodix.c
drivers/iommu/intel/iommu.c
drivers/iommu/intel/pasid.c
drivers/isdn/mISDN/core.c
drivers/isdn/mISDN/dsp_pipeline.c
drivers/md/dm-bufio.c
drivers/md/dm-crypt.c
drivers/md/dm-integrity.c
drivers/md/dm-ioctl.c
drivers/md/dm-log-writes.c
drivers/misc/vmw_vmci/vmci_queue_pair.c
drivers/mmc/core/core.c
drivers/mmc/host/sdhci-pci-core.c
drivers/mmc/host/sdhci-pci-o2micro.c
drivers/mtd/nand/onenand/Kconfig
drivers/mtd/nand/raw/nand_base.c
drivers/mtd/nand/raw/qcom_nandc.c
drivers/net/arcnet/com20020_cs.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/sja1105/sja1105_mdio.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/atheros/ag71xx.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/davicom/dm9051.c
drivers/net/ethernet/engleder/tsnep_main.c
drivers/net/ethernet/freescale/enetc/enetc.c
drivers/net/ethernet/freescale/enetc/enetc.h
drivers/net/ethernet/freescale/enetc/enetc_qos.c
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/huawei/hinic/hinic_main.c
drivers/net/ethernet/intel/iavf/iavf.h
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_ptp.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
drivers/net/ethernet/marvell/octeontx2/Kconfig
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
drivers/net/ethernet/marvell/prestera/prestera_main.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_ppe.c
drivers/net/ethernet/mediatek/mtk_ppe.h
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
drivers/net/ethernet/microchip/sparx5/sparx5_main.c
drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
drivers/net/ethernet/netronome/nfp/nfp_devlink.c
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/pensando/ionic/ionic_main.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/sfc/ef100_netdev.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ipvlan/ipvlan.h
drivers/net/ipvlan/ipvlan_main.c
drivers/net/macsec.c
drivers/net/macvlan.c
drivers/net/mctp/mctp-i2c.c
drivers/net/mhi_net.c
drivers/net/netdevsim/dev.c
drivers/net/phy/at803x.c
drivers/net/phy/dp83867.c
drivers/net/phy/marvell.c
drivers/net/thunderbolt.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/smsc95xx.c
drivers/net/virtio_net.c
drivers/net/wwan/iosm/iosm_ipc_pcie.c
drivers/net/wwan/t7xx/t7xx_modem_ops.c
drivers/nfc/nfcmrvl/i2c.c
drivers/nfc/nxp-nci/core.c
drivers/nfc/s3fwrn5/core.c
drivers/nfc/st-nci/se.c
drivers/nvme/host/pci.c
drivers/nvme/target/auth.c
drivers/nvmem/lan9662-otpc.c
drivers/nvmem/u-boot-env.c
drivers/parport/parport_pc.c
drivers/pci/controller/pci-hyperv.c
drivers/pinctrl/devicetree.c
drivers/pinctrl/mediatek/mtk-eint.c
drivers/pinctrl/mediatek/mtk-eint.h
drivers/pinctrl/mediatek/pinctrl-mt2701.c
drivers/pinctrl/mediatek/pinctrl-mt2712.c
drivers/pinctrl/mediatek/pinctrl-mt6765.c
drivers/pinctrl/mediatek/pinctrl-mt6779.c
drivers/pinctrl/mediatek/pinctrl-mt6795.c
drivers/pinctrl/mediatek/pinctrl-mt7622.c
drivers/pinctrl/mediatek/pinctrl-mt7623.c
drivers/pinctrl/mediatek/pinctrl-mt7629.c
drivers/pinctrl/mediatek/pinctrl-mt7986.c
drivers/pinctrl/mediatek/pinctrl-mt8127.c
drivers/pinctrl/mediatek/pinctrl-mt8135.c
drivers/pinctrl/mediatek/pinctrl-mt8167.c
drivers/pinctrl/mediatek/pinctrl-mt8173.c
drivers/pinctrl/mediatek/pinctrl-mt8183.c
drivers/pinctrl/mediatek/pinctrl-mt8186.c
drivers/pinctrl/mediatek/pinctrl-mt8188.c
drivers/pinctrl/mediatek/pinctrl-mt8192.c
drivers/pinctrl/mediatek/pinctrl-mt8195.c
drivers/pinctrl/mediatek/pinctrl-mt8365.c
drivers/pinctrl/mediatek/pinctrl-mt8516.c
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/qcom/pinctrl-sc8280xp.c
drivers/platform/surface/aggregator/ssh_packet_layer.c
drivers/platform/surface/surface_aggregator_registry.c
drivers/platform/x86/acer-wmi.c
drivers/platform/x86/amd/pmc.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel/pmc/pltdrv.c
drivers/platform/x86/thinkpad_acpi.c
drivers/power/supply/ab8500_btemp.c
drivers/power/supply/ip5xxx_power.c
drivers/power/supply/rk817_charger.c
drivers/regulator/core.c
drivers/regulator/rt5759-regulator.c
drivers/regulator/slg51000-regulator.c
drivers/regulator/twl6030-regulator.c
drivers/s390/block/dasd_devmap.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_ioctl.c
drivers/s390/block/dcssblk.c
drivers/s390/crypto/ap_bus.c
drivers/s390/scsi/zfcp_fsf.c
drivers/scsi/mpi3mr/mpi3mr_os.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/storvsc_drv.c
drivers/siox/siox-core.c
drivers/slimbus/Kconfig
drivers/slimbus/stream.c
drivers/soc/imx/soc-imx8m.c
drivers/spi/spi-dw-dma.c
drivers/spi/spi-imx.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi-tegra210-quad.c
drivers/staging/rtl8192e/rtllib_softmac_wx.c
drivers/target/loopback/tcm_loop.c
drivers/tee/optee/device.c
drivers/tty/n_gsm.c
drivers/tty/serial/8250/8250_lpss.c
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/imx.c
drivers/usb/cdns3/host.c
drivers/usb/chipidea/otg_fsm.c
drivers/usb/core/quirks.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/gadget.c
drivers/usb/dwc3/host.c
drivers/usb/host/bcma-hcd.c
drivers/usb/serial/option.c
drivers/usb/typec/mux/intel_pmc_mux.c
drivers/usb/typec/tipd/core.c
drivers/vfio/pci/vfio_pci_core.c
drivers/vfio/vfio_main.c
drivers/video/fbdev/core/fbcon.c
drivers/virt/coco/sev-guest/sev-guest.c
drivers/xen/pcpu.c
drivers/xen/platform-pci.c
drivers/xen/xen-pciback/conf_space_capability.c
fs/btrfs/ctree.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/send.c
fs/btrfs/sysfs.c
fs/btrfs/tree-log.c
fs/btrfs/zoned.c
fs/ceph/caps.c
fs/ceph/inode.c
fs/ceph/snap.c
fs/cifs/cifsfs.c
fs/cifs/connect.c
fs/cifs/ioctl.c
fs/cifs/sess.c
fs/cifs/smb2ops.c
fs/erofs/fscache.c
fs/erofs/internal.h
fs/erofs/super.c
fs/erofs/sysfs.c
fs/erofs/zdata.c
fs/ext4/extents.c
fs/file.c
fs/fs-writeback.c
fs/fscache/volume.c
fs/kernfs/dir.c
fs/namei.c
fs/netfs/buffered_read.c
fs/netfs/io.c
fs/nfsd/trace.h
fs/nfsd/vfs.c
fs/nilfs2/sufile.c
fs/proc/meminfo.c
fs/zonefs/super.c
fs/zonefs/sysfs.c
fs/zonefs/zonefs.h
include/linux/blkdev.h
include/linux/bpf.h
include/linux/fault-inject.h
include/linux/fscache.h
include/linux/io_uring.h
include/linux/kvm_host.h
include/linux/license.h
include/linux/mlx5/driver.h
include/linux/ring_buffer.h
include/linux/trace.h
include/linux/vfio.h
include/net/inet_hashtables.h
include/net/ip.h
include/net/ipv6.h
include/net/neighbour.h
include/net/sock.h
include/soc/at91/sama7-ddr.h
include/sound/sof/dai.h
include/sound/sof/info.h
include/trace/events/huge_memory.h
include/uapi/linux/ip.h
include/uapi/linux/ipv6.h
init/Kconfig
io_uring/filetable.c
io_uring/io_uring.c
io_uring/io_uring.h
io_uring/net.c
io_uring/poll.c
ipc/shm.c
kernel/bpf/dispatcher.c
kernel/bpf/percpu_freelist.c
kernel/bpf/verifier.c
kernel/events/core.c
kernel/gcov/clang.c
kernel/kprobes.c
kernel/rseq.c
kernel/sched/core.c
kernel/sched/cpufreq_schedutil.c
kernel/trace/ftrace.c
kernel/trace/kprobe_event_gen_test.c
kernel/trace/rethook.c
kernel/trace/ring_buffer.c
kernel/trace/synth_event_gen_test.c
kernel/trace/trace.c
kernel/trace/trace_eprobe.c
kernel/trace/trace_events_synth.c
kernel/trace/trace_syscalls.c
lib/Kconfig.debug
lib/fault-inject.c
mm/damon/sysfs.c
mm/failslab.c
mm/hugetlb.c
mm/kfence/report.c
mm/khugepaged.c
mm/maccess.c
mm/memcontrol.c
mm/memory.c
mm/migrate_device.c
mm/mmap.c
mm/page_alloc.c
mm/page_ext.c
mm/swapfile.c
mm/vmscan.c
net/9p/trans_fd.c
net/9p/trans_xen.c
net/bpf/test_run.c
net/bridge/br_vlan.c
net/caif/chnl_net.c
net/core/flow_dissector.c
net/core/lwtunnel.c
net/core/neighbour.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dccp/proto.c
net/dsa/dsa2.c
net/dsa/dsa_priv.h
net/dsa/master.c
net/dsa/port.c
net/ipv4/Kconfig
net/ipv4/af_inet.c
net/ipv4/esp4_offload.c
net/ipv4/fib_trie.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_input.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/tcp.c
net/ipv4/tcp_ipv4.c
net/ipv6/esp6_offload.c
net/ipv6/tcp_ipv6.c
net/ipv6/xfrm6_policy.c
net/kcm/kcmsock.c
net/key/af_key.c
net/l2tp/l2tp_core.c
net/netfilter/ipset/ip_set_hash_gen.h
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_flow_table_offload.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_ct.c
net/netfilter/xt_connmark.c
net/nfc/nci/core.c
net/nfc/nci/data.c
net/openvswitch/conntrack.c
net/rxrpc/ar-internal.h
net/rxrpc/conn_client.c
net/sched/Kconfig
net/sched/act_connmark.c
net/sched/act_ct.c
net/sched/act_ctinfo.c
net/tipc/discover.c
net/tipc/topsrv.c
net/tls/tls_device_fallback.c
net/x25/x25_dev.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_replay.c
scripts/Makefile.package
scripts/package/mkdebian
sound/core/seq/seq_memory.c
sound/hda/intel-dsp-config.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/yc/acp6x-mach.c
sound/soc/codecs/hdac_hda.h
sound/soc/codecs/max98373-i2c.c
sound/soc/codecs/rt5514-spi.c
sound/soc/codecs/rt5677-spi.c
sound/soc/codecs/rt711-sdca-sdw.c
sound/soc/codecs/sgtl5000.c
sound/soc/codecs/tas2764.c
sound/soc/codecs/tas2770.c
sound/soc/codecs/tas2780.c
sound/soc/codecs/wm8962.c
sound/soc/fsl/fsl_asrc.c
sound/soc/fsl/fsl_esai.c
sound/soc/fsl/fsl_sai.c
sound/soc/intel/boards/bytcht_es8316.c
sound/soc/intel/boards/sof_es8336.c
sound/soc/intel/common/soc-acpi-intel-icl-match.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/soc-pcm.c
sound/soc/soc-utils.c
sound/soc/sof/ipc3-topology.c
sound/soc/sof/topology.c
sound/soc/stm/stm32_adfsdm.c
sound/soc/stm/stm32_i2s.c
sound/usb/midi.c
tools/arch/x86/include/asm/msr-index.h
tools/iio/iio_generic_buffer.c
tools/testing/selftests/bpf/prog_tests/varlen.c
tools/testing/selftests/bpf/progs/test_varlen.c
tools/testing/selftests/bpf/test_progs.c
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c
tools/testing/selftests/net/fcnal-test.sh
tools/testing/selftests/net/mptcp/mptcp_join.sh
tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
tools/testing/selftests/net/mptcp/simult_flows.sh
tools/testing/selftests/net/pmtu.sh
tools/testing/selftests/net/udpgro.sh
tools/testing/selftests/net/udpgro_bench.sh
tools/testing/selftests/net/udpgro_frglist.sh
tools/testing/selftests/net/udpgro_fwd.sh
tools/testing/selftests/net/veth.sh
virt/kvm/kvm_main.c
virt/kvm/pfncache.c

index fdd7989..4a14ece 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -29,6 +29,7 @@ Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electr
 Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
 Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
 Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
+Alex Hung <alexhung@gmail.com> <alex.hung@canonical.com>
 Alex Shi <alexs@kernel.org> <alex.shi@intel.com>
 Alex Shi <alexs@kernel.org> <alex.shi@linaro.org>
 Alex Shi <alexs@kernel.org> <alex.shi@linux.alibaba.com>
@@ -382,6 +383,7 @@ Santosh Shilimkar <santosh.shilimkar@oracle.org>
 Santosh Shilimkar <ssantosh@kernel.org>
 Sarangdhar Joshi <spjoshi@codeaurora.org>
 Sascha Hauer <s.hauer@pengutronix.de>
+Satya Priya <quic_c_skakit@quicinc.com> <skakit@codeaurora.org>
 S.Çağlar Onur <caglar@pardus.org.tr>
 Sean Christopherson <seanjc@google.com> <sean.j.christopherson@intel.com>
 Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk>
@@ -416,6 +418,7 @@ TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org>
 TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
 Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
 Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
+Tzung-Bi Shih <tzungbi@kernel.org> <tzungbi@google.com>
 Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
 Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
 Uwe Kleine-König <ukleinek@strlen.de>
diff --git a/CREDITS b/CREDITS
index 54672cb..198f675 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -2452,6 +2452,10 @@ S: 482 Shadowgraph Dr.
 S: San Jose, CA  95110
 S: USA
 
+N: Michal Marek
+E: michal.lkml@markovi.net
+D: Kbuild Maintainer 2009-2017
+
 N: Martin Mares
 E: mj@ucw.cz
 W: http://www.ucw.cz/~mj/
index a465d52..42af9ca 100644 (file)
                                memory, and other data can't be written using
                                xmon commands.
                        off     xmon is disabled.
+
+       amd_pstate=     [X86]
+                       disable
+                         Do not enable amd_pstate as the default
+                         scaling driver for the supported processors
+                       passive
+                         Use amd_pstate as a scaling driver, driver requests a
+                         desired performance on this abstract scale and the power
+                         management firmware translates the requests into actual
+                         hardware states (core frequency, data fabric and memory
+                         clocks etc.)
index 8f3d30c..06e2353 100644 (file)
@@ -283,23 +283,19 @@ efficiency frequency management method on AMD processors.
 Kernel Module Options for ``amd-pstate``
 =========================================
 
-.. _shared_mem:
-
-``shared_mem``
-Use a module param (shared_mem) to enable related processors manually with
-**amd_pstate.shared_mem=1**.
-Due to the performance issue on the processors with `Shared Memory Support
-<perf_cap_>`_, we disable it presently and will re-enable this by default
-once we address performance issue with this solution.
-
-To check whether the current processor is using `Full MSR Support <perf_cap_>`_
-or `Shared Memory Support <perf_cap_>`_ : ::
-
-  ray@hr-test1:~$ lscpu | grep cppc
-  Flags:                           fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid aperfmperf rapl pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_llc mwaitx cpb cat_l3 cdp_l3 hw_pstate ssbd mba ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 erms invpcid cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr rdpru wbnoinvd cppc arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif v_spec_ctrl umip pku ospke vaes vpclmulqdq rdpid overflow_recov succor smca fsrm
-
-If the CPU flags have ``cppc``, then this processor supports `Full MSR Support
-<perf_cap_>`_. Otherwise, it supports `Shared Memory Support <perf_cap_>`_.
+Passive Mode
+------------
+
+``amd_pstate=passive``
+
+It will be enabled if the ``amd_pstate=passive`` is passed to the kernel in the command line.
+In this mode, ``amd_pstate`` driver software specifies a desired QoS target in the CPPC
+performance scale as a relative number. This can be expressed as percentage of nominal
+performance (infrastructure max). Below the nominal sustained performance level,
+desired performance expresses the average performance level of the processor subject
+to the Performance Reduction Tolerance register. Above the nominal performance level,
+processor must provide at least nominal performance requested and go higher if current
+operating conditions allow.
 
 
 ``cpupower`` tool support for ``amd-pstate``
index fe1c501..1c191bc 100644 (file)
@@ -16,8 +16,11 @@ description:
 
 properties:
   compatible:
-    items:
+    oneOf:
       - const: goodix,gt7375p
+      - items:
+          - const: goodix,gt7986u
+          - const: goodix,gt7375p
 
   reg:
     enum:
index 24d7bf2..9d44236 100644 (file)
@@ -36,6 +36,9 @@ properties:
   resets:
     maxItems: 1
 
+  iommus:
+    maxItems: 1
+
 required:
   - compatible
   - reg
@@ -43,6 +46,7 @@ required:
   - clocks
   - clock-names
   - resets
+  - iommus
 
 additionalProperties: false
 
@@ -59,6 +63,7 @@ examples:
         clocks = <&ccu CLK_BUS_VP9>, <&ccu CLK_VP9>;
         clock-names = "bus", "mod";
         resets = <&ccu RST_BUS_VP9>;
+        iommus = <&iommu 5>;
     };
 
 ...
index c3e9f34..dea293f 100644 (file)
@@ -8,7 +8,7 @@ title: Audio codec controlled by ChromeOS EC
 
 maintainers:
   - Cheng-Yi Chiang <cychiang@chromium.org>
-  - Tzung-Bi Shih <tzungbi@google.com>
+  - Tzung-Bi Shih <tzungbi@kernel.org>
 
 description: |
   Google's ChromeOS EC codec is a digital mic codec provided by the
index 1d73204..ea7d490 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Realtek rt1015p codec devicetree bindings
 
 maintainers:
-  - Tzung-Bi Shih <tzungbi@google.com>
+  - Tzung-Bi Shih <tzungbi@kernel.org>
 
 description: |
   Rt1015p is a rt1015 variant which does not support I2C and
index 304ffb1..4a5104a 100644 (file)
@@ -16,12 +16,11 @@ Parallel Port Devices
 16x50 UART Driver
 =================
 
-.. kernel-doc:: drivers/tty/serial/serial_core.c
-   :export:
-
 .. kernel-doc:: drivers/tty/serial/8250/8250_core.c
    :export:
 
+See serial/driver.rst for related APIs.
+
 Pulse-Width Modulation (PWM)
 ============================
 
index 59e04cc..d960dbd 100644 (file)
@@ -6,4 +6,4 @@ Generic Netlink
 
 A wiki document on how to use Generic Netlink can be found here:
 
- * http://www.linuxfoundation.org/collaborate/workgroups/networking/generic_netlink_howto
+ * https://wiki.linuxfoundation.org/networking/generic_netlink_howto
index 922e0b5..66b07f1 100644 (file)
@@ -51,7 +51,7 @@ the Technical Advisory Board (TAB) or other maintainers if you're
 uncertain how to handle situations that come up.  It will not be
 considered a violation report unless you want it to be.  If you are
 uncertain about approaching the TAB or any other maintainers, please
-reach out to our conflict mediator, Joanna Lee <joanna.lee@gesmer.com>.
+reach out to our conflict mediator, Joanna Lee <jlee@linuxfoundation.org>.
 
 In the end, "be kind to each other" is really what the end goal is for
 everybody.  We know everyone is human and we all fail at times, but the
index 128878f..f3ec25b 100644 (file)
@@ -70,8 +70,8 @@ LA64中每个寄存器为64位宽。 ``$r0`` 的内容总是固定为0,而其
 ================= ================== =================== ==========
 
 .. note::
-    注意:在一些遗留代码中有时可能见到 ``$v0`` 和 ``$v1`` ,它们是
-    ``$a0`` 和 ``$a1`` 的别名,属于已经废弃的用法。
+    注意:在一些遗留代码中有时可能见到 ``$fv0`` 和 ``$fv1`` ,它们是
+    ``$fa0`` 和 ``$fa1`` 的别名,属于已经废弃的用法。
 
 
 向量寄存器
index 256f039..69565ac 100644 (file)
@@ -2197,7 +2197,7 @@ M:        Wei Xu <xuwei5@hisilicon.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 W:     http://www.hisilicon.com
-T:     git git://github.com/hisilicon/linux-hisi.git
+T:     git https://github.com/hisilicon/linux-hisi.git
 F:     arch/arm/boot/dts/hi3*
 F:     arch/arm/boot/dts/hip*
 F:     arch/arm/boot/dts/hisi*
@@ -4809,7 +4809,7 @@ R:        Jeff Layton <jlayton@kernel.org>
 L:     ceph-devel@vger.kernel.org
 S:     Supported
 W:     http://ceph.com/
-T:     git git://github.com/ceph/ceph-client.git
+T:     git https://github.com/ceph/ceph-client.git
 F:     include/linux/ceph/
 F:     include/linux/crush/
 F:     net/ceph/
@@ -4821,7 +4821,7 @@ R:        Jeff Layton <jlayton@kernel.org>
 L:     ceph-devel@vger.kernel.org
 S:     Supported
 W:     http://ceph.com/
-T:     git git://github.com/ceph/ceph-client.git
+T:     git https://github.com/ceph/ceph-client.git
 F:     Documentation/filesystems/ceph.rst
 F:     fs/ceph/
 
@@ -4911,7 +4911,7 @@ F:        drivers/platform/chrome/
 
 CHROMEOS EC CODEC DRIVER
 M:     Cheng-Yi Chiang <cychiang@chromium.org>
-M:     Tzung-Bi Shih <tzungbi@google.com>
+M:     Tzung-Bi Shih <tzungbi@kernel.org>
 R:     Guenter Roeck <groeck@chromium.org>
 L:     chrome-platform@lists.linux.dev
 S:     Maintained
@@ -10287,7 +10287,7 @@ T:      git https://github.com/intel/gvt-linux.git
 F:     drivers/gpu/drm/i915/gvt/
 
 INTEL HID EVENT DRIVER
-M:     Alex Hung <alex.hung@canonical.com>
+M:     Alex Hung <alexhung@gmail.com>
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     drivers/platform/x86/intel/hid.c
@@ -11035,6 +11035,7 @@ KCONFIG
 M:     Masahiro Yamada <masahiroy@kernel.org>
 L:     linux-kbuild@vger.kernel.org
 S:     Maintained
+Q:     https://patchwork.kernel.org/project/linux-kbuild/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git kconfig
 F:     Documentation/kbuild/kconfig*
 F:     scripts/Kconfig.include
@@ -11092,10 +11093,12 @@ F:    fs/autofs/
 
 KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
 M:     Masahiro Yamada <masahiroy@kernel.org>
-M:     Michal Marek <michal.lkml@markovi.net>
+R:     Nathan Chancellor <nathan@kernel.org>
 R:     Nick Desaulniers <ndesaulniers@google.com>
+R:     Nicolas Schier <nicolas@fjasle.eu>
 L:     linux-kbuild@vger.kernel.org
 S:     Maintained
+Q:     https://patchwork.kernel.org/project/linux-kbuild/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git
 F:     Documentation/kbuild/
 F:     Makefile
@@ -13625,6 +13628,12 @@ S:     Supported
 F:     drivers/misc/atmel-ssc.c
 F:     include/linux/atmel-ssc.h
 
+MICROCHIP SOC DRIVERS
+M:     Conor Dooley <conor@kernel.org>
+S:     Supported
+T:     git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
+F:     drivers/soc/microchip/
+
 MICROCHIP USB251XB DRIVER
 M:     Richard Leitner <richard.leitner@skidata.com>
 L:     linux-usb@vger.kernel.org
@@ -15943,6 +15952,7 @@ Q:      https://patchwork.kernel.org/project/linux-pci/list/
 B:     https://bugzilla.kernel.org
 C:     irc://irc.oftc.net/linux-pci
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git
+F:     Documentation/devicetree/bindings/pci/
 F:     drivers/pci/controller/
 F:     drivers/pci/pci-bridge-emul.c
 F:     drivers/pci/pci-bridge-emul.h
@@ -16049,7 +16059,7 @@ F:      Documentation/devicetree/bindings/pci/microchip*
 F:     drivers/pci/controller/*microchip*
 
 PCIE DRIVER FOR QUALCOMM MSM
-M:     Stanimir Varbanov <svarbanov@mm-sol.com>
+M:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 L:     linux-pci@vger.kernel.org
 L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
@@ -17222,7 +17232,7 @@ R:      Dongsheng Yang <dongsheng.yang@easystack.cn>
 L:     ceph-devel@vger.kernel.org
 S:     Supported
 W:     http://ceph.com/
-T:     git git://github.com/ceph/ceph-client.git
+T:     git https://github.com/ceph/ceph-client.git
 F:     Documentation/ABI/testing/sysfs-bus-rbd
 F:     drivers/block/rbd.c
 F:     drivers/block/rbd_types.h
@@ -17723,7 +17733,7 @@ F:      arch/riscv/
 N:     riscv
 K:     riscv
 
-RISC-V/MICROCHIP POLARFIRE SOC SUPPORT
+RISC-V MICROCHIP FPGA SUPPORT
 M:     Conor Dooley <conor.dooley@microchip.com>
 M:     Daire McNamara <daire.mcnamara@microchip.com>
 L:     linux-riscv@lists.infradead.org
@@ -17741,17 +17751,26 @@ F:    Documentation/devicetree/bindings/usb/microchip,mpfs-musb.yaml
 F:     arch/riscv/boot/dts/microchip/
 F:     drivers/char/hw_random/mpfs-rng.c
 F:     drivers/clk/microchip/clk-mpfs.c
-F:     drivers/i2c/busses/i2c-microchip-core.c
+F:     drivers/i2c/busses/i2c-microchip-corei2c.c
 F:     drivers/mailbox/mailbox-mpfs.c
 F:     drivers/pci/controller/pcie-microchip-host.c
 F:     drivers/reset/reset-mpfs.c
 F:     drivers/rtc/rtc-mpfs.c
-F:     drivers/soc/microchip/
+F:     drivers/soc/microchip/mpfs-sys-controller.c
 F:     drivers/spi/spi-microchip-core-qspi.c
 F:     drivers/spi/spi-microchip-core.c
 F:     drivers/usb/musb/mpfs.c
 F:     include/soc/microchip/mpfs.h
 
+RISC-V MISC SOC SUPPORT
+M:     Conor Dooley <conor@kernel.org>
+L:     linux-riscv@lists.infradead.org
+S:     Maintained
+Q:     https://patchwork.kernel.org/project/linux-riscv/list/
+T:     git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
+F:     Documentation/devicetree/bindings/riscv/
+F:     arch/riscv/boot/dts/
+
 RNBD BLOCK DRIVERS
 M:     Md. Haris Iqbal <haris.iqbal@ionos.com>
 M:     Jack Wang <jinpu.wang@ionos.com>
@@ -17992,7 +18011,7 @@ L:      linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/video/fbdev/savage/
 
-S390
+S390 ARCHITECTURE
 M:     Heiko Carstens <hca@linux.ibm.com>
 M:     Vasily Gorbik <gor@linux.ibm.com>
 M:     Alexander Gordeev <agordeev@linux.ibm.com>
@@ -18047,6 +18066,15 @@ L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/s390/net/
 
+S390 MM
+M:     Alexander Gordeev <agordeev@linux.ibm.com>
+M:     Gerald Schaefer <gerald.schaefer@linux.ibm.com>
+L:     linux-s390@vger.kernel.org
+S:     Supported
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux.git
+F:     arch/s390/include/asm/pgtable.h
+F:     arch/s390/mm
+
 S390 PCI SUBSYSTEM
 M:     Niklas Schnelle <schnelle@linux.ibm.com>
 M:     Gerald Schaefer <gerald.schaefer@linux.ibm.com>
@@ -18778,7 +18806,6 @@ M:      Palmer Dabbelt <palmer@dabbelt.com>
 M:     Paul Walmsley <paul.walmsley@sifive.com>
 L:     linux-riscv@lists.infradead.org
 S:     Supported
-T:     git https://github.com/sifive/riscv-linux.git
 N:     sifive
 K:     [^@]sifive
 
@@ -18797,6 +18824,13 @@ S:     Maintained
 F:     Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
 F:     drivers/dma/sf-pdma/
 
+SIFIVE SOC DRIVERS
+M:     Conor Dooley <conor@kernel.org>
+L:     linux-riscv@lists.infradead.org
+S:     Maintained
+T:     git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
+F:     drivers/soc/sifive/
+
 SILEAD TOUCHSCREEN DRIVER
 M:     Hans de Goede <hdegoede@redhat.com>
 L:     linux-input@vger.kernel.org
@@ -19598,6 +19632,11 @@ M:     Ion Badulescu <ionut@badula.org>
 S:     Odd Fixes
 F:     drivers/net/ethernet/adaptec/starfire*
 
+STARFIVE DEVICETREES
+M:     Emil Renner Berthing <kernel@esmil.dk>
+S:     Maintained
+F:     arch/riscv/boot/dts/starfive/
+
 STARFIVE JH7100 CLOCK DRIVERS
 M:     Emil Renner Berthing <kernel@esmil.dk>
 S:     Maintained
index 58cd4f5..6f846b1 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
index dae4480..9474974 100644 (file)
        compatible = "phytec,am335x-pcm-953", "phytec,am335x-phycore-som", "ti,am33xx";
 
        /* Power */
-       regulators {
-               vcc3v3: fixedregulator@1 {
-                       compatible = "regulator-fixed";
-                       regulator-name = "vcc3v3";
-                       regulator-min-microvolt = <3300000>;
-                       regulator-max-microvolt = <3300000>;
-                       regulator-boot-on;
-               };
+       vcc3v3: fixedregulator1 {
+               compatible = "regulator-fixed";
+               regulator-name = "vcc3v3";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               regulator-boot-on;
+       };
 
-               vcc1v8: fixedregulator@2 {
-                       compatible = "regulator-fixed";
-                       regulator-name = "vcc1v8";
-                       regulator-min-microvolt = <1800000>;
-                       regulator-max-microvolt = <1800000>;
-                       regulator-boot-on;
-               };
+       vcc1v8: fixedregulator2 {
+               compatible = "regulator-fixed";
+               regulator-name = "vcc1v8";
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+               regulator-boot-on;
        };
 
        /* User IO */
index 60d6129..024af2d 100644 (file)
 
                                };
 
+                               usb1 {
+                                       pinctrl_usb1_vbus_gpio: usb1_vbus_gpio {
+                                               atmel,pins =
+                                                       <AT91_PIOC 5 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>;   /* PC5 GPIO */
+                                       };
+                               };
+
                                mmc0_slot1 {
                                        pinctrl_board_mmc0_slot1: mmc0_slot1-board {
                                                atmel,pins =
@@ -84,6 +91,8 @@
                        };
 
                        usb1: gadget@fffa4000 {
+                               pinctrl-0 = <&pinctrl_usb1_vbus_gpio>;
+                               pinctrl-names = "default";
                                atmel,vbus-gpio = <&pioC 5 GPIO_ACTIVE_HIGH>;
                                status = "okay";
                        };
index b4605ed..d8fa83e 100644 (file)
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_wifi>;
                interrupts-extended = <&gpio1 30 IRQ_TYPE_LEVEL_HIGH>;
-               ref-clock-frequency = "38400000";
-               tcxo-clock-frequency = "19200000";
+               ref-clock-frequency = <38400000>;
+               tcxo-clock-frequency = <19200000>;
        };
 };
 
index 0fc9e6b..03d2e85 100644 (file)
                        clocks = <&clks IMX7D_NAND_USDHC_BUS_RAWNAND_CLK>;
                };
 
-               gpmi: nand-controller@33002000{
+               gpmi: nand-controller@33002000 {
                        compatible = "fsl,imx7d-gpmi-nand";
                        #address-cells = <1>;
-                       #size-cells = <1>;
+                       #size-cells = <0>;
                        reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
                        reg-names = "gpmi-nand", "bch";
                        interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
index f4f054c..3a3d76a 100644 (file)
                pins = "GPIO_35", "GPIO_36";
                function = "can0_b";
        };
+
+       sgpio_a_pins: sgpio-a-pins {
+               /* SCK, D0, D1, LD */
+               pins = "GPIO_32", "GPIO_33", "GPIO_34", "GPIO_35";
+               function = "sgpio_a";
+       };
 };
 
 &can0 {
        status = "okay";
 };
 
+&sgpio {
+       pinctrl-0 = <&sgpio_a_pins>;
+       pinctrl-names = "default";
+       microchip,sgpio-port-ranges = <0 3>, <8 11>;
+       status = "okay";
+
+       gpio@0 {
+               ngpios = <64>;
+       };
+       gpio@1 {
+               ngpios = <64>;
+       };
+};
+
 &switch {
        status = "okay";
 };
index 9fd4d9d..becdc0b 100644 (file)
 &i2c1 {
        status = "okay";
 
-       hym8563: hym8563@51 {
+       hym8563: rtc@51 {
                compatible = "haoyu,hym8563";
                reg = <0x51>;
                #clock-cells = <0>;
-               clock-frequency = <32768>;
                clock-output-names = "xin32k";
        };
 };
index cfa318a..2db5ba7 100644 (file)
@@ -32,7 +32,7 @@
                keyup-threshold-microvolt = <2500000>;
                poll-interval = <100>;
 
-               recovery {
+               button-recovery {
                        label = "recovery";
                        linux,code = <KEY_VENDOR>;
                        press-threshold-microvolt = <0>;
index e7cf188..118deac 100644 (file)
@@ -71,7 +71,7 @@
                #sound-dai-cells = <0>;
        };
 
-       ir_recv: gpio-ir-receiver {
+       ir_recv: ir-receiver {
                compatible = "gpio-ir-receiver";
                gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
                pinctrl-names = "default";
index cdd4a0b..44b54af 100644 (file)
                                rockchip,pins = <2 RK_PD3 1 &pcfg_pull_none>;
                        };
 
-                       lcdc1_rgb24: ldcd1-rgb24 {
+                       lcdc1_rgb24: lcdc1-rgb24 {
                                rockchip,pins = <2 RK_PA0 1 &pcfg_pull_none>,
                                                <2 RK_PA1 1 &pcfg_pull_none>,
                                                <2 RK_PA2 1 &pcfg_pull_none>,
 
 &global_timer {
        interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
-       status = "disabled";
 };
 
 &local_timer {
index be695b8..8a635c2 100644 (file)
@@ -54,7 +54,7 @@
                vin-supply = <&vcc_sys>;
        };
 
-       hym8563@51 {
+       rtc@51 {
                compatible = "haoyu,hym8563";
                reg = <0x51>;
 
index 399d6b9..382d283 100644 (file)
                        press-threshold-microvolt = <300000>;
                };
 
-               menu {
+               button-menu {
                        label = "Menu";
                        linux,code = <KEY_MENU>;
                        press-threshold-microvolt = <640000>;
                };
 
-               esc {
+               button-esc {
                        label = "Esc";
                        linux,code = <KEY_ESC>;
                        press-threshold-microvolt = <1000000>;
                };
 
-               home  {
+               button-home  {
                        label = "Home";
                        linux,code = <KEY_HOME>;
                        press-threshold-microvolt = <1300000>;
index 052afe5..3836c61 100644 (file)
                vin-supply = <&vcc_sys>;
        };
 
-       hym8563: hym8563@51 {
+       hym8563: rtc@51 {
                compatible = "haoyu,hym8563";
                reg = <0x51>;
                #clock-cells = <0>;
-               clock-frequency = <32768>;
                clock-output-names = "xin32k";
                interrupt-parent = <&gpio7>;
                interrupts = <RK_PA4 IRQ_TYPE_EDGE_FALLING>;
index 713f55e..db1eb64 100644 (file)
                vin-supply = <&vcc_sys>;
        };
 
-       hym8563: hym8563@51 {
+       hym8563: rtc@51 {
                compatible = "haoyu,hym8563";
                reg = <0x51>;
                #clock-cells = <0>;
-               clock-frequency = <32768>;
                clock-output-names = "xin32k";
        };
 
index 80e0f07..13cfdaa 100644 (file)
 };
 
 &i2c0 {
-       hym8563: hym8563@51 {
+       hym8563: rtc@51 {
                compatible = "haoyu,hym8563";
                reg = <0x51>;
                #clock-cells = <0>;
-               clock-frequency = <32768>;
                clock-output-names = "xin32k";
                interrupt-parent = <&gpio0>;
                interrupts = <RK_PA4 IRQ_TYPE_EDGE_FALLING>;
index 0ae2bd1..7939516 100644 (file)
                interrupt-parent = <&gpio5>;
                interrupts = <RK_PC3 IRQ_TYPE_LEVEL_LOW>;
                #clock-cells = <0>;
-               clock-frequency = <32768>;
                clock-output-names = "hym8563";
                pinctrl-names = "default";
                pinctrl-0 = <&hym8563_int>;
index bf28509..cb4e42e 100644 (file)
                reg = <0x1013c200 0x20>;
                interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>;
                clocks = <&cru CORE_PERI>;
+               status = "disabled";
+               /* The clock source and the sched_clock provided by the arm_global_timer
+                * on Rockchip rk3066a/rk3188 are quite unstable because their rates
+                * depend on the CPU frequency.
+                * Keep the arm_global_timer disabled in order to have the
+                * DW_APB_TIMER (rk3066a) or ROCKCHIP_TIMER (rk3188) selected by default.
+                */
        };
 
        local_timer: local-timer@1013c600 {
index 4eb3044..6e87f0d 100644 (file)
 #define PIN_PB2__FLEXCOM6_IO0          PINMUX_PIN(PIN_PB2, 2, 1)
 #define PIN_PB2__ADTRG                 PINMUX_PIN(PIN_PB2, 3, 1)
 #define PIN_PB2__A20                   PINMUX_PIN(PIN_PB2, 4, 1)
-#define PIN_PB2__FLEXCOM11_IO0         PINMUX_PIN(PIN_PB2, 6, 3)
+#define PIN_PB2__FLEXCOM11_IO1         PINMUX_PIN(PIN_PB2, 6, 3)
 #define PIN_PB3                                35
 #define PIN_PB3__GPIO                  PINMUX_PIN(PIN_PB3, 0, 0)
 #define PIN_PB3__RF1                   PINMUX_PIN(PIN_PB3, 1, 1)
index fe87397..bdbc1e5 100644 (file)
@@ -17,7 +17,7 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
 
 #define perf_arch_fetch_caller_regs(regs, __ip) { \
        (regs)->ARM_pc = (__ip); \
-       (regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \
+       frame_pointer((regs)) = (unsigned long) __builtin_frame_address(0); \
        (regs)->ARM_sp = current_stack_pointer; \
        (regs)->ARM_cpsr = SVC_MODE; \
 }
index d16aba4..0900113 100644 (file)
 typedef pte_t *pte_addr_t;
 
 /*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr)       (virt_to_page(0))
-
-/*
  * Mark the prot value as uncacheable and unbufferable.
  */
 #define pgprot_noncached(prot) (prot)
index 78a5320..ef48a55 100644 (file)
 #include <linux/const.h>
 #include <asm/proc-fns.h>
 
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern struct page *empty_zero_page;
+#define ZERO_PAGE(vaddr)       (empty_zero_page)
+#endif
+
 #ifndef CONFIG_MMU
 
 #include <asm-generic/pgtable-nopud.h>
@@ -139,13 +148,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
  */
 
 #ifndef __ASSEMBLY__
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern struct page *empty_zero_page;
-#define ZERO_PAGE(vaddr)       (empty_zero_page)
-
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
index ffed4d9..e4904fa 100644 (file)
@@ -169,10 +169,15 @@ sr_ena_2:
        cmp     tmp1, #UDDRC_STAT_SELFREF_TYPE_SW
        bne     sr_ena_2
 
-       /* Put DDR PHY's DLL in bypass mode for non-backup modes. */
+       /* Disable DX DLLs for non-backup modes. */
        cmp     r7, #AT91_PM_BACKUP
        beq     sr_ena_3
 
+       /* Do not soft reset the AC DLL. */
+       ldr     tmp1, [r3, DDR3PHY_ACDLLCR]
+       bic     tmp1, tmp1, DDR3PHY_ACDLLCR_DLLSRST
+       str     tmp1, [r3, DDR3PHY_ACDLLCR]
+
        /* Disable DX DLLs. */
        ldr     tmp1, [r3, #DDR3PHY_DX0DLLCR]
        orr     tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
index 25c9d18..1c57ac4 100644 (file)
@@ -393,8 +393,10 @@ static void __init mxs_machine_init(void)
 
        root = of_find_node_by_path("/");
        ret = of_property_read_string(root, "model", &soc_dev_attr->machine);
-       if (ret)
+       if (ret) {
+               kfree(soc_dev_attr);
                return;
+       }
 
        soc_dev_attr->family = "Freescale MXS Family";
        soc_dev_attr->soc_id = mxs_get_soc_id();
index c42deba..c1494a4 100644 (file)
 
 unsigned long vectors_base;
 
+/*
+ * empty_zero_page is a special page that is used for
+ * zero-initialized data and COW.
+ */
+struct page *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
+
 #ifdef CONFIG_ARM_MPU
 struct mpu_rgn_info mpu_rgn_info;
 #endif
@@ -148,9 +155,21 @@ void __init adjust_lowmem_bounds(void)
  */
 void __init paging_init(const struct machine_desc *mdesc)
 {
+       void *zero_page;
+
        early_trap_init((void *)vectors_base);
        mpu_setup();
+
+       /* allocate the zero page. */
+       zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+       if (!zero_page)
+               panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+                     __func__, PAGE_SIZE, PAGE_SIZE);
+
        bootmem_init();
+
+       empty_zero_page = virt_to_page(zero_page);
+       flush_dcache_page(empty_zero_page);
 }
 
 /*
index 53f6660..ca1d287 100644 (file)
                        clocks = <&ccu CLK_BUS_VP9>, <&ccu CLK_VP9>;
                        clock-names = "bus", "mod";
                        resets = <&ccu RST_BUS_VP9>;
+                       iommus = <&iommu 5>;
                };
 
                video-codec@1c0e000 {
index 7e0aeb2..a0aeac6 100644 (file)
                off-on-delay-us = <12000>;
        };
 
-       extcon_usbotg1: extcon-usbotg1 {
-               compatible = "linux,extcon-usb-gpio";
+       connector {
+               compatible = "gpio-usb-b-connector", "usb-b-connector";
+               type = "micro";
+               label = "X19";
                pinctrl-names = "default";
-               pinctrl-0 = <&pinctrl_usb1_extcon>;
-               id-gpio = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+               pinctrl-0 = <&pinctrl_usb1_connector>;
+               id-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               usb_dr_connector: endpoint {
+                                       remote-endpoint = <&usb1_drd_sw>;
+                               };
+                       };
+               };
        };
 };
 
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usbotg1>;
        dr_mode = "otg";
-       extcon = <&extcon_usbotg1>;
        srp-disable;
        hnp-disable;
        adp-disable;
        power-active-high;
        over-current-active-low;
+       usb-role-switch;
        status = "okay";
+
+       port {
+               usb1_drd_sw: endpoint {
+                       remote-endpoint = <&usb_dr_connector>;
+               };
+       };
 };
 
 &usbotg2 {
                           <MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC         0x84>;
        };
 
-       pinctrl_usb1_extcon: usb1-extcongrp {
+       pinctrl_usb1_connector: usb1-connectorgrp {
                fsl,pins = <MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10          0x1c0>;
        };
 
index dabd94d..50ef929 100644 (file)
                        clocks = <&clk IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK>;
                };
 
-               gpmi: nand-controller@33002000{
+               gpmi: nand-controller@33002000 {
                        compatible = "fsl,imx8mm-gpmi-nand", "fsl,imx7d-gpmi-nand";
                        #address-cells = <1>;
-                       #size-cells = <1>;
+                       #size-cells = <0>;
                        reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
                        reg-names = "gpmi-nand", "bch";
                        interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
index ad0b99a..67b554b 100644 (file)
                gpmi: nand-controller@33002000 {
                        compatible = "fsl,imx8mn-gpmi-nand", "fsl,imx7d-gpmi-nand";
                        #address-cells = <1>;
-                       #size-cells = <1>;
+                       #size-cells = <0>;
                        reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
                        reg-names = "gpmi-nand", "bch";
                        interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
index 9f1469d..b4c1ef2 100644 (file)
 
        pinctrl_pcie0: pcie0grp {
                fsl,pins = <
-                       MX8MP_IOMUXC_I2C4_SCL__PCIE_CLKREQ_B    0x61 /* open drain, pull up */
-                       MX8MP_IOMUXC_SD1_DATA5__GPIO2_IO07      0x41
+                       MX8MP_IOMUXC_I2C4_SCL__PCIE_CLKREQ_B    0x60 /* open drain, pull up */
+                       MX8MP_IOMUXC_SD1_DATA5__GPIO2_IO07      0x40
                >;
        };
 
        pinctrl_pcie0_reg: pcie0reggrp {
                fsl,pins = <
-                       MX8MP_IOMUXC_SD1_DATA4__GPIO2_IO06      0x41
+                       MX8MP_IOMUXC_SD1_DATA4__GPIO2_IO06      0x40
                >;
        };
 
old mode 100755 (executable)
new mode 100644 (file)
index a47acf9..a721cdd 100644 (file)
 
                apcs_glb: mailbox@b111000 {
                        compatible = "qcom,ipq8074-apcs-apps-global";
-                       reg = <0x0b111000 0x6000>;
+                       reg = <0x0b111000 0x1000>;
 
                        #clock-cells = <1>;
                        #mbox-cells = <1>;
index c0a2baf..aba7176 100644 (file)
                };
 
                saw3: syscon@9a10000 {
-                       compatible = "qcom,tcsr-msm8996", "syscon";
+                       compatible = "syscon";
                        reg = <0x09a10000 0x1000>;
                };
 
index 87ab0e1..4dee790 100644 (file)
@@ -43,7 +43,6 @@
 
                regulator-always-on;
                regulator-boot-on;
-               regulator-allow-set-load;
 
                vin-supply = <&vreg_3p3>;
        };
                        regulator-max-microvolt = <880000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
                        regulator-allow-set-load;
+                       regulator-allowed-modes =
+                           <RPMH_REGULATOR_MODE_LPM
+                            RPMH_REGULATOR_MODE_HPM>;
                };
 
                vreg_l7a_1p8: ldo7 {
                        regulator-max-microvolt = <2960000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
                        regulator-allow-set-load;
+                       regulator-allowed-modes =
+                           <RPMH_REGULATOR_MODE_LPM
+                            RPMH_REGULATOR_MODE_HPM>;
                };
 
                vreg_l11a_0p8: ldo11 {
                        regulator-max-microvolt = <1200000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
                        regulator-allow-set-load;
+                       regulator-allowed-modes =
+                           <RPMH_REGULATOR_MODE_LPM
+                            RPMH_REGULATOR_MODE_HPM>;
                };
 
                vreg_l7c_1p8: ldo7 {
                        regulator-max-microvolt = <1200000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
                        regulator-allow-set-load;
+                       regulator-allowed-modes =
+                           <RPMH_REGULATOR_MODE_LPM
+                            RPMH_REGULATOR_MODE_HPM>;
                };
 
                vreg_l10c_3p3: ldo10 {
index b608b82..2c62ba6 100644 (file)
@@ -83,6 +83,9 @@
                        regulator-max-microvolt = <1200000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
                        regulator-allow-set-load;
+                       regulator-allowed-modes =
+                           <RPMH_REGULATOR_MODE_LPM
+                            RPMH_REGULATOR_MODE_HPM>;
                };
 
                vreg_l4c: ldo4 {
                        regulator-max-microvolt = <1200000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
                        regulator-allow-set-load;
+                       regulator-allowed-modes =
+                           <RPMH_REGULATOR_MODE_LPM
+                            RPMH_REGULATOR_MODE_HPM>;
                };
 
                vreg_l7c: ldo7 {
                        regulator-max-microvolt = <2504000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
                        regulator-allow-set-load;
+                       regulator-allowed-modes =
+                           <RPMH_REGULATOR_MODE_LPM
+                            RPMH_REGULATOR_MODE_HPM>;
                };
 
                vreg_l17c: ldo17 {
                        regulator-max-microvolt = <2504000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
                        regulator-allow-set-load;
+                       regulator-allowed-modes =
+                           <RPMH_REGULATOR_MODE_LPM
+                            RPMH_REGULATOR_MODE_HPM>;
                };
        };
 
index 2125803..4cdc88d 100644 (file)
 
                lpass_audiocc: clock-controller@3300000 {
                        compatible = "qcom,sc7280-lpassaudiocc";
-                       reg = <0 0x03300000 0 0x30000>;
+                       reg = <0 0x03300000 0 0x30000>,
+                             <0 0x032a9000 0 0x1000>;
                        clocks = <&rpmhcc RPMH_CXO_CLK>,
                               <&lpass_aon LPASS_AON_CC_MAIN_RCG_CLK_SRC>;
                        clock-names = "bi_tcxo", "lpass_aon_cc_main_rcg_clk_src";
index fea7d82..5e30349 100644 (file)
                        regulator-max-microvolt = <2504000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
                        regulator-allow-set-load;
+                       regulator-allowed-modes =
+                           <RPMH_REGULATOR_MODE_LPM
+                            RPMH_REGULATOR_MODE_HPM>;
                };
 
                vreg_l13c: ldo13 {
                        regulator-max-microvolt = <1200000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
                        regulator-allow-set-load;
+                       regulator-allowed-modes =
+                           <RPMH_REGULATOR_MODE_LPM
+                            RPMH_REGULATOR_MODE_HPM>;
                };
 
                vreg_l4d: ldo4 {
index c32bcde..212d63d 100644 (file)
 
                ufs_mem_phy: phy@1d87000 {
                        compatible = "qcom,sc8280xp-qmp-ufs-phy";
-                       reg = <0 0x01d87000 0 0xe10>;
+                       reg = <0 0x01d87000 0 0x1c8>;
                        #address-cells = <2>;
                        #size-cells = <2>;
                        ranges;
                        clock-names = "ref",
                                      "ref_aux";
-                       clocks = <&rpmhcc RPMH_CXO_CLK>,
+                       clocks = <&gcc GCC_UFS_REF_CLKREF_CLK>,
                                 <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
 
                        resets = <&ufs_mem_hc 0>;
 
                ufs_card_phy: phy@1da7000 {
                        compatible = "qcom,sc8280xp-qmp-ufs-phy";
-                       reg = <0 0x01da7000 0 0xe10>;
+                       reg = <0 0x01da7000 0 0x1c8>;
                        #address-cells = <2>;
                        #size-cells = <2>;
                        ranges;
                        clock-names = "ref",
                                      "ref_aux";
-                       clocks = <&gcc GCC_UFS_1_CARD_CLKREF_CLK>,
+                       clocks = <&gcc GCC_UFS_REF_CLKREF_CLK>,
                                 <&gcc GCC_UFS_CARD_PHY_AUX_CLK>;
 
                        resets = <&ufs_card_hc 0>;
                        usb_0_ssphy: usb3-phy@88eb400 {
                                reg = <0 0x088eb400 0 0x100>,
                                      <0 0x088eb600 0 0x3ec>,
-                                     <0 0x088ec400 0 0x1f0>,
+                                     <0 0x088ec400 0 0x364>,
                                      <0 0x088eba00 0 0x100>,
                                      <0 0x088ebc00 0 0x3ec>,
-                                     <0 0x088ec700 0 0x64>;
+                                     <0 0x088ec200 0 0x18>;
                                #phy-cells = <0>;
                                #clock-cells = <0>;
                                clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
                                clock-names = "pipe0";
                                clock-output-names = "usb0_phy_pipe_clk_src";
                        };
-
-                       usb_0_dpphy: dp-phy@88ed200 {
-                               reg = <0 0x088ed200 0 0x200>,
-                                     <0 0x088ed400 0 0x200>,
-                                     <0 0x088eda00 0 0x200>,
-                                     <0 0x088ea600 0 0x200>,
-                                     <0 0x088ea800 0 0x200>;
-                               #clock-cells = <1>;
-                               #phy-cells = <0>;
-                       };
                };
 
                usb_1_hsphy: phy@8902000 {
 
                        usb_1_ssphy: usb3-phy@8903400 {
                                reg = <0 0x08903400 0 0x100>,
-                                     <0 0x08903c00 0 0x3ec>,
-                                     <0 0x08904400 0 0x1f0>,
+                                     <0 0x08903600 0 0x3ec>,
+                                     <0 0x08904400 0 0x364>,
                                      <0 0x08903a00 0 0x100>,
                                      <0 0x08903c00 0 0x3ec>,
                                      <0 0x08904200 0 0x18>;
                                clock-names = "pipe0";
                                clock-output-names = "usb1_phy_pipe_clk_src";
                        };
-
-                       usb_1_dpphy: dp-phy@8904200 {
-                               reg = <0 0x08904200 0 0x200>,
-                                     <0 0x08904400 0 0x200>,
-                                     <0 0x08904a00 0 0x200>,
-                                     <0 0x08904600 0 0x200>,
-                                     <0 0x08904800 0 0x200>;
-                               #clock-cells = <1>;
-                               #phy-cells = <0>;
-                       };
                };
 
                system-cache-controller@9200000 {
index 014fe3a..fb6e5a1 100644 (file)
                        regulator-max-microvolt = <2960000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
                        regulator-allow-set-load;
+                       regulator-allowed-modes =
+                           <RPMH_REGULATOR_MODE_LPM
+                            RPMH_REGULATOR_MODE_HPM>;
                };
 
                vreg_l7c_3p0: ldo7 {
                        regulator-max-microvolt = <2960000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
                        regulator-allow-set-load;
+                       regulator-allowed-modes =
+                           <RPMH_REGULATOR_MODE_LPM
+                            RPMH_REGULATOR_MODE_HPM>;
                };
 
                vreg_l10c_3p3: ldo10 {
index 549e0a2..5428aab 100644 (file)
                        regulator-max-microvolt = <2960000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
                        regulator-allow-set-load;
+                       regulator-allowed-modes =
+                           <RPMH_REGULATOR_MODE_LPM
+                            RPMH_REGULATOR_MODE_HPM>;
                };
 
                vreg_l7c_2p85: ldo7 {
                        regulator-max-microvolt = <2960000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
                        regulator-allow-set-load;
+                       regulator-allowed-modes =
+                           <RPMH_REGULATOR_MODE_LPM
+                            RPMH_REGULATOR_MODE_HPM>;
                };
 
                vreg_l10c_3p3: ldo10 {
index a5b62ca..e276eed 100644 (file)
                                exit-latency-us = <6562>;
                                min-residency-us = <9987>;
                                local-timer-stop;
+                               status = "disabled";
                        };
                };
        };
index 0fcf5bd..69ae650 100644 (file)
                        regulator-max-microvolt = <888000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
                        regulator-allow-set-load;
+                       regulator-allowed-modes =
+                           <RPMH_REGULATOR_MODE_LPM
+                            RPMH_REGULATOR_MODE_HPM>;
                };
 
                vreg_l6b_1p2: ldo6 {
                        regulator-max-microvolt = <1208000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
                        regulator-allow-set-load;
+                       regulator-allowed-modes =
+                           <RPMH_REGULATOR_MODE_LPM
+                            RPMH_REGULATOR_MODE_HPM>;
                };
 
                vreg_l7b_2p96: ldo7 {
                        regulator-max-microvolt = <2504000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
                        regulator-allow-set-load;
+                       regulator-allowed-modes =
+                           <RPMH_REGULATOR_MODE_LPM
+                            RPMH_REGULATOR_MODE_HPM>;
                };
 
                vreg_l9b_1p2: ldo9 {
                        regulator-max-microvolt = <1200000>;
                        regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
                        regulator-allow-set-load;
+                       regulator-allowed-modes =
+                           <RPMH_REGULATOR_MODE_LPM
+                            RPMH_REGULATOR_MODE_HPM>;
                };
        };
 
index 07008d8..c1bbd55 100644 (file)
                keyup-threshold-microvolt = <1800000>;
                poll-interval = <100>;
 
-               esc-key {
+               button-esc {
                        label = "esc";
                        linux,code = <KEY_ESC>;
                        press-threshold-microvolt = <1310000>;
                };
 
-               home-key {
+               button-home {
                        label = "home";
                        linux,code = <KEY_HOME>;
                        press-threshold-microvolt = <624000>;
                };
 
-               menu-key {
+               button-menu {
                        label = "menu";
                        linux,code = <KEY_MENU>;
                        press-threshold-microvolt = <987000>;
                };
 
-               vol-down-key {
+               button-down {
                        label = "volume down";
                        linux,code = <KEY_VOLUMEDOWN>;
                        press-threshold-microvolt = <300000>;
                };
 
-               vol-up-key {
+               button-up {
                        label = "volume up";
                        linux,code = <KEY_VOLUMEUP>;
                        press-threshold-microvolt = <17000>;
index 9fe9b0d..184b84f 100644 (file)
@@ -23,7 +23,7 @@
                poll-interval = <100>;
                keyup-threshold-microvolt = <1800000>;
 
-               func-key {
+               button-func {
                        linux,code = <KEY_FN>;
                        label = "function";
                        press-threshold-microvolt = <18000>;
                poll-interval = <100>;
                keyup-threshold-microvolt = <1800000>;
 
-               esc-key {
+               button-esc {
                        linux,code = <KEY_MICMUTE>;
                        label = "micmute";
                        press-threshold-microvolt = <1130000>;
                };
 
-               home-key {
+               button-home {
                        linux,code = <KEY_MODE>;
                        label = "mode";
                        press-threshold-microvolt = <901000>;
                };
 
-               menu-key {
+               button-menu {
                        linux,code = <KEY_PLAY>;
                        label = "play";
                        press-threshold-microvolt = <624000>;
                };
 
-               vol-down-key {
+               button-down {
                        linux,code = <KEY_VOLUMEDOWN>;
                        label = "volume down";
                        press-threshold-microvolt = <300000>;
                };
 
-               vol-up-key {
+               button-up {
                        linux,code = <KEY_VOLUMEUP>;
                        label = "volume up";
                        press-threshold-microvolt = <18000>;
index ea68209..7ea4816 100644 (file)
@@ -19,7 +19,7 @@
                stdout-path = "serial2:1500000n8";
        };
 
-       ir_rx {
+       ir-receiver {
                compatible = "gpio-ir-receiver";
                gpios = <&gpio0 RK_PC0 GPIO_ACTIVE_HIGH>;
                pinctrl-names = "default";
index 43c928a..1deef53 100644 (file)
@@ -25,7 +25,7 @@
                keyup-threshold-microvolt = <1800000>;
                poll-interval = <100>;
 
-               recovery {
+               button-recovery {
                        label = "recovery";
                        linux,code = <KEY_VENDOR>;
                        press-threshold-microvolt = <17000>;
index 7f5bba0..81d1064 100644 (file)
                vin-supply = <&vcc_sys>;
        };
 
-       hym8563: hym8563@51 {
+       hym8563: rtc@51 {
                compatible = "haoyu,hym8563";
                reg = <0x51>;
                #clock-cells = <0>;
-               clock-frequency = <32768>;
                clock-output-names = "xin32k";
                /* rtc_int is not connected */
        };
index 38d757c..5589f3d 100644 (file)
                vin-supply = <&vcc_sys>;
        };
 
-       hym8563: hym8563@51 {
+       hym8563: rtc@51 {
                compatible = "haoyu,hym8563";
                reg = <0x51>;
                #clock-cells = <0>;
-               clock-frequency = <32768>;
                clock-output-names = "xin32k";
                /* rtc_int is not connected */
        };
index ed3348b..a47d9f7 100644 (file)
@@ -734,10 +734,6 @@ camera: &i2c7 {
 };
 
 /* PINCTRL OVERRIDES */
-&ec_ap_int_l {
-       rockchip,pins = <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_up>;
-};
-
 &ap_fw_wp {
        rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
 };
index 2a33276..9d9297b 100644 (file)
                keyup-threshold-microvolt = <1800000>;
                poll-interval = <100>;
 
-               recovery {
+               button-recovery {
                        label = "Recovery";
                        linux,code = <KEY_VENDOR>;
                        press-threshold-microvolt = <18000>;
index 452728b..3bf8f95 100644 (file)
@@ -39,7 +39,7 @@
                keyup-threshold-microvolt = <1800000>;
                poll-interval = <100>;
 
-               recovery {
+               button-recovery {
                        label = "Recovery";
                        linux,code = <KEY_VENDOR>;
                        press-threshold-microvolt = <18000>;
index 72182c5..65cb218 100644 (file)
@@ -19,7 +19,7 @@
                keyup-threshold-microvolt = <1500000>;
                poll-interval = <100>;
 
-               recovery {
+               button-recovery {
                        label = "Recovery";
                        linux,code = <KEY_VENDOR>;
                        press-threshold-microvolt = <18000>;
index 278123b..b6e082f 100644 (file)
 };
 
 &emmc_phy {
+       rockchip,enable-strobe-pulldown;
        status = "okay";
 };
 
index 9e2e246..dba4d03 100644 (file)
                        press-threshold-microvolt = <300000>;
                };
 
-               back {
+               button-back {
                        label = "Back";
                        linux,code = <KEY_BACK>;
                        press-threshold-microvolt = <985000>;
                };
 
-               menu {
+               button-menu {
                        label = "Menu";
                        linux,code = <KEY_MENU>;
                        press-threshold-microvolt = <1314000>;
index 04c752f..115c14c 100644 (file)
        cap-sd-highspeed;
        cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
        disable-wp;
-       max-frequency = <150000000>;
+       max-frequency = <40000000>;
        pinctrl-names = "default";
        pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
        vmmc-supply = <&vcc3v3_baseboard>;
index 5a2661a..7ba1c28 100644 (file)
 };
 
 &i2c0 {
-       hym8563: hym8563@51 {
+       hym8563: rtc@51 {
                compatible = "haoyu,hym8563";
                reg = <0x51>;
                interrupt-parent = <&gpio0>;
                interrupts = <RK_PA5 IRQ_TYPE_EDGE_FALLING>;
                #clock-cells = <0>;
-               clock-frequency = <32768>;
                clock-output-names = "xin32k";
                pinctrl-names = "default";
                pinctrl-0 = <&hym8563_int>;
index 2f4b1b2..bbf1e3f 100644 (file)
@@ -41,7 +41,7 @@
                keyup-threshold-microvolt = <1500000>;
                poll-interval = <100>;
 
-               recovery {
+               button-recovery {
                        label = "Recovery";
                        linux,code = <KEY_VENDOR>;
                        press-threshold-microvolt = <18000>;
index 645ced6..1f76d35 100644 (file)
 &i2s1 {
        rockchip,playback-channels = <2>;
        rockchip,capture-channels = <2>;
-       status = "okay";
 };
 
 &i2s2 {
index 13927e7..dbec2b7 100644 (file)
                        press-threshold-microvolt = <300000>;
                };
 
-               back {
+               button-back {
                        label = "Back";
                        linux,code = <KEY_BACK>;
                        press-threshold-microvolt = <985000>;
                };
 
-               menu {
+               button-menu {
                        label = "Menu";
                        linux,code = <KEY_MENU>;
                        press-threshold-microvolt = <1314000>;
index 935b8c6..bf9eb04 100644 (file)
        clock-frequency = <400000>;
        status = "okay";
 
-       hym8563: hym8563@51 {
+       hym8563: rtc@51 {
                compatible = "haoyu,hym8563";
                reg = <0x51>;
                #clock-cells = <0>;
-               clock-frequency = <32768>;
                clock-output-names = "hym8563";
                pinctrl-names = "default";
                pinctrl-0 = <&hym8563_int>;
index 0d45868..8d61f82 100644 (file)
@@ -23,7 +23,7 @@
                io-channel-names = "buttons";
                keyup-threshold-microvolt = <1750000>;
 
-               recovery {
+               button-recovery {
                        label = "recovery";
                        linux,code = <KEY_VENDOR>;
                        press-threshold-microvolt = <0>;
index a05460b..25a8c78 100644 (file)
 
 &uart1 {
        pinctrl-names = "default";
-       pinctrl-0 = <&uart1m0_xfer &uart1m0_ctsn>;
+       pinctrl-0 = <&uart1m0_xfer &uart1m0_ctsn &uart1m0_rtsn>;
        status = "okay";
        uart-has-rtscts;
 
                compatible = "brcm,bcm43438-bt";
                clocks = <&rk817 1>;
                clock-names = "lpo";
-               device-wakeup-gpios = <&gpio2 RK_PC1 GPIO_ACTIVE_HIGH>;
-               host-wakeup-gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>;
+               host-wakeup-gpios = <&gpio2 RK_PC1 GPIO_ACTIVE_HIGH>;
+               device-wakeup-gpios = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>;
                shutdown-gpios = <&gpio2 RK_PB7 GPIO_ACTIVE_HIGH>;
                pinctrl-names = "default";
                pinctrl-0 = <&bt_host_wake_l &bt_wake_l &bt_enable_h>;
                vbat-supply = <&vcc_sys>;
                vddio-supply = <&vcca1v8_pmu>;
+               max-speed = <3000000>;
        };
 };
 
index 77b179c..b276eb0 100644 (file)
                compatible = "rockchip,rk809";
                reg = <0x20>;
                interrupt-parent = <&gpio0>;
-               interrupts = <RK_PA7 IRQ_TYPE_LEVEL_LOW>;
+               interrupts = <RK_PA3 IRQ_TYPE_LEVEL_LOW>;
                assigned-clocks = <&cru I2S1_MCLKOUT_TX>;
                assigned-clock-parents = <&cru CLK_I2S1_8CH_TX>;
                clock-names = "mclk";
index dba648c..9fd2623 100644 (file)
        assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru SCLK_GMAC1>;
        assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru SCLK_GMAC1>, <&gmac1_clkin>;
        clock_in_out = "input";
-       phy-mode = "rgmii-id";
+       phy-mode = "rgmii";
        phy-supply = <&vcc_3v3>;
        pinctrl-names = "default";
        pinctrl-0 = <&gmac1m0_miim
 
 &i2c3 {
        pinctrl-names = "default";
-       pinctrl-0 = <&i2c3m1_xfer>;
-       status = "okay";
-};
-
-&i2c5 {
+       pinctrl-0 = <&i2c3m0_xfer>;
        status = "okay";
 };
 
index c282f6e..26d7fda 100644 (file)
                interrupt-parent = <&gpio0>;
                interrupts = <RK_PD3 IRQ_TYPE_EDGE_FALLING>;
                #clock-cells = <0>;
-               clock-frequency = <32768>;
                clock-output-names = "rtcic_32kout";
                pinctrl-names = "default";
                pinctrl-0 = <&hym8563_int>;
index fb87a16..539ef8c 100644 (file)
                interrupt-parent = <&gpio0>;
                interrupts = <RK_PD3 IRQ_TYPE_LEVEL_LOW>;
                #clock-cells = <0>;
-               clock-frequency = <32768>;
                clock-output-names = "rtcic_32kout";
                pinctrl-names = "default";
                pinctrl-0 = <&hym8563_int>;
index 71a1af4..edf6625 100644 (file)
@@ -863,12 +863,12 @@ static inline bool pte_user_accessible_page(pte_t pte)
 
 static inline bool pmd_user_accessible_page(pmd_t pmd)
 {
-       return pmd_present(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
+       return pmd_leaf(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
 }
 
 static inline bool pud_user_accessible_page(pud_t pud)
 {
-       return pud_present(pud) && pud_user(pud);
+       return pud_leaf(pud) && pud_user(pud);
 }
 #endif
 
index 795344a..322a831 100644 (file)
@@ -299,11 +299,11 @@ SYM_TYPED_FUNC_START(ftrace_stub)
        ret
 SYM_FUNC_END(ftrace_stub)
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 SYM_TYPED_FUNC_START(ftrace_stub_graph)
        ret
 SYM_FUNC_END(ftrace_stub_graph)
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
  * void return_to_handler(void)
  *
index f4cb54d..01b57b7 100644 (file)
@@ -97,7 +97,7 @@ KBUILD_LDFLAGS        += -m $(ld-emul)
 
 ifdef CONFIG_LOONGARCH
 CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
-       egrep -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
+       grep -E -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
        sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
 endif
 
index d06d454..5332b14 100644 (file)
@@ -117,7 +117,7 @@ extern struct fwnode_handle *liointc_handle;
 extern struct fwnode_handle *pch_lpc_handle;
 extern struct fwnode_handle *pch_pic_handle[MAX_IO_PICS];
 
-extern irqreturn_t loongson3_ipi_interrupt(int irq, void *dev);
+extern irqreturn_t loongson_ipi_interrupt(int irq, void *dev);
 
 #include <asm-generic/irq.h>
 
index 946704b..aa0e0e0 100644 (file)
@@ -349,13 +349,17 @@ static inline pte_t pte_mkclean(pte_t pte)
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-       pte_val(pte) |= (_PAGE_DIRTY | _PAGE_MODIFIED);
+       pte_val(pte) |= _PAGE_MODIFIED;
+       if (pte_val(pte) & _PAGE_WRITE)
+               pte_val(pte) |= _PAGE_DIRTY;
        return pte;
 }
 
 static inline pte_t pte_mkwrite(pte_t pte)
 {
-       pte_val(pte) |= (_PAGE_WRITE | _PAGE_DIRTY);
+       pte_val(pte) |= _PAGE_WRITE;
+       if (pte_val(pte) & _PAGE_MODIFIED)
+               pte_val(pte) |= _PAGE_DIRTY;
        return pte;
 }
 
@@ -455,7 +459,9 @@ static inline int pmd_write(pmd_t pmd)
 
 static inline pmd_t pmd_mkwrite(pmd_t pmd)
 {
-       pmd_val(pmd) |= (_PAGE_WRITE | _PAGE_DIRTY);
+       pmd_val(pmd) |= _PAGE_WRITE;
+       if (pmd_val(pmd) & _PAGE_MODIFIED)
+               pmd_val(pmd) |= _PAGE_DIRTY;
        return pmd;
 }
 
@@ -478,7 +484,9 @@ static inline pmd_t pmd_mkclean(pmd_t pmd)
 
 static inline pmd_t pmd_mkdirty(pmd_t pmd)
 {
-       pmd_val(pmd) |= (_PAGE_DIRTY | _PAGE_MODIFIED);
+       pmd_val(pmd) |= _PAGE_MODIFIED;
+       if (pmd_val(pmd) & _PAGE_WRITE)
+               pmd_val(pmd) |= _PAGE_DIRTY;
        return pmd;
 }
 
index 71189b2..3dd172d 100644 (file)
@@ -19,21 +19,21 @@ extern cpumask_t cpu_sibling_map[];
 extern cpumask_t cpu_core_map[];
 extern cpumask_t cpu_foreign_map[];
 
-void loongson3_smp_setup(void);
-void loongson3_prepare_cpus(unsigned int max_cpus);
-void loongson3_boot_secondary(int cpu, struct task_struct *idle);
-void loongson3_init_secondary(void);
-void loongson3_smp_finish(void);
-void loongson3_send_ipi_single(int cpu, unsigned int action);
-void loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action);
+void loongson_smp_setup(void);
+void loongson_prepare_cpus(unsigned int max_cpus);
+void loongson_boot_secondary(int cpu, struct task_struct *idle);
+void loongson_init_secondary(void);
+void loongson_smp_finish(void);
+void loongson_send_ipi_single(int cpu, unsigned int action);
+void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action);
 #ifdef CONFIG_HOTPLUG_CPU
-int loongson3_cpu_disable(void);
-void loongson3_cpu_die(unsigned int cpu);
+int loongson_cpu_disable(void);
+void loongson_cpu_die(unsigned int cpu);
 #endif
 
 static inline void plat_smp_setup(void)
 {
-       loongson3_smp_setup();
+       loongson_smp_setup();
 }
 
 static inline int raw_smp_processor_id(void)
@@ -85,28 +85,28 @@ extern void show_ipi_list(struct seq_file *p, int prec);
  */
 static inline void smp_send_reschedule(int cpu)
 {
-       loongson3_send_ipi_single(cpu, SMP_RESCHEDULE);
+       loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
 }
 
 static inline void arch_send_call_function_single_ipi(int cpu)
 {
-       loongson3_send_ipi_single(cpu, SMP_CALL_FUNCTION);
+       loongson_send_ipi_single(cpu, SMP_CALL_FUNCTION);
 }
 
 static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-       loongson3_send_ipi_mask(mask, SMP_CALL_FUNCTION);
+       loongson_send_ipi_mask(mask, SMP_CALL_FUNCTION);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 static inline int __cpu_disable(void)
 {
-       return loongson3_cpu_disable();
+       return loongson_cpu_disable();
 }
 
 static inline void __cpu_die(unsigned int cpu)
 {
-       loongson3_cpu_die(cpu);
+       loongson_cpu_die(cpu);
 }
 
 extern void play_dead(void);
index 3353984..8319cc4 100644 (file)
@@ -56,23 +56,6 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
                return ioremap_cache(phys, size);
 }
 
-void __init acpi_boot_table_init(void)
-{
-       /*
-        * If acpi_disabled, bail out
-        */
-       if (acpi_disabled)
-               return;
-
-       /*
-        * Initialize the ACPI boot-time table parser.
-        */
-       if (acpi_table_init()) {
-               disable_acpi();
-               return;
-       }
-}
-
 #ifdef CONFIG_SMP
 static int set_processor_mask(u32 id, u32 flags)
 {
@@ -156,13 +139,21 @@ static void __init acpi_process_madt(void)
        loongson_sysconf.nr_cpus = num_processors;
 }
 
-int __init acpi_boot_init(void)
+void __init acpi_boot_table_init(void)
 {
        /*
         * If acpi_disabled, bail out
         */
        if (acpi_disabled)
-               return -1;
+               return;
+
+       /*
+        * Initialize the ACPI boot-time table parser.
+        */
+       if (acpi_table_init()) {
+               disable_acpi();
+               return;
+       }
 
        loongson_sysconf.boot_cpu_id = read_csr_cpuid();
 
@@ -173,8 +164,6 @@ int __init acpi_boot_init(void)
 
        /* Do not enable ACPI SPCR console by default */
        acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
-
-       return 0;
 }
 
 #ifdef CONFIG_ACPI_NUMA
index 1ba19c7..0524bf1 100644 (file)
@@ -117,7 +117,7 @@ void __init init_IRQ(void)
        if (ipi_irq < 0)
                panic("IPI IRQ mapping failed\n");
        irq_set_percpu_devid(ipi_irq);
-       r = request_percpu_irq(ipi_irq, loongson3_ipi_interrupt, "IPI", &ipi_dummy_dev);
+       r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &ipi_dummy_dev);
        if (r < 0)
                panic("IPI IRQ request failed\n");
 #endif
index 2526b68..ddb8ba4 100644 (file)
@@ -152,7 +152,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
                childregs->csr_crmd = p->thread.csr_crmd;
                childregs->csr_prmd = p->thread.csr_prmd;
                childregs->csr_ecfg = p->thread.csr_ecfg;
-               return 0;
+               goto out;
        }
 
        /* user thread */
@@ -171,14 +171,15 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
         */
        childregs->csr_euen = 0;
 
+       if (clone_flags & CLONE_SETTLS)
+               childregs->regs[2] = tls;
+
+out:
        clear_tsk_thread_flag(p, TIF_USEDFPU);
        clear_tsk_thread_flag(p, TIF_USEDSIMD);
        clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
        clear_tsk_thread_flag(p, TIF_LASX_CTX_LIVE);
 
-       if (clone_flags & CLONE_SETTLS)
-               childregs->regs[2] = tls;
-
        return 0;
 }
 
index 1eb63fa..ae436de 100644 (file)
@@ -257,7 +257,6 @@ void __init platform_init(void)
 #ifdef CONFIG_ACPI
        acpi_gbl_use_default_register_widths = false;
        acpi_boot_table_init();
-       acpi_boot_init();
 #endif
 
 #ifdef CONFIG_NUMA
index 781a4d4..6ed72f7 100644 (file)
@@ -136,12 +136,12 @@ static void ipi_write_action(int cpu, u32 action)
        }
 }
 
-void loongson3_send_ipi_single(int cpu, unsigned int action)
+void loongson_send_ipi_single(int cpu, unsigned int action)
 {
        ipi_write_action(cpu_logical_map(cpu), (u32)action);
 }
 
-void loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
        unsigned int i;
 
@@ -149,7 +149,7 @@ void loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action)
                ipi_write_action(cpu_logical_map(i), (u32)action);
 }
 
-irqreturn_t loongson3_ipi_interrupt(int irq, void *dev)
+irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
 {
        unsigned int action;
        unsigned int cpu = smp_processor_id();
@@ -169,7 +169,7 @@ irqreturn_t loongson3_ipi_interrupt(int irq, void *dev)
        return IRQ_HANDLED;
 }
 
-void __init loongson3_smp_setup(void)
+void __init loongson_smp_setup(void)
 {
        cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
        cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
@@ -178,7 +178,7 @@ void __init loongson3_smp_setup(void)
        pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
 }
 
-void __init loongson3_prepare_cpus(unsigned int max_cpus)
+void __init loongson_prepare_cpus(unsigned int max_cpus)
 {
        int i = 0;
 
@@ -193,7 +193,7 @@ void __init loongson3_prepare_cpus(unsigned int max_cpus)
 /*
  * Setup the PC, SP, and TP of a secondary processor and start it running!
  */
-void loongson3_boot_secondary(int cpu, struct task_struct *idle)
+void loongson_boot_secondary(int cpu, struct task_struct *idle)
 {
        unsigned long entry;
 
@@ -205,13 +205,13 @@ void loongson3_boot_secondary(int cpu, struct task_struct *idle)
 
        csr_mail_send(entry, cpu_logical_map(cpu), 0);
 
-       loongson3_send_ipi_single(cpu, SMP_BOOT_CPU);
+       loongson_send_ipi_single(cpu, SMP_BOOT_CPU);
 }
 
 /*
  * SMP init and finish on secondary CPUs
  */
-void loongson3_init_secondary(void)
+void loongson_init_secondary(void)
 {
        unsigned int cpu = smp_processor_id();
        unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
@@ -231,7 +231,7 @@ void loongson3_init_secondary(void)
                     cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
 }
 
-void loongson3_smp_finish(void)
+void loongson_smp_finish(void)
 {
        local_irq_enable();
        iocsr_write64(0, LOONGARCH_IOCSR_MBUF0);
@@ -240,7 +240,7 @@ void loongson3_smp_finish(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-int loongson3_cpu_disable(void)
+int loongson_cpu_disable(void)
 {
        unsigned long flags;
        unsigned int cpu = smp_processor_id();
@@ -262,7 +262,7 @@ int loongson3_cpu_disable(void)
        return 0;
 }
 
-void loongson3_cpu_die(unsigned int cpu)
+void loongson_cpu_die(unsigned int cpu)
 {
        while (per_cpu(cpu_state, cpu) != CPU_DEAD)
                cpu_relax();
@@ -300,19 +300,19 @@ void play_dead(void)
  */
 #ifdef CONFIG_PM
 
-static int loongson3_ipi_suspend(void)
+static int loongson_ipi_suspend(void)
 {
        return 0;
 }
 
-static void loongson3_ipi_resume(void)
+static void loongson_ipi_resume(void)
 {
        iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
 }
 
-static struct syscore_ops loongson3_ipi_syscore_ops = {
-       .resume         = loongson3_ipi_resume,
-       .suspend        = loongson3_ipi_suspend,
+static struct syscore_ops loongson_ipi_syscore_ops = {
+       .resume         = loongson_ipi_resume,
+       .suspend        = loongson_ipi_suspend,
 };
 
 /*
@@ -321,7 +321,7 @@ static struct syscore_ops loongson3_ipi_syscore_ops = {
  */
 static int __init ipi_pm_init(void)
 {
-       register_syscore_ops(&loongson3_ipi_syscore_ops);
+       register_syscore_ops(&loongson_ipi_syscore_ops);
        return 0;
 }
 
@@ -425,7 +425,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
        init_new_context(current, &init_mm);
        current_thread_info()->cpu = 0;
-       loongson3_prepare_cpus(max_cpus);
+       loongson_prepare_cpus(max_cpus);
        set_cpu_sibling_map(0);
        set_cpu_core_map(0);
        calculate_cpu_foreign_map();
@@ -436,7 +436,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
-       loongson3_boot_secondary(cpu, tidle);
+       loongson_boot_secondary(cpu, tidle);
 
        /* Wait for CPU to start and be ready to sync counters */
        if (!wait_for_completion_timeout(&cpu_starting,
@@ -465,7 +465,7 @@ asmlinkage void start_secondary(void)
 
        cpu_probe();
        constant_clockevent_init();
-       loongson3_init_secondary();
+       loongson_init_secondary();
 
        set_cpu_sibling_map(cpu);
        set_cpu_core_map(cpu);
@@ -487,11 +487,11 @@ asmlinkage void start_secondary(void)
        complete(&cpu_running);
 
        /*
-        * irq will be enabled in loongson3_smp_finish(), enabling it too
+        * irq will be enabled in loongson_smp_finish(), enabling it too
         * early is dangerous.
         */
        WARN_ON_ONCE(!irqs_disabled());
-       loongson3_smp_finish();
+       loongson_smp_finish();
 
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
index b206d91..4571c3c 100644 (file)
@@ -43,7 +43,8 @@ static bool unwind_by_prologue(struct unwind_state *state)
 {
        struct stack_info *info = &state->stack_info;
        union loongarch_instruction *ip, *ip_end;
-       unsigned long frame_size = 0, frame_ra = -1;
+       long frame_ra = -1;
+       unsigned long frame_size = 0;
        unsigned long size, offset, pc = state->pc;
 
        if (state->sp >= info->end || state->sp < info->begin)
index 3f8a86c..02e6be9 100644 (file)
@@ -67,12 +67,12 @@ linux.bin.ub linux.bin.gz: linux.bin
 linux.bin: vmlinux
 linux.bin linux.bin.gz linux.bin.ub:
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
-       @echo 'Kernel: $(boot)/$@ is ready' ' (#'`cat .version`')'
+       @echo 'Kernel: $(boot)/$@ is ready' ' (#'$(or $(KBUILD_BUILD_VERSION),`cat .version`)')'
 
 PHONY += simpleImage.$(DTB)
 simpleImage.$(DTB): vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(addprefix $(boot)/$@., ub unstrip strip)
-       @echo 'Kernel: $(boot)/$@ is ready' ' (#'`cat .version`')'
+       @echo 'Kernel: $(boot)/$@ is ready' ' (#'$(or $(KBUILD_BUILD_VERSION),`cat .version`)')'
 
 define archhelp
   echo '* linux.bin    - Create raw binary'
index 8c3ad76..29c11a0 100644 (file)
@@ -20,7 +20,7 @@ $(obj)/vmlinux.bin: vmlinux FORCE
 $(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE
        $(call if_changed,gzip)
 
-$(obj)/vmImage: $(obj)/vmlinux.gz
+$(obj)/vmImage: $(obj)/vmlinux.gz FORCE
        $(call if_changed,uimage)
        @$(kecho) 'Kernel: $@ is ready'
 
index 7786e3a..8c3862b 100644 (file)
@@ -142,7 +142,7 @@ SECTIONS
 #endif
 
        .data.rel.ro : AT(ADDR(.data.rel.ro) - LOAD_OFFSET) {
-               *(.data.rel.ro*)
+               *(.data.rel.ro .data.rel.ro.*)
        }
 
        .branch_lt : AT(ADDR(.branch_lt) - LOAD_OFFSET) {
index 87be3e8..c907f74 100644 (file)
@@ -199,7 +199,16 @@ unsigned long __get_wchan(struct task_struct *p);
 /* Has task runtime instrumentation enabled ? */
 #define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
 
-register unsigned long current_stack_pointer asm("r15");
+/* avoid using global register due to gcc bug in versions < 8.4 */
+#define current_stack_pointer (__current_stack_pointer())
+
+static __always_inline unsigned long __current_stack_pointer(void)
+{
+       unsigned long sp;
+
+       asm volatile("lgr %0,15" : "=d" (sp));
+       return sp;
+}
 
 static __always_inline unsigned short stap(void)
 {
index dd74fe6..e4ef67e 100644 (file)
@@ -46,7 +46,7 @@ struct save_area {
        u64 fprs[16];
        u32 fpc;
        u32 prefix;
-       u64 todpreg;
+       u32 todpreg;
        u64 timer;
        u64 todcmp;
        u64 vxrs_low[16];
index 9860ca5..9e38ffa 100644 (file)
@@ -83,7 +83,7 @@ cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin \
 
 $(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
        $(call if_changed,image)
-       @$(kecho) 'Kernel: $@ is ready' ' (#'`cat .version`')'
+       @$(kecho) 'Kernel: $@ is ready' ' (#'$(or $(KBUILD_BUILD_VERSION),`cat .version`)')'
 
 OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note -R .comment -S
 $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
index 8b70237..d6f3703 100644 (file)
@@ -861,8 +861,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
        pmu_enabled = cpuc->enabled;
        cpuc->enabled = 0;
 
-       /* stop everything (includes BRS) */
-       amd_pmu_disable_all();
+       amd_brs_disable_all();
 
        /* Drain BRS is in use (could be inactive) */
        if (cpuc->lbr_users)
@@ -873,7 +872,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 
        cpuc->enabled = pmu_enabled;
        if (pmu_enabled)
-               amd_pmu_enable_all(0);
+               amd_brs_enable_all();
 
        return amd_pmu_adjust_nmi_window(handled);
 }
index d568afc..83f15fe 100644 (file)
@@ -553,6 +553,7 @@ static void uncore_clean_online(void)
 
        hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
                hlist_del(&uncore->node);
+               kfree(uncore->events);
                kfree(uncore);
        }
 }
index 82ef87e..42a5579 100644 (file)
@@ -1263,6 +1263,15 @@ static int pt_buffer_try_single(struct pt_buffer *buf, int nr_pages)
        if (1 << order != nr_pages)
                goto out;
 
+       /*
+        * Some processors cannot always support single range for more than
+        * 4KB - refer errata TGL052, ADL037 and RPL017. Future processors might
+        * also be affected, so for now rather than trying to keep track of
+        * which ones, just disable it for all.
+        */
+       if (nr_pages > 1)
+               goto out;
+
        buf->single = true;
        buf->nr_pages = nr_pages;
        ret = 0;
index f49bc3e..a269049 100644 (file)
@@ -77,7 +77,7 @@ static int hyperv_init_ghcb(void)
 static int hv_cpu_init(unsigned int cpu)
 {
        union hv_vp_assist_msr_contents msr = { 0 };
-       struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()];
+       struct hv_vp_assist_page **hvp = &hv_vp_assist_page[cpu];
        int ret;
 
        ret = hv_common_cpu_init(cpu);
@@ -87,34 +87,32 @@ static int hv_cpu_init(unsigned int cpu)
        if (!hv_vp_assist_page)
                return 0;
 
-       if (!*hvp) {
-               if (hv_root_partition) {
-                       /*
-                        * For root partition we get the hypervisor provided VP assist
-                        * page, instead of allocating a new page.
-                        */
-                       rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
-                       *hvp = memremap(msr.pfn <<
-                                       HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT,
-                                       PAGE_SIZE, MEMREMAP_WB);
-               } else {
-                       /*
-                        * The VP assist page is an "overlay" page (see Hyper-V TLFS's
-                        * Section 5.2.1 "GPA Overlay Pages"). Here it must be zeroed
-                        * out to make sure we always write the EOI MSR in
-                        * hv_apic_eoi_write() *after* the EOI optimization is disabled
-                        * in hv_cpu_die(), otherwise a CPU may not be stopped in the
-                        * case of CPU offlining and the VM will hang.
-                        */
+       if (hv_root_partition) {
+               /*
+                * For root partition we get the hypervisor provided VP assist
+                * page, instead of allocating a new page.
+                */
+               rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
+               *hvp = memremap(msr.pfn << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT,
+                               PAGE_SIZE, MEMREMAP_WB);
+       } else {
+               /*
+                * The VP assist page is an "overlay" page (see Hyper-V TLFS's
+                * Section 5.2.1 "GPA Overlay Pages"). Here it must be zeroed
+                * out to make sure we always write the EOI MSR in
+                * hv_apic_eoi_write() *after* the EOI optimization is disabled
+                * in hv_cpu_die(), otherwise a CPU may not be stopped in the
+                * case of CPU offlining and the VM will hang.
+                */
+               if (!*hvp)
                        *hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
-                       if (*hvp)
-                               msr.pfn = vmalloc_to_pfn(*hvp);
-               }
-               WARN_ON(!(*hvp));
-               if (*hvp) {
-                       msr.enable = 1;
-                       wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
-               }
+               if (*hvp)
+                       msr.pfn = vmalloc_to_pfn(*hvp);
+
+       }
+       if (!WARN_ON(!(*hvp))) {
+               msr.enable = 1;
+               wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
        }
 
        return hyperv_init_ghcb();
index b71f4f2..b2da7cb 100644 (file)
 #define X86_FEATURE_USE_IBPB_FW                (11*32+16) /* "" Use IBPB during runtime firmware calls */
 #define X86_FEATURE_RSB_VMEXIT_LITE    (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
 
+
+#define X86_FEATURE_MSR_TSX_CTRL       (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
+
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI           (12*32+ 4) /* AVX VNNI instructions */
 #define X86_FEATURE_AVX512_BF16                (12*32+ 5) /* AVX512 BFLOAT16 instructions */
index 10ac527..4a2af82 100644 (file)
 #define MSR_AMD64_CPUID_FN_1           0xc0011004
 #define MSR_AMD64_LS_CFG               0xc0011020
 #define MSR_AMD64_DC_CFG               0xc0011022
+
+#define MSR_AMD64_DE_CFG               0xc0011029
+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT   1
+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE      BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
+
 #define MSR_AMD64_BU_CFG2              0xc001102a
 #define MSR_AMD64_IBSFETCHCTL          0xc0011030
 #define MSR_AMD64_IBSFETCHLINAD                0xc0011031
 #define FAM10H_MMIO_CONF_BASE_MASK     0xfffffffULL
 #define FAM10H_MMIO_CONF_BASE_SHIFT    20
 #define MSR_FAM10H_NODE_ID             0xc001100c
-#define MSR_F10H_DECFG                 0xc0011029
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT    1
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE                BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
 
 /* K8 MSRs */
 #define MSR_K8_TOP_MEM1                        0xc001001a
index 60ece59..dbb38a6 100644 (file)
@@ -37,7 +37,7 @@ __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
  *   rsi = lockval           (second argument)
  *   rdx = internal variable (set to 0)
  */
-asm    (".pushsection .spinlock.text;"
+asm    (".pushsection .spinlock.text, \"ax\";"
        ".globl " PV_UNLOCK ";"
        ".type " PV_UNLOCK ", @function;"
        ".align 4,0x90;"
index 860b602..c75d75b 100644 (file)
@@ -770,8 +770,6 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
                set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
 }
 
-#define MSR_AMD64_DE_CFG       0xC0011029
-
 static void init_amd_ln(struct cpuinfo_x86 *c)
 {
        /*
@@ -965,8 +963,8 @@ static void init_amd(struct cpuinfo_x86 *c)
                 * msr_set_bit() uses the safe accessors, too, even if the MSR
                 * is not present.
                 */
-               msr_set_bit(MSR_F10H_DECFG,
-                           MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+               msr_set_bit(MSR_AMD64_DE_CFG,
+                           MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
 
                /* A serializing LFENCE stops RDTSC speculation */
                set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
index 21fd425..c393b87 100644 (file)
@@ -326,8 +326,8 @@ static void init_hygon(struct cpuinfo_x86 *c)
                 * msr_set_bit() uses the safe accessors, too, even if the MSR
                 * is not present.
                 */
-               msr_set_bit(MSR_F10H_DECFG,
-                           MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+               msr_set_bit(MSR_AMD64_DE_CFG,
+                           MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
 
                /* A serializing LFENCE stops RDTSC speculation */
                set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
index ebe79d6..da8b8ea 100644 (file)
@@ -356,6 +356,9 @@ static int sgx_validate_offset_length(struct sgx_encl *encl,
        if (!length || !IS_ALIGNED(length, PAGE_SIZE))
                return -EINVAL;
 
+       if (offset + length < offset)
+               return -EINVAL;
+
        if (offset + length - PAGE_SIZE >= encl->size)
                return -EINVAL;
 
index ec7bbac..8009c83 100644 (file)
@@ -58,24 +58,6 @@ static void tsx_enable(void)
        wrmsrl(MSR_IA32_TSX_CTRL, tsx);
 }
 
-static bool tsx_ctrl_is_supported(void)
-{
-       u64 ia32_cap = x86_read_arch_cap_msr();
-
-       /*
-        * TSX is controlled via MSR_IA32_TSX_CTRL.  However, support for this
-        * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
-        *
-        * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
-        * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
-        * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
-        * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
-        * tsx= cmdline requests will do nothing on CPUs without
-        * MSR_IA32_TSX_CTRL support.
-        */
-       return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR);
-}
-
 static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
 {
        if (boot_cpu_has_bug(X86_BUG_TAA))
@@ -135,7 +117,7 @@ static void tsx_clear_cpuid(void)
                rdmsrl(MSR_TSX_FORCE_ABORT, msr);
                msr |= MSR_TFA_TSX_CPUID_CLEAR;
                wrmsrl(MSR_TSX_FORCE_ABORT, msr);
-       } else if (tsx_ctrl_is_supported()) {
+       } else if (cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL)) {
                rdmsrl(MSR_IA32_TSX_CTRL, msr);
                msr |= TSX_CTRL_CPUID_CLEAR;
                wrmsrl(MSR_IA32_TSX_CTRL, msr);
@@ -158,7 +140,8 @@ static void tsx_dev_mode_disable(void)
        u64 mcu_opt_ctrl;
 
        /* Check if RTM_ALLOW exists */
-       if (!boot_cpu_has_bug(X86_BUG_TAA) || !tsx_ctrl_is_supported() ||
+       if (!boot_cpu_has_bug(X86_BUG_TAA) ||
+           !cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL) ||
            !cpu_feature_enabled(X86_FEATURE_SRBDS_CTRL))
                return;
 
@@ -191,7 +174,20 @@ void __init tsx_init(void)
                return;
        }
 
-       if (!tsx_ctrl_is_supported()) {
+       /*
+        * TSX is controlled via MSR_IA32_TSX_CTRL.  However, support for this
+        * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
+        *
+        * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
+        * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
+        * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
+        * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
+        * tsx= cmdline requests will do nothing on CPUs without
+        * MSR_IA32_TSX_CTRL support.
+        */
+       if (x86_read_arch_cap_msr() & ARCH_CAP_TSX_CTRL_MSR) {
+               setup_force_cpu_cap(X86_FEATURE_MSR_TSX_CTRL);
+       } else {
                tsx_ctrl_state = TSX_CTRL_NOT_SUPPORTED;
                return;
        }
index 3b28c5b..d00db56 100644 (file)
@@ -605,9 +605,9 @@ int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal)
        if (test_thread_flag(TIF_NEED_FPU_LOAD))
                fpregs_restore_userregs();
        save_fpregs_to_fpstate(dst_fpu);
+       fpregs_unlock();
        if (!(clone_flags & CLONE_THREAD))
                fpu_inherit_perms(dst_fpu);
-       fpregs_unlock();
 
        /*
         * Children never inherit PASID state.
index 1ccb769..b6f96d4 100644 (file)
@@ -2443,6 +2443,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
 {
        bool list_unstable, zapped_root = false;
 
+       lockdep_assert_held_write(&kvm->mmu_lock);
        trace_kvm_mmu_prepare_zap_page(sp);
        ++kvm->stat.mmu_shadow_zapped;
        *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
@@ -4262,14 +4263,14 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
        if (is_page_fault_stale(vcpu, fault, mmu_seq))
                goto out_unlock;
 
-       r = make_mmu_pages_available(vcpu);
-       if (r)
-               goto out_unlock;
-
-       if (is_tdp_mmu_fault)
+       if (is_tdp_mmu_fault) {
                r = kvm_tdp_mmu_map(vcpu, fault);
-       else
+       } else {
+               r = make_mmu_pages_available(vcpu);
+               if (r)
+                       goto out_unlock;
                r = __direct_map(vcpu, fault);
+       }
 
 out_unlock:
        if (is_tdp_mmu_fault)
index 4c62099..995bc0f 100644 (file)
@@ -1091,6 +1091,12 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 
 static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
 {
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SHUTDOWN))
+               return;
+
+       kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
        nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
 }
 
@@ -1125,6 +1131,9 @@ void svm_free_nested(struct vcpu_svm *svm)
        if (!svm->nested.initialized)
                return;
 
+       if (WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr))
+               svm_switch_vmcb(svm, &svm->vmcb01);
+
        svm_vcpu_free_msrpm(svm->nested.msrpm);
        svm->nested.msrpm = NULL;
 
@@ -1143,9 +1152,6 @@ void svm_free_nested(struct vcpu_svm *svm)
        svm->nested.initialized = false;
 }
 
-/*
- * Forcibly leave nested mode in order to be able to reset the VCPU later on.
- */
 void svm_leave_nested(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
index 9f88c8e..ce362e8 100644 (file)
@@ -346,12 +346,6 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
        return 0;
 }
 
-static int is_external_interrupt(u32 info)
-{
-       info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
-       return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
-}
-
 static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -1438,6 +1432,7 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
         */
        svm_clear_current_vmcb(svm->vmcb);
 
+       svm_leave_nested(vcpu);
        svm_free_nested(svm);
 
        sev_free_vcpu(vcpu);
@@ -2709,9 +2704,9 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
        msr->data = 0;
 
        switch (msr->index) {
-       case MSR_F10H_DECFG:
-               if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
-                       msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
+       case MSR_AMD64_DE_CFG:
+               if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
+                       msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
                break;
        case MSR_IA32_PERF_CAPABILITIES:
                return 0;
@@ -2812,7 +2807,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        msr_info->data = 0x1E;
                }
                break;
-       case MSR_F10H_DECFG:
+       case MSR_AMD64_DE_CFG:
                msr_info->data = svm->msr_decfg;
                break;
        default:
@@ -3041,7 +3036,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
        case MSR_VM_IGNNE:
                vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
                break;
-       case MSR_F10H_DECFG: {
+       case MSR_AMD64_DE_CFG: {
                struct kvm_msr_entry msr_entry;
 
                msr_entry.index = msr->index;
@@ -3425,15 +3420,6 @@ static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
                return 0;
        }
 
-       if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
-           exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
-           exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
-           exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
-               printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
-                      "exit_code 0x%x\n",
-                      __func__, svm->vmcb->control.exit_int_info,
-                      exit_code);
-
        if (exit_fastpath != EXIT_FASTPATH_NONE)
                return 1;
 
index 0c62352..5b0d485 100644 (file)
@@ -4854,6 +4854,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
 
 static void nested_vmx_triple_fault(struct kvm_vcpu *vcpu)
 {
+       kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
        nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
 }
 
@@ -6440,9 +6441,6 @@ out:
        return kvm_state.size;
 }
 
-/*
- * Forcibly leave nested mode in order to be able to reset the VCPU later on.
- */
 void vmx_leave_nested(struct kvm_vcpu *vcpu)
 {
        if (is_guest_mode(vcpu)) {
index ecea83f..2835bd7 100644 (file)
@@ -628,6 +628,12 @@ static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vecto
        ex->payload = payload;
 }
 
+/* Forcibly leave the nested mode in cases like a vCPU reset */
+static void kvm_leave_nested(struct kvm_vcpu *vcpu)
+{
+       kvm_x86_ops.nested_ops->leave_nested(vcpu);
+}
+
 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
                unsigned nr, bool has_error, u32 error_code,
                bool has_payload, unsigned long payload, bool reinject)
@@ -1557,7 +1563,7 @@ static const u32 msr_based_features_all[] = {
        MSR_IA32_VMX_EPT_VPID_CAP,
        MSR_IA32_VMX_VMFUNC,
 
-       MSR_F10H_DECFG,
+       MSR_AMD64_DE_CFG,
        MSR_IA32_UCODE_REV,
        MSR_IA32_ARCH_CAPABILITIES,
        MSR_IA32_PERF_CAPABILITIES,
@@ -5195,7 +5201,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 
        if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
                if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
-                       kvm_x86_ops.nested_ops->leave_nested(vcpu);
+                       kvm_leave_nested(vcpu);
                        kvm_smm_changed(vcpu, events->smi.smm);
                }
 
@@ -9805,7 +9811,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 
 int kvm_check_nested_events(struct kvm_vcpu *vcpu)
 {
-       if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
+       if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
                kvm_x86_ops.nested_ops->triple_fault(vcpu);
                return 1;
        }
@@ -10560,15 +10566,16 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        r = 0;
                        goto out;
                }
-               if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
-                       if (is_guest_mode(vcpu)) {
+               if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
+                       if (is_guest_mode(vcpu))
                                kvm_x86_ops.nested_ops->triple_fault(vcpu);
-                       } else {
+
+                       if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
                                vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
                                vcpu->mmio_needed = 0;
                                r = 0;
-                               goto out;
                        }
+                       goto out;
                }
                if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
                        /* Page is swapped out. Do synthetic halt */
@@ -11997,8 +12004,18 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
        WARN_ON_ONCE(!init_event &&
                     (old_cr0 || kvm_read_cr3(vcpu) || kvm_read_cr4(vcpu)));
 
+       /*
+        * SVM doesn't unconditionally VM-Exit on INIT and SHUTDOWN, thus it's
+        * possible to INIT the vCPU while L2 is active.  Force the vCPU back
+        * into L1 as EFER.SVME is cleared on INIT (along with all other EFER
+        * bits), i.e. virtualization is disabled.
+        */
+       if (is_guest_mode(vcpu))
+               kvm_leave_nested(vcpu);
+
        kvm_lapic_reset(vcpu, init_event);
 
+       WARN_ON_ONCE(is_guest_mode(vcpu) || is_smm(vcpu));
        vcpu->arch.hflags = 0;
 
        vcpu->arch.smi_pending = 0;
index 2dae413..f3098c0 100644 (file)
@@ -954,6 +954,14 @@ static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
        return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
 }
 
+static inline int max_evtchn_port(struct kvm *kvm)
+{
+       if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
+               return EVTCHN_2L_NR_CHANNELS;
+       else
+               return COMPAT_EVTCHN_2L_NR_CHANNELS;
+}
+
 static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
                               evtchn_port_t *ports)
 {
@@ -1042,6 +1050,10 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
                        *r = -EFAULT;
                        goto out;
                }
+               if (ports[i] >= max_evtchn_port(vcpu->kvm)) {
+                       *r = -EINVAL;
+                       goto out;
+               }
        }
 
        if (sched_poll.nr_ports == 1)
@@ -1215,6 +1227,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
        bool longmode;
        u64 input, params[6], r = -ENOSYS;
        bool handled = false;
+       u8 cpl;
 
        input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);
 
@@ -1242,9 +1255,17 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
                params[5] = (u64)kvm_r9_read(vcpu);
        }
 #endif
+       cpl = static_call(kvm_x86_get_cpl)(vcpu);
        trace_kvm_xen_hypercall(input, params[0], params[1], params[2],
                                params[3], params[4], params[5]);
 
+       /*
+        * Only allow hypercall acceleration for CPL0. The rare hypercalls that
+        * are permitted in guest userspace can be handled by the VMM.
+        */
+       if (unlikely(cpl > 0))
+               goto handle_in_userspace;
+
        switch (input) {
        case __HYPERVISOR_xen_version:
                if (params[0] == XENVER_version && vcpu->kvm->arch.xen.xen_version) {
@@ -1279,10 +1300,11 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
        if (handled)
                return kvm_xen_hypercall_set_result(vcpu, r);
 
+handle_in_userspace:
        vcpu->run->exit_reason = KVM_EXIT_XEN;
        vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
        vcpu->run->xen.u.hcall.longmode = longmode;
-       vcpu->run->xen.u.hcall.cpl = static_call(kvm_x86_get_cpl)(vcpu);
+       vcpu->run->xen.u.hcall.cpl = cpl;
        vcpu->run->xen.u.hcall.input = input;
        vcpu->run->xen.u.hcall.params[0] = params[0];
        vcpu->run->xen.u.hcall.params[1] = params[1];
@@ -1297,14 +1319,6 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static inline int max_evtchn_port(struct kvm *kvm)
-{
-       if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
-               return EVTCHN_2L_NR_CHANNELS;
-       else
-               return COMPAT_EVTCHN_2L_NR_CHANNELS;
-}
-
 static void kvm_xen_check_poller(struct kvm_vcpu *vcpu, int port)
 {
        int poll_evtchn = vcpu->arch.xen.poll_evtchn;
index 78c5bc6..6453fba 100644 (file)
@@ -217,9 +217,15 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
-       phys_addr &= PHYSICAL_PAGE_MASK;
+       phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
+       /*
+        * Mask out any bits not part of the actual physical
+        * address, like memory encryption bits.
+        */
+       phys_addr &= PHYSICAL_PAGE_MASK;
+
        retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
                                                pcm, &new_pcm);
        if (retval) {
index 00127ab..9962042 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/bpf.h>
 #include <linux/memory.h>
 #include <linux/sort.h>
-#include <linux/init.h>
 #include <asm/extable.h>
 #include <asm/set_memory.h>
 #include <asm/nospec-branch.h>
@@ -389,18 +388,6 @@ out:
        return ret;
 }
 
-int __init bpf_arch_init_dispatcher_early(void *ip)
-{
-       const u8 *nop_insn = x86_nops[5];
-
-       if (is_endbr(*(u32 *)ip))
-               ip += ENDBR_INSN_SIZE;
-
-       if (memcmp(ip, nop_insn, X86_PATCH_SIZE))
-               text_poke_early(ip, nop_insn, X86_PATCH_SIZE);
-       return 0;
-}
-
 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
                       void *old_addr, void *new_addr)
 {
index bb176c7..93ae332 100644 (file)
@@ -513,15 +513,23 @@ static int pm_cpu_check(const struct x86_cpu_id *c)
 
 static void pm_save_spec_msr(void)
 {
-       u32 spec_msr_id[] = {
-               MSR_IA32_SPEC_CTRL,
-               MSR_IA32_TSX_CTRL,
-               MSR_TSX_FORCE_ABORT,
-               MSR_IA32_MCU_OPT_CTRL,
-               MSR_AMD64_LS_CFG,
+       struct msr_enumeration {
+               u32 msr_no;
+               u32 feature;
+       } msr_enum[] = {
+               { MSR_IA32_SPEC_CTRL,    X86_FEATURE_MSR_SPEC_CTRL },
+               { MSR_IA32_TSX_CTRL,     X86_FEATURE_MSR_TSX_CTRL },
+               { MSR_TSX_FORCE_ABORT,   X86_FEATURE_TSX_FORCE_ABORT },
+               { MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL },
+               { MSR_AMD64_LS_CFG,      X86_FEATURE_LS_CFG_SSBD },
+               { MSR_AMD64_DE_CFG,      X86_FEATURE_LFENCE_RDTSC },
        };
+       int i;
 
-       msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
+       for (i = 0; i < ARRAY_SIZE(msr_enum); i++) {
+               if (boot_cpu_has(msr_enum[i].feature))
+                       msr_build_context(&msr_enum[i].msr_no, 1);
+       }
 }
 
 static int pm_check_save_msr(void)
index f82857e..038da45 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/start_kernel.h>
 #include <linux/sched.h>
 #include <linux/kprobes.h>
+#include <linux/kstrtox.h>
 #include <linux/memblock.h>
 #include <linux/export.h>
 #include <linux/mm.h>
@@ -113,7 +114,7 @@ static __read_mostly bool xen_msr_safe = IS_ENABLED(CONFIG_XEN_PV_MSR_SAFE);
 static int __init parse_xen_msr_safe(char *str)
 {
        if (str)
-               return strtobool(str, &xen_msr_safe);
+               return kstrtobool(str, &xen_msr_safe);
        return -EINVAL;
 }
 early_param("xen_msr_safe", parse_xen_msr_safe);
index 4f43095..8db26f1 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/init.h>
 #include <linux/sched.h>
+#include <linux/kstrtox.h>
 #include <linux/mm.h>
 #include <linux/pm.h>
 #include <linux/memblock.h>
@@ -85,7 +86,7 @@ static void __init xen_parse_512gb(void)
        arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
        if (!arg)
                val = true;
-       else if (strtobool(arg + strlen("xen_512gb_limit="), &val))
+       else if (kstrtobool(arg + strlen("xen_512gb_limit="), &val))
                return;
 
        xen_512gb_limit = val;
index 6a5c849..ed761c6 100644 (file)
@@ -1213,7 +1213,7 @@ static int blkcg_css_online(struct cgroup_subsys_state *css)
         * parent so that offline always happens towards the root.
         */
        if (parent)
-               blkcg_pin_online(css);
+               blkcg_pin_online(&parent->css);
        return 0;
 }
 
index 1766715..5487912 100644 (file)
@@ -425,7 +425,6 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
                                PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
                goto fail_stats;
 
-       blk_queue_dma_alignment(q, 511);
        blk_set_default_limits(&q->limits);
        q->nr_requests = BLKDEV_DEFAULT_RQ;
 
index 6a789cd..228a669 100644 (file)
@@ -4045,9 +4045,14 @@ EXPORT_SYMBOL(__blk_mq_alloc_disk);
 struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
                struct lock_class_key *lkclass)
 {
+       struct gendisk *disk;
+
        if (!blk_get_queue(q))
                return NULL;
-       return __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
+       disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
+       if (!disk)
+               blk_put_queue(q);
+       return disk;
 }
 EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
 
index 8bb9eef..8ac1038 100644 (file)
@@ -57,8 +57,8 @@ void blk_set_default_limits(struct queue_limits *lim)
        lim->misaligned = 0;
        lim->zoned = BLK_ZONED_NONE;
        lim->zone_write_granularity = 0;
+       lim->dma_alignment = 511;
 }
-EXPORT_SYMBOL(blk_set_default_limits);
 
 /**
  * blk_set_stacking_limits - set default limits for stacking devices
@@ -600,6 +600,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
+       t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
 
        /* Set non-power-of-2 compatible chunk_sectors boundary */
        if (b->chunk_sectors)
@@ -773,7 +774,7 @@ EXPORT_SYMBOL(blk_queue_virt_boundary);
  **/
 void blk_queue_dma_alignment(struct request_queue *q, int mask)
 {
-       q->dma_alignment = mask;
+       q->limits.dma_alignment = mask;
 }
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
@@ -795,8 +796,8 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 {
        BUG_ON(mask > PAGE_SIZE);
 
-       if (mask > q->dma_alignment)
-               q->dma_alignment = mask;
+       if (mask > q->limits.dma_alignment)
+               q->limits.dma_alignment = mask;
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
index d6ea0d1..a186ea2 100644 (file)
@@ -331,6 +331,7 @@ void blk_rq_set_mixed_merge(struct request *rq);
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
 
+void blk_set_default_limits(struct queue_limits *lim);
 int blk_dev_init(void);
 
 /*
index f522652..73db0cb 100644 (file)
@@ -1778,7 +1778,7 @@ static void speakup_con_update(struct vc_data *vc)
 {
        unsigned long flags;
 
-       if (!speakup_console[vc->vc_num] || spk_parked)
+       if (!speakup_console[vc->vc_num] || spk_parked || !synth)
                return;
        if (!spin_trylock_irqsave(&speakup_info.spinlock, flags))
                /* Speakup output, discard */
index 4bf2ee8..4ce9a12 100644 (file)
@@ -54,7 +54,7 @@ static inline int oops(const char *msg, const char *info)
 
 static inline struct st_key *hash_name(char *name)
 {
-       u_char *pn = (u_char *)name;
+       unsigned char *pn = (unsigned char *)name;
        int hash = 0;
 
        while (*pn) {
index 1c39cfc..4ad42b0 100644 (file)
@@ -739,6 +739,12 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
        const char *failure_string;
        struct binder_buffer *buffer;
 
+       if (unlikely(vma->vm_mm != alloc->mm)) {
+               ret = -EINVAL;
+               failure_string = "invalid vma->vm_mm";
+               goto err_invalid_mm;
+       }
+
        mutex_lock(&binder_alloc_mmap_lock);
        if (alloc->buffer_size) {
                ret = -EBUSY;
@@ -785,6 +791,7 @@ err_alloc_pages_failed:
        alloc->buffer_size = 0;
 err_already_mapped:
        mutex_unlock(&binder_alloc_mmap_lock);
+err_invalid_mm:
        binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                           "%s: %d %lx-%lx %s failed %d\n", __func__,
                           alloc->pid, vma->vm_start, vma->vm_end,
index f3e4db1..8532b83 100644 (file)
@@ -2672,7 +2672,7 @@ static int init_submitter(struct drbd_device *device)
 enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
 {
        struct drbd_resource *resource = adm_ctx->resource;
-       struct drbd_connection *connection;
+       struct drbd_connection *connection, *n;
        struct drbd_device *device;
        struct drbd_peer_device *peer_device, *tmp_peer_device;
        struct gendisk *disk;
@@ -2789,7 +2789,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
        return NO_ERROR;
 
 out_idr_remove_from_resource:
-       for_each_connection(connection, resource) {
+       for_each_connection_safe(connection, n, resource) {
                peer_device = idr_remove(&connection->peer_devices, vnr);
                if (peer_device)
                        kref_put(&connection->kref, drbd_destroy_connection);
index f96cb01..e9de9d8 100644 (file)
 #define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD)
 
 struct ublk_rq_data {
-       union {
-               struct callback_head work;
-               struct llist_node node;
-       };
+       struct llist_node node;
+       struct callback_head work;
 };
 
 struct ublk_uring_cmd_pdu {
@@ -766,15 +764,31 @@ static inline void __ublk_rq_task_work(struct request *req)
        ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
 }
 
+static inline void ublk_forward_io_cmds(struct ublk_queue *ubq)
+{
+       struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
+       struct ublk_rq_data *data, *tmp;
+
+       io_cmds = llist_reverse_order(io_cmds);
+       llist_for_each_entry_safe(data, tmp, io_cmds, node)
+               __ublk_rq_task_work(blk_mq_rq_from_pdu(data));
+}
+
+static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
+{
+       struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
+       struct ublk_rq_data *data, *tmp;
+
+       llist_for_each_entry_safe(data, tmp, io_cmds, node)
+               __ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
+}
+
 static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
 {
        struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
        struct ublk_queue *ubq = pdu->ubq;
-       struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
-       struct ublk_rq_data *data;
 
-       llist_for_each_entry(data, io_cmds, node)
-               __ublk_rq_task_work(blk_mq_rq_from_pdu(data));
+       ublk_forward_io_cmds(ubq);
 }
 
 static void ublk_rq_task_work_fn(struct callback_head *work)
@@ -782,14 +796,20 @@ static void ublk_rq_task_work_fn(struct callback_head *work)
        struct ublk_rq_data *data = container_of(work,
                        struct ublk_rq_data, work);
        struct request *req = blk_mq_rq_from_pdu(data);
+       struct ublk_queue *ubq = req->mq_hctx->driver_data;
 
-       __ublk_rq_task_work(req);
+       ublk_forward_io_cmds(ubq);
 }
 
-static void ublk_submit_cmd(struct ublk_queue *ubq, const struct request *rq)
+static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
 {
-       struct ublk_io *io = &ubq->ios[rq->tag];
+       struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
+       struct ublk_io *io;
 
+       if (!llist_add(&data->node, &ubq->io_cmds))
+               return;
+
+       io = &ubq->ios[rq->tag];
        /*
         * If the check pass, we know that this is a re-issued request aborted
         * previously in monitor_work because the ubq_daemon(cmd's task) is
@@ -803,11 +823,11 @@ static void ublk_submit_cmd(struct ublk_queue *ubq, const struct request *rq)
         * guarantees that here is a re-issued request aborted previously.
         */
        if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
-               struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
-               struct ublk_rq_data *data;
-
-               llist_for_each_entry(data, io_cmds, node)
-                       __ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
+               ublk_abort_io_cmds(ubq);
+       } else if (ublk_can_use_task_work(ubq)) {
+               if (task_work_add(ubq->ubq_daemon, &data->work,
+                                       TWA_SIGNAL_NO_IPI))
+                       ublk_abort_io_cmds(ubq);
        } else {
                struct io_uring_cmd *cmd = io->cmd;
                struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
@@ -817,23 +837,6 @@ static void ublk_submit_cmd(struct ublk_queue *ubq, const struct request *rq)
        }
 }
 
-static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq,
-               bool last)
-{
-       struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
-
-       if (ublk_can_use_task_work(ubq)) {
-               enum task_work_notify_mode notify_mode = last ?
-                       TWA_SIGNAL_NO_IPI : TWA_NONE;
-
-               if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
-                       __ublk_abort_rq(ubq, rq);
-       } else {
-               if (llist_add(&data->node, &ubq->io_cmds))
-                       ublk_submit_cmd(ubq, rq);
-       }
-}
-
 static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
 {
@@ -865,19 +868,11 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
                return BLK_STS_OK;
        }
 
-       ublk_queue_cmd(ubq, rq, bd->last);
+       ublk_queue_cmd(ubq, rq);
 
        return BLK_STS_OK;
 }
 
-static void ublk_commit_rqs(struct blk_mq_hw_ctx *hctx)
-{
-       struct ublk_queue *ubq = hctx->driver_data;
-
-       if (ublk_can_use_task_work(ubq))
-               __set_notify_signal(ubq->ubq_daemon);
-}
-
 static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
                unsigned int hctx_idx)
 {
@@ -899,7 +894,6 @@ static int ublk_init_rq(struct blk_mq_tag_set *set, struct request *req,
 
 static const struct blk_mq_ops ublk_mq_ops = {
        .queue_rq       = ublk_queue_rq,
-       .commit_rqs     = ublk_commit_rqs,
        .init_hctx      = ublk_init_hctx,
        .init_request   = ublk_init_rq,
 };
@@ -1197,7 +1191,7 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
        struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
        struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
 
-       ublk_queue_cmd(ubq, req, true);
+       ublk_queue_cmd(ubq, req);
 }
 
 static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
index a438844..91db001 100644 (file)
@@ -49,7 +49,7 @@
 #define IXP4XX_EXP_SIZE_SHIFT          10
 #define IXP4XX_EXP_CNFG_0              BIT(9) /* Always zero */
 #define IXP43X_EXP_SYNC_INTEL          BIT(8) /* Only on IXP43x */
-#define IXP43X_EXP_EXP_CHIP            BIT(7) /* Only on IXP43x */
+#define IXP43X_EXP_EXP_CHIP            BIT(7) /* Only on IXP43x, dangerous to touch on IXP42x */
 #define IXP4XX_EXP_BYTE_RD16           BIT(6)
 #define IXP4XX_EXP_HRDY_POL            BIT(5) /* Only on IXP42x */
 #define IXP4XX_EXP_MUX_EN              BIT(4)
@@ -57,8 +57,6 @@
 #define IXP4XX_EXP_WORD                        BIT(2) /* Always zero */
 #define IXP4XX_EXP_WR_EN               BIT(1)
 #define IXP4XX_EXP_BYTE_EN             BIT(0)
-#define IXP42X_RESERVED                        (BIT(30)|IXP4XX_EXP_CNFG_0|BIT(8)|BIT(7)|IXP4XX_EXP_WORD)
-#define IXP43X_RESERVED                        (BIT(30)|IXP4XX_EXP_CNFG_0|BIT(5)|IXP4XX_EXP_WORD)
 
 #define IXP4XX_EXP_CNFG0               0x20
 #define IXP4XX_EXP_CNFG0_MEM_MAP       BIT(31)
@@ -252,10 +250,9 @@ static void ixp4xx_exp_setup_chipselect(struct ixp4xx_eb *eb,
                cs_cfg |= val << IXP4XX_EXP_CYC_TYPE_SHIFT;
        }
 
-       if (eb->is_42x)
-               cs_cfg &= ~IXP42X_RESERVED;
        if (eb->is_43x) {
-               cs_cfg &= ~IXP43X_RESERVED;
+               /* Should always be zero */
+               cs_cfg &= ~IXP4XX_EXP_WORD;
                /*
                 * This bit for Intel strata flash is currently unused, but let's
                 * report it if we find one.
index 4cd2e12..3aa91ae 100644 (file)
@@ -267,6 +267,9 @@ EXPORT_SYMBOL_GPL(sunxi_rsb_driver_register);
 /* common code that starts a transfer */
 static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
 {
+       u32 int_mask, status;
+       bool timeout;
+
        if (readl(rsb->regs + RSB_CTRL) & RSB_CTRL_START_TRANS) {
                dev_dbg(rsb->dev, "RSB transfer still in progress\n");
                return -EBUSY;
@@ -274,13 +277,23 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
 
        reinit_completion(&rsb->complete);
 
-       writel(RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER,
-              rsb->regs + RSB_INTE);
+       int_mask = RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER;
+       writel(int_mask, rsb->regs + RSB_INTE);
        writel(RSB_CTRL_START_TRANS | RSB_CTRL_GLOBAL_INT_ENB,
               rsb->regs + RSB_CTRL);
 
-       if (!wait_for_completion_io_timeout(&rsb->complete,
-                                           msecs_to_jiffies(100))) {
+       if (irqs_disabled()) {
+               timeout = readl_poll_timeout_atomic(rsb->regs + RSB_INTS,
+                                                   status, (status & int_mask),
+                                                   10, 100000);
+               writel(status, rsb->regs + RSB_INTS);
+       } else {
+               timeout = !wait_for_completion_io_timeout(&rsb->complete,
+                                                         msecs_to_jiffies(100));
+               status = rsb->status;
+       }
+
+       if (timeout) {
                dev_dbg(rsb->dev, "RSB timeout\n");
 
                /* abort the transfer */
@@ -292,18 +305,18 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
                return -ETIMEDOUT;
        }
 
-       if (rsb->status & RSB_INTS_LOAD_BSY) {
+       if (status & RSB_INTS_LOAD_BSY) {
                dev_dbg(rsb->dev, "RSB busy\n");
                return -EBUSY;
        }
 
-       if (rsb->status & RSB_INTS_TRANS_ERR) {
-               if (rsb->status & RSB_INTS_TRANS_ERR_ACK) {
+       if (status & RSB_INTS_TRANS_ERR) {
+               if (status & RSB_INTS_TRANS_ERR_ACK) {
                        dev_dbg(rsb->dev, "RSB slave nack\n");
                        return -EINVAL;
                }
 
-               if (rsb->status & RSB_INTS_TRANS_ERR_DATA) {
+               if (status & RSB_INTS_TRANS_ERR_DATA) {
                        dev_dbg(rsb->dev, "RSB transfer data error\n");
                        return -EIO;
                }
@@ -812,14 +825,6 @@ static int sunxi_rsb_remove(struct platform_device *pdev)
        return 0;
 }
 
-static void sunxi_rsb_shutdown(struct platform_device *pdev)
-{
-       struct sunxi_rsb *rsb = platform_get_drvdata(pdev);
-
-       pm_runtime_disable(&pdev->dev);
-       sunxi_rsb_hw_exit(rsb);
-}
-
 static const struct dev_pm_ops sunxi_rsb_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(sunxi_rsb_runtime_suspend,
                           sunxi_rsb_runtime_resume, NULL)
@@ -835,7 +840,6 @@ MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table);
 static struct platform_driver sunxi_rsb_driver = {
        .probe = sunxi_rsb_probe,
        .remove = sunxi_rsb_remove,
-       .shutdown = sunxi_rsb_shutdown,
        .driver = {
                .name = RSB_CTRL_NAME,
                .of_match_table = sunxi_rsb_of_match_table,
index 310779b..00476e9 100644 (file)
@@ -35,7 +35,7 @@ config X86_PCC_CPUFREQ
          If in doubt, say N.
 
 config X86_AMD_PSTATE
-       tristate "AMD Processor P-State driver"
+       bool "AMD Processor P-State driver"
        depends on X86 && ACPI
        select ACPI_PROCESSOR
        select ACPI_CPPC_LIB if X86_64
index ace7d50..204e390 100644 (file)
  * we disable it by default to go acpi-cpufreq on these processors and add a
  * module parameter to be able to enable it manually for debugging.
  */
-static bool shared_mem = false;
-module_param(shared_mem, bool, 0444);
-MODULE_PARM_DESC(shared_mem,
-                "enable amd-pstate on processors with shared memory solution (false = disabled (default), true = enabled)");
-
 static struct cpufreq_driver amd_pstate_driver;
+static int cppc_load __initdata;
 
 static inline int pstate_enable(bool enable)
 {
@@ -424,12 +420,22 @@ static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
        amd_pstate_driver.boost_enabled = true;
 }
 
+static void amd_perf_ctl_reset(unsigned int cpu)
+{
+       wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
+}
+
 static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 {
        int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
        struct device *dev;
        struct amd_cpudata *cpudata;
 
+       /*
+        * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
+        * which is ideal for initialization process.
+        */
+       amd_perf_ctl_reset(policy->cpu);
        dev = get_cpu_device(policy->cpu);
        if (!dev)
                return -ENODEV;
@@ -616,6 +622,15 @@ static int __init amd_pstate_init(void)
 
        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                return -ENODEV;
+       /*
+        * by default the pstate driver is disabled to load
+        * enable the amd_pstate passive mode driver explicitly
+        * with amd_pstate=passive in kernel command line
+        */
+       if (!cppc_load) {
+               pr_debug("driver load is disabled, boot with amd_pstate=passive to enable this\n");
+               return -ENODEV;
+       }
 
        if (!acpi_cpc_valid()) {
                pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
@@ -630,13 +645,11 @@ static int __init amd_pstate_init(void)
        if (boot_cpu_has(X86_FEATURE_CPPC)) {
                pr_debug("AMD CPPC MSR based functionality is supported\n");
                amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf;
-       } else if (shared_mem) {
+       } else {
+               pr_debug("AMD CPPC shared memory based functionality is supported\n");
                static_call_update(amd_pstate_enable, cppc_enable);
                static_call_update(amd_pstate_init_perf, cppc_init_perf);
                static_call_update(amd_pstate_update_perf, cppc_update_perf);
-       } else {
-               pr_info("This processor supports shared memory solution, you can enable it with amd_pstate.shared_mem=1\n");
-               return -ENODEV;
        }
 
        /* enable amd pstate feature */
@@ -653,16 +666,22 @@ static int __init amd_pstate_init(void)
 
        return ret;
 }
+device_initcall(amd_pstate_init);
 
-static void __exit amd_pstate_exit(void)
+static int __init amd_pstate_param(char *str)
 {
-       cpufreq_unregister_driver(&amd_pstate_driver);
+       if (!str)
+               return -EINVAL;
 
-       amd_pstate_enable(false);
-}
+       if (!strcmp(str, "disable")) {
+               cppc_load = 0;
+               pr_info("driver is explicitly disabled\n");
+       } else if (!strcmp(str, "passive"))
+               cppc_load = 1;
 
-module_init(amd_pstate_init);
-module_exit(amd_pstate_exit);
+       return 0;
+}
+early_param("amd_pstate", amd_pstate_param);
 
 MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
 MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
index dd0f83e..e6f36c0 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/dma-buf.h>
 #include <linux/dma-fence.h>
+#include <linux/dma-fence-unwrap.h>
 #include <linux/anon_inodes.h>
 #include <linux/export.h>
 #include <linux/debugfs.h>
@@ -391,8 +392,10 @@ static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
                                     const void __user *user_data)
 {
        struct dma_buf_import_sync_file arg;
-       struct dma_fence *fence;
+       struct dma_fence *fence, *f;
        enum dma_resv_usage usage;
+       struct dma_fence_unwrap iter;
+       unsigned int num_fences;
        int ret = 0;
 
        if (copy_from_user(&arg, user_data, sizeof(arg)))
@@ -411,13 +414,21 @@ static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
        usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
                                                   DMA_RESV_USAGE_READ;
 
-       dma_resv_lock(dmabuf->resv, NULL);
+       num_fences = 0;
+       dma_fence_unwrap_for_each(f, &iter, fence)
+               ++num_fences;
 
-       ret = dma_resv_reserve_fences(dmabuf->resv, 1);
-       if (!ret)
-               dma_resv_add_fence(dmabuf->resv, fence, usage);
+       if (num_fences > 0) {
+               dma_resv_lock(dmabuf->resv, NULL);
 
-       dma_resv_unlock(dmabuf->resv);
+               ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
+               if (!ret) {
+                       dma_fence_unwrap_for_each(f, &iter, fence)
+                               dma_resv_add_fence(dmabuf->resv, f, usage);
+               }
+
+               dma_resv_unlock(dmabuf->resv);
+       }
 
        dma_fence_put(fence);
 
index 8f5848a..59d1588 100644 (file)
@@ -233,18 +233,6 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
                return ERR_PTR(-EINVAL);
        }
 
-       /* check the name is unique */
-       mutex_lock(&heap_list_lock);
-       list_for_each_entry(h, &heap_list, list) {
-               if (!strcmp(h->name, exp_info->name)) {
-                       mutex_unlock(&heap_list_lock);
-                       pr_err("dma_heap: Already registered heap named %s\n",
-                              exp_info->name);
-                       return ERR_PTR(-EINVAL);
-               }
-       }
-       mutex_unlock(&heap_list_lock);
-
        heap = kzalloc(sizeof(*heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
@@ -283,13 +271,27 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
                err_ret = ERR_CAST(dev_ret);
                goto err2;
        }
-       /* Add heap to the list */
+
        mutex_lock(&heap_list_lock);
+       /* check the name is unique */
+       list_for_each_entry(h, &heap_list, list) {
+               if (!strcmp(h->name, exp_info->name)) {
+                       mutex_unlock(&heap_list_lock);
+                       pr_err("dma_heap: Already registered heap named %s\n",
+                              exp_info->name);
+                       err_ret = ERR_PTR(-EINVAL);
+                       goto err3;
+               }
+       }
+
+       /* Add heap to the list */
        list_add(&heap->list, &heap_list);
        mutex_unlock(&heap_list_lock);
 
        return heap;
 
+err3:
+       device_destroy(dma_heap_class, heap->heap_devt);
 err2:
        cdev_del(&heap->heap_cdev);
 err1:
index 41041ff..2a120d8 100644 (file)
@@ -327,7 +327,13 @@ static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
                return IRQ_NONE;
 
        tusb320_extcon_irq_handler(priv, reg);
-       tusb320_typec_irq_handler(priv, reg);
+
+       /*
+        * Type-C support is optional. Only call the Type-C handler if a
+        * port had been registered previously.
+        */
+       if (priv->port)
+               tusb320_typec_irq_handler(priv, reg);
 
        regmap_write(priv->regmap, TUSB320_REG9, reg);
 
index c52bcaa..9ca21fe 100644 (file)
@@ -149,12 +149,8 @@ static int coreboot_table_probe(struct platform_device *pdev)
        if (!ptr)
                return -ENOMEM;
 
-       ret = bus_register(&coreboot_bus_type);
-       if (!ret) {
-               ret = coreboot_table_populate(dev, ptr);
-               if (ret)
-                       bus_unregister(&coreboot_bus_type);
-       }
+       ret = coreboot_table_populate(dev, ptr);
+
        memunmap(ptr);
 
        return ret;
@@ -169,7 +165,6 @@ static int __cb_dev_unregister(struct device *dev, void *dummy)
 static int coreboot_table_remove(struct platform_device *pdev)
 {
        bus_for_each_dev(&coreboot_bus_type, NULL, NULL, __cb_dev_unregister);
-       bus_unregister(&coreboot_bus_type);
        return 0;
 }
 
@@ -199,6 +194,32 @@ static struct platform_driver coreboot_table_driver = {
                .of_match_table = of_match_ptr(coreboot_of_match),
        },
 };
-module_platform_driver(coreboot_table_driver);
+
+static int __init coreboot_table_driver_init(void)
+{
+       int ret;
+
+       ret = bus_register(&coreboot_bus_type);
+       if (ret)
+               return ret;
+
+       ret = platform_driver_register(&coreboot_table_driver);
+       if (ret) {
+               bus_unregister(&coreboot_bus_type);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void __exit coreboot_table_driver_exit(void)
+{
+       platform_driver_unregister(&coreboot_table_driver);
+       bus_unregister(&coreboot_bus_type);
+}
+
+module_init(coreboot_table_driver_init);
+module_exit(coreboot_table_driver_exit);
+
 MODULE_AUTHOR("Google, Inc.");
 MODULE_LICENSE("GPL");
index 8639a4f..2eca582 100644 (file)
@@ -1293,6 +1293,7 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
                                u32 reg, u32 v);
 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
                                            struct dma_fence *gang);
+bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
 
 /* atpx handler */
 #if defined(CONFIG_VGA_SWITCHEROO)
index c8935d7..4485bb2 100644 (file)
@@ -41,5 +41,6 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
        .get_atc_vmid_pasid_mapping_info =
                                kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
        .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
+       .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
        .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
 };
index 84f44f7..1f76e27 100644 (file)
@@ -171,9 +171,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
            (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
             kfd_mem_limit.max_ttm_mem_limit) ||
            (adev && adev->kfd.vram_used + vram_needed >
-            adev->gmc.real_vram_size -
-            atomic64_read(&adev->vram_pin_size) -
-            reserved_for_pt)) {
+            adev->gmc.real_vram_size - reserved_for_pt)) {
                ret = -ENOMEM;
                goto release;
        }
@@ -988,6 +986,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
        struct amdkfd_process_info *process_info = mem->process_info;
        struct amdgpu_bo *bo = mem->bo;
        struct ttm_operation_ctx ctx = { true, false };
+       struct hmm_range *range;
        int ret = 0;
 
        mutex_lock(&process_info->lock);
@@ -1017,7 +1016,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
                return 0;
        }
 
-       ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
+       ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
        if (ret) {
                pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
                goto unregister_out;
@@ -1035,7 +1034,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
        amdgpu_bo_unreserve(bo);
 
 release_out:
-       amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+       amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
 unregister_out:
        if (ret)
                amdgpu_mn_unregister(bo);
@@ -2372,6 +2371,8 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
        /* Go through userptr_inval_list and update any invalid user_pages */
        list_for_each_entry(mem, &process_info->userptr_inval_list,
                            validate_list.head) {
+               struct hmm_range *range;
+
                invalid = atomic_read(&mem->invalid);
                if (!invalid)
                        /* BO hasn't been invalidated since the last
@@ -2382,7 +2383,8 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
                bo = mem->bo;
 
                /* Get updated user pages */
-               ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
+               ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
+                                                  &range);
                if (ret) {
                        pr_debug("Failed %d to get user pages\n", ret);
 
@@ -2401,7 +2403,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
                         * FIXME: Cannot ignore the return code, must hold
                         * notifier_lock
                         */
-                       amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+                       amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
                }
 
                /* Mark the BO as valid unless it was invalidated
index 2168163..252a876 100644 (file)
@@ -209,6 +209,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
                        list_add_tail(&e->tv.head, &bucket[priority]);
 
                e->user_pages = NULL;
+               e->range = NULL;
        }
 
        /* Connect the sorted buckets in the output list. */
index 9caea16..e4d7849 100644 (file)
@@ -26,6 +26,8 @@
 #include <drm/ttm/ttm_execbuf_util.h>
 #include <drm/amdgpu_drm.h>
 
+struct hmm_range;
+
 struct amdgpu_device;
 struct amdgpu_bo;
 struct amdgpu_bo_va;
@@ -36,6 +38,7 @@ struct amdgpu_bo_list_entry {
        struct amdgpu_bo_va             *bo_va;
        uint32_t                        priority;
        struct page                     **user_pages;
+       struct hmm_range                *range;
        bool                            user_invalidated;
 };
 
index 491d484..cfb2629 100644 (file)
@@ -328,7 +328,6 @@ static void amdgpu_connector_free_edid(struct drm_connector *connector)
 
        kfree(amdgpu_connector->edid);
        amdgpu_connector->edid = NULL;
-       drm_connector_update_edid_property(connector, NULL);
 }
 
 static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
index d371000..365e3fb 100644 (file)
@@ -109,6 +109,7 @@ static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
                return r;
 
        ++(num_ibs[r]);
+       p->gang_leader_idx = r;
        return 0;
 }
 
@@ -287,8 +288,10 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
                }
        }
 
-       if (!p->gang_size)
-               return -EINVAL;
+       if (!p->gang_size) {
+               ret = -EINVAL;
+               goto free_partial_kdata;
+       }
 
        for (i = 0; i < p->gang_size; ++i) {
                ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm);
@@ -300,7 +303,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
                if (ret)
                        goto free_all_kdata;
        }
-       p->gang_leader = p->jobs[p->gang_size - 1];
+       p->gang_leader = p->jobs[p->gang_leader_idx];
 
        if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) {
                ret = -ECANCELED;
@@ -910,7 +913,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                        goto out_free_user_pages;
                }
 
-               r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
+               r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
                if (r) {
                        kvfree(e->user_pages);
                        e->user_pages = NULL;
@@ -988,9 +991,10 @@ out_free_user_pages:
 
                if (!e->user_pages)
                        continue;
-               amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+               amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
                kvfree(e->user_pages);
                e->user_pages = NULL;
+               e->range = NULL;
        }
        mutex_unlock(&p->bo_list->bo_list_mutex);
        return r;
@@ -1195,16 +1199,18 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
                        return r;
        }
 
-       for (i = 0; i < p->gang_size - 1; ++i) {
+       for (i = 0; i < p->gang_size; ++i) {
+               if (p->jobs[i] == leader)
+                       continue;
+
                r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync);
                if (r)
                        return r;
        }
 
-       r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_size - 1]);
+       r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
        if (r && r != -ERESTARTSYS)
                DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
-
        return r;
 }
 
@@ -1238,9 +1244,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        for (i = 0; i < p->gang_size; ++i)
                drm_sched_job_arm(&p->jobs[i]->base);
 
-       for (i = 0; i < (p->gang_size - 1); ++i) {
+       for (i = 0; i < p->gang_size; ++i) {
                struct dma_fence *fence;
 
+               if (p->jobs[i] == leader)
+                       continue;
+
                fence = &p->jobs[i]->base.s_fence->scheduled;
                r = amdgpu_sync_fence(&leader->sync, fence);
                if (r)
@@ -1265,7 +1274,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 
-               r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+               r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
+               e->range = NULL;
        }
        if (r) {
                r = -EAGAIN;
@@ -1276,7 +1286,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        list_for_each_entry(e, &p->validated, tv.head) {
 
                /* Everybody except for the gang leader uses READ */
-               for (i = 0; i < (p->gang_size - 1); ++i) {
+               for (i = 0; i < p->gang_size; ++i) {
+                       if (p->jobs[i] == leader)
+                               continue;
+
                        dma_resv_add_fence(e->tv.bo->base.resv,
                                           &p->jobs[i]->base.s_fence->finished,
                                           DMA_RESV_USAGE_READ);
@@ -1286,7 +1299,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                e->tv.num_shared = 0;
        }
 
-       seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_size - 1],
+       seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
                                   p->fence);
        amdgpu_cs_post_dependencies(p);
 
index cbaa19b..f80adf9 100644 (file)
@@ -54,6 +54,7 @@ struct amdgpu_cs_parser {
 
        /* scheduler job objects */
        unsigned int            gang_size;
+       unsigned int            gang_leader_idx;
        struct drm_sched_entity *entities[AMDGPU_CS_GANG_SIZE];
        struct amdgpu_job       *jobs[AMDGPU_CS_GANG_SIZE];
        struct amdgpu_job       *gang_leader;
index 6451089..f1e9663 100644 (file)
@@ -6044,3 +6044,44 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
        dma_fence_put(old);
        return NULL;
 }
+
+bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+       case CHIP_HAINAN:
+#endif
+       case CHIP_TOPAZ:
+               /* chips with no display hardware */
+               return false;
+#ifdef CONFIG_DRM_AMDGPU_SI
+       case CHIP_TAHITI:
+       case CHIP_PITCAIRN:
+       case CHIP_VERDE:
+       case CHIP_OLAND:
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+       case CHIP_BONAIRE:
+       case CHIP_HAWAII:
+       case CHIP_KAVERI:
+       case CHIP_KABINI:
+       case CHIP_MULLINS:
+#endif
+       case CHIP_TONGA:
+       case CHIP_FIJI:
+       case CHIP_POLARIS10:
+       case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
+       case CHIP_VEGAM:
+       case CHIP_CARRIZO:
+       case CHIP_STONEY:
+               /* chips with display hardware */
+               return true;
+       default:
+               /* IP discovery */
+               if (!adev->ip_versions[DCE_HWIP][0] ||
+                   (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
+                       return false;
+               return true;
+       }
+}
index 8ef31d6..91571b1 100644 (file)
@@ -378,6 +378,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_amdgpu_gem_userptr *args = data;
        struct drm_gem_object *gobj;
+       struct hmm_range *range;
        struct amdgpu_bo *bo;
        uint32_t handle;
        int r;
@@ -413,14 +414,13 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
        if (r)
                goto release_object;
 
-       if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
-               r = amdgpu_mn_register(bo, args->addr);
-               if (r)
-                       goto release_object;
-       }
+       r = amdgpu_mn_register(bo, args->addr);
+       if (r)
+               goto release_object;
 
        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
-               r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
+               r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
+                                                &range);
                if (r)
                        goto release_object;
 
@@ -443,7 +443,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 
 user_pages_done:
        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
-               amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+               amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
 
 release_object:
        drm_gem_object_put(gobj);
index 34233a7..28612e5 100644 (file)
@@ -479,6 +479,12 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
        unsigned i;
        unsigned vmhub, inv_eng;
 
+       if (adev->enable_mes) {
+               /* reserve engine 5 for firmware */
+               for (vmhub = 0; vmhub < AMDGPU_MAX_VMHUBS; vmhub++)
+                       vm_inv_engs[vmhub] &= ~(1 << 5);
+       }
+
        for (i = 0; i < adev->num_rings; ++i) {
                ring = adev->rings[i];
                vmhub = ring->funcs->vmhub;
@@ -656,7 +662,7 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
        }
 
        if (amdgpu_sriov_vf(adev) ||
-           !amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) {
+           !amdgpu_device_has_display_hardware(adev)) {
                size = 0;
        } else {
                size = amdgpu_gmc_get_vbios_fb_size(adev);
index cd968e7..adac650 100644 (file)
@@ -169,7 +169,11 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
 
-       dma_fence_put(&job->hw_fence);
+       /* only put the hw fence if has embedded fence */
+       if (!job->hw_fence.ops)
+               kfree(job);
+       else
+               dma_fence_put(&job->hw_fence);
 }
 
 void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
@@ -254,6 +258,9 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
                        DRM_ERROR("Error adding fence (%d)\n", r);
        }
 
+       if (!fence && job->gang_submit)
+               fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
+
        while (fence == NULL && vm && !job->vmid) {
                r = amdgpu_vmid_grab(vm, ring, &job->sync,
                                     &job->base.s_fence->finished,
@@ -264,9 +271,6 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
                fence = amdgpu_sync_get_fence(&job->sync);
        }
 
-       if (!fence && job->gang_submit)
-               fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
-
        return fence;
 }
 
index effa7df..7978307 100644 (file)
@@ -172,6 +172,7 @@ void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
 {
        amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
                              &mem_ctx->shared_buf);
+       mem_ctx->shared_bo = NULL;
 }
 
 static void psp_free_shared_bufs(struct psp_context *psp)
@@ -182,6 +183,7 @@ static void psp_free_shared_bufs(struct psp_context *psp)
        /* free TMR memory buffer */
        pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
        amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+       psp->tmr_bo = NULL;
 
        /* free xgmi shared memory */
        psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
@@ -743,7 +745,7 @@ static int psp_load_toc(struct psp_context *psp,
 /* Set up Trusted Memory Region */
 static int psp_tmr_init(struct psp_context *psp)
 {
-       int ret;
+       int ret = 0;
        int tmr_size;
        void *tmr_buf;
        void **pptr;
@@ -770,10 +772,12 @@ static int psp_tmr_init(struct psp_context *psp)
                }
        }
 
-       pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
-       ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
-                                     AMDGPU_GEM_DOMAIN_VRAM,
-                                     &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+       if (!psp->tmr_bo) {
+               pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
+               ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
+                                             AMDGPU_GEM_DOMAIN_VRAM,
+                                             &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+       }
 
        return ret;
 }
@@ -2732,8 +2736,6 @@ static int psp_suspend(void *handle)
        }
 
 out:
-       psp_free_shared_bufs(psp);
-
        return ret;
 }
 
index 57277b1..b64938e 100644 (file)
@@ -643,9 +643,6 @@ struct amdgpu_ttm_tt {
        struct task_struct      *usertask;
        uint32_t                userflags;
        bool                    bound;
-#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-       struct hmm_range        *range;
-#endif
 };
 
 #define ttm_to_amdgpu_ttm_tt(ptr)      container_of(ptr, struct amdgpu_ttm_tt, ttm)
@@ -658,7 +655,8 @@ struct amdgpu_ttm_tt {
  * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
  * once afterwards to stop HMM tracking
  */
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
+                                struct hmm_range **range)
 {
        struct ttm_tt *ttm = bo->tbo.ttm;
        struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
@@ -668,16 +666,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
        bool readonly;
        int r = 0;
 
+       /* Make sure get_user_pages_done() can cleanup gracefully */
+       *range = NULL;
+
        mm = bo->notifier.mm;
        if (unlikely(!mm)) {
                DRM_DEBUG_DRIVER("BO is not registered?\n");
                return -EFAULT;
        }
 
-       /* Another get_user_pages is running at the same time?? */
-       if (WARN_ON(gtt->range))
-               return -EFAULT;
-
        if (!mmget_not_zero(mm)) /* Happens during process shutdown */
                return -ESRCH;
 
@@ -695,7 +692,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 
        readonly = amdgpu_ttm_tt_is_readonly(ttm);
        r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
-                                      ttm->num_pages, &gtt->range, readonly,
+                                      ttm->num_pages, range, readonly,
                                       true, NULL);
 out_unlock:
        mmap_read_unlock(mm);
@@ -713,30 +710,24 @@ out_unlock:
  *
  * Returns: true if pages are still valid
  */
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
+bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
+                                      struct hmm_range *range)
 {
        struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
-       bool r = false;
 
-       if (!gtt || !gtt->userptr)
+       if (!gtt || !gtt->userptr || !range)
                return false;
 
        DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
                gtt->userptr, ttm->num_pages);
 
-       WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
-               "No user pages to check\n");
+       WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
 
-       if (gtt->range) {
-               /*
-                * FIXME: Must always hold notifier_lock for this, and must
-                * not ignore the return code.
-                */
-               r = amdgpu_hmm_range_get_pages_done(gtt->range);
-               gtt->range = NULL;
-       }
-
-       return !r;
+       /*
+        * FIXME: Must always hold notifier_lock for this, and must
+        * not ignore the return code.
+        */
+       return !amdgpu_hmm_range_get_pages_done(range);
 }
 #endif
 
@@ -813,20 +804,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
        /* unmap the pages mapped to the device */
        dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
        sg_free_table(ttm->sg);
-
-#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-       if (gtt->range) {
-               unsigned long i;
-
-               for (i = 0; i < ttm->num_pages; i++) {
-                       if (ttm->pages[i] !=
-                           hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
-                               break;
-               }
-
-               WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
-       }
-#endif
 }
 
 static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
index 6a70818..a372070 100644 (file)
@@ -39,6 +39,8 @@
 
 #define AMDGPU_POISON  0xd0bed0be
 
+struct hmm_range;
+
 struct amdgpu_gtt_mgr {
        struct ttm_resource_manager manager;
        struct drm_mm mm;
@@ -149,15 +151,19 @@ void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
 uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
 
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages);
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
+                                struct hmm_range **range);
+bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
+                                      struct hmm_range *range);
 #else
 static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
-                                              struct page **pages)
+                                              struct page **pages,
+                                              struct hmm_range **range)
 {
        return -EPERM;
 }
-static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
+static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
+                                                    struct hmm_range *range)
 {
        return false;
 }
index f772bb4..0312c71 100644 (file)
@@ -32,7 +32,6 @@
 
 #define RB_ENABLED (1 << 0)
 #define RB4_ENABLED (1 << 1)
-#define MMSCH_DOORBELL_OFFSET 0x8
 
 #define MMSCH_VF_ENGINE_STATUS__PASS 0x1
 
index 21d822b..88f9b32 100644 (file)
@@ -45,6 +45,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin");
 
 /* For large FW files the time to complete can be very long */
 #define USBC_PD_POLLING_LIMIT_S 240
index 897a5ce..dcc49b0 100644 (file)
@@ -100,7 +100,6 @@ static int vcn_v4_0_sw_init(void *handle)
        struct amdgpu_ring *ring;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i, r;
-       int vcn_doorbell_index = 0;
 
        r = amdgpu_vcn_sw_init(adev);
        if (r)
@@ -112,12 +111,6 @@ static int vcn_v4_0_sw_init(void *handle)
        if (r)
                return r;
 
-       if (amdgpu_sriov_vf(adev)) {
-               vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 - MMSCH_DOORBELL_OFFSET;
-               /* get DWORD offset */
-               vcn_doorbell_index = vcn_doorbell_index << 1;
-       }
-
        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
                volatile struct amdgpu_vcn4_fw_shared *fw_shared;
 
@@ -135,7 +128,7 @@ static int vcn_v4_0_sw_init(void *handle)
                ring = &adev->vcn.inst[i].ring_enc[0];
                ring->use_doorbell = true;
                if (amdgpu_sriov_vf(adev))
-                       ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1) + 1;
+                       ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i * (adev->vcn.num_enc_rings + 1) + 1;
                else
                        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
 
index 509739d..512c323 100644 (file)
@@ -147,6 +147,14 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
 /* Number of bytes in PSP footer for firmware. */
 #define PSP_FOOTER_BYTES 0x100
 
+/*
+ * DMUB Async to Sync Mechanism Status
+ */
+#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
+#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
+#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
+#define DMUB_ASYNC_TO_SYNC_ACCESS_INVALID 4
+
 /**
  * DOC: overview
  *
@@ -1364,7 +1372,44 @@ static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
                },
        },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
+               },
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
+               },
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
+               },
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
+               },
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
+               },
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
+               },
+       },
        {}
+       /* TODO: refactor this from a fixed table to a dynamic option */
 };
 
 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
@@ -1637,12 +1682,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                }
        }
 
-       if (amdgpu_dm_initialize_drm_device(adev)) {
-               DRM_ERROR(
-               "amdgpu: failed to initialize sw for display support.\n");
-               goto error;
-       }
-
        /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
         * It is expected that DMUB will resend any pending notifications at this point, for
         * example HPD from DPIA.
@@ -1650,6 +1689,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
        if (dc_is_dmub_outbox_supported(adev->dm.dc))
                dc_enable_dmub_outbox(adev->dm.dc);
 
+       if (amdgpu_dm_initialize_drm_device(adev)) {
+               DRM_ERROR(
+               "amdgpu: failed to initialize sw for display support.\n");
+               goto error;
+       }
+
        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);
 
@@ -6467,7 +6512,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
        struct drm_connector_state *new_con_state;
        struct amdgpu_dm_connector *aconnector;
        struct dm_connector_state *dm_conn_state;
-       int i, j;
+       int i, j, ret;
        int vcpi, pbn_div, pbn, slot_num = 0;
 
        for_each_new_connector_in_state(state, connector, new_con_state, i) {
@@ -6514,8 +6559,11 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
                        dm_conn_state->pbn = pbn;
                        dm_conn_state->vcpi_slots = slot_num;
 
-                       drm_dp_mst_atomic_enable_dsc(state, aconnector->port, dm_conn_state->pbn,
-                                                    false);
+                       ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->port,
+                                                          dm_conn_state->pbn, false);
+                       if (ret < 0)
+                               return ret;
+
                        continue;
                }
 
@@ -9529,10 +9577,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        if (dc_resource_is_dsc_encoding_supported(dc)) {
-               if (!pre_validate_dsc(state, &dm_state, vars)) {
-                       ret = -EINVAL;
+               ret = pre_validate_dsc(state, &dm_state, vars);
+               if (ret != 0)
                        goto fail;
-               }
        }
 #endif
 
@@ -9627,9 +9674,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                }
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-               if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
+               ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+               if (ret) {
                        DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
-                       ret = -EINVAL;
                        goto fail;
                }
 
@@ -10109,6 +10156,8 @@ static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
                        *operation_result = AUX_RET_ERROR_TIMEOUT;
                } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
                        *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
+               } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_INVALID) {
+                       *operation_result = AUX_RET_ERROR_INVALID_REPLY;
                } else {
                        *operation_result = AUX_RET_ERROR_UNKNOWN;
                }
@@ -10156,6 +10205,16 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context
                        payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
                        if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
                            payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
+
+                               if (payload->length != adev->dm.dmub_notify->aux_reply.length) {
+                                       DRM_WARN("invalid read from DPIA AUX %x(%d) got length %d!\n",
+                                                       payload->address, payload->length,
+                                                       adev->dm.dmub_notify->aux_reply.length);
+                                       return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, ctx,
+                                                       DMUB_ASYNC_TO_SYNC_ACCESS_INVALID,
+                                                       (uint32_t *)operation_result);
+                               }
+
                                memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
                                       adev->dm.dmub_notify->aux_reply.length);
                        }
index b5ce15c..635c398 100644 (file)
 #define AMDGPU_DMUB_NOTIFICATION_MAX 5
 
 /*
- * DMUB Async to Sync Mechanism Status
- */
-#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
-#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
-#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
-/*
 #include "include/amdgpu_dal_power_if.h"
 #include "amdgpu_dm_irq.h"
 */
index 594fe8a..64dd029 100644 (file)
@@ -412,7 +412,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
 {
        struct amdgpu_crtc *acrtc = NULL;
        struct drm_plane *cursor_plane;
-
+       bool is_dcn;
        int res = -ENOMEM;
 
        cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
@@ -450,8 +450,14 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
        acrtc->otg_inst = -1;
 
        dm->adev->mode_info.crtcs[crtc_index] = acrtc;
-       drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
+
+       /* Don't enable DRM CRTC degamma property for DCE since it doesn't
+        * support programmable degamma anywhere.
+        */
+       is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch;
+       drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0,
                                   true, MAX_COLOR_LUT_ENTRIES);
+
        drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
 
        return 0;
index 6ff96b4..6483ba2 100644 (file)
@@ -703,13 +703,13 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
        return dsc_config.bits_per_pixel;
 }
 
-static bool increase_dsc_bpp(struct drm_atomic_state *state,
-                            struct drm_dp_mst_topology_state *mst_state,
-                            struct dc_link *dc_link,
-                            struct dsc_mst_fairness_params *params,
-                            struct dsc_mst_fairness_vars *vars,
-                            int count,
-                            int k)
+static int increase_dsc_bpp(struct drm_atomic_state *state,
+                           struct drm_dp_mst_topology_state *mst_state,
+                           struct dc_link *dc_link,
+                           struct dsc_mst_fairness_params *params,
+                           struct dsc_mst_fairness_vars *vars,
+                           int count,
+                           int k)
 {
        int i;
        bool bpp_increased[MAX_PIPES];
@@ -719,6 +719,7 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state,
        int remaining_to_increase = 0;
        int link_timeslots_used;
        int fair_pbn_alloc;
+       int ret = 0;
 
        for (i = 0; i < count; i++) {
                if (vars[i + k].dsc_enabled) {
@@ -757,52 +758,60 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state,
 
                if (initial_slack[next_index] > fair_pbn_alloc) {
                        vars[next_index].pbn += fair_pbn_alloc;
-                       if (drm_dp_atomic_find_time_slots(state,
-                                                         params[next_index].port->mgr,
-                                                         params[next_index].port,
-                                                         vars[next_index].pbn) < 0)
-                               return false;
-                       if (!drm_dp_mst_atomic_check(state)) {
+                       ret = drm_dp_atomic_find_time_slots(state,
+                                                           params[next_index].port->mgr,
+                                                           params[next_index].port,
+                                                           vars[next_index].pbn);
+                       if (ret < 0)
+                               return ret;
+
+                       ret = drm_dp_mst_atomic_check(state);
+                       if (ret == 0) {
                                vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
                        } else {
                                vars[next_index].pbn -= fair_pbn_alloc;
-                               if (drm_dp_atomic_find_time_slots(state,
-                                                                 params[next_index].port->mgr,
-                                                                 params[next_index].port,
-                                                                 vars[next_index].pbn) < 0)
-                                       return false;
+                               ret = drm_dp_atomic_find_time_slots(state,
+                                                                   params[next_index].port->mgr,
+                                                                   params[next_index].port,
+                                                                   vars[next_index].pbn);
+                               if (ret < 0)
+                                       return ret;
                        }
                } else {
                        vars[next_index].pbn += initial_slack[next_index];
-                       if (drm_dp_atomic_find_time_slots(state,
-                                                         params[next_index].port->mgr,
-                                                         params[next_index].port,
-                                                         vars[next_index].pbn) < 0)
-                               return false;
-                       if (!drm_dp_mst_atomic_check(state)) {
+                       ret = drm_dp_atomic_find_time_slots(state,
+                                                           params[next_index].port->mgr,
+                                                           params[next_index].port,
+                                                           vars[next_index].pbn);
+                       if (ret < 0)
+                               return ret;
+
+                       ret = drm_dp_mst_atomic_check(state);
+                       if (ret == 0) {
                                vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
                        } else {
                                vars[next_index].pbn -= initial_slack[next_index];
-                               if (drm_dp_atomic_find_time_slots(state,
-                                                                 params[next_index].port->mgr,
-                                                                 params[next_index].port,
-                                                                 vars[next_index].pbn) < 0)
-                                       return false;
+                               ret = drm_dp_atomic_find_time_slots(state,
+                                                                   params[next_index].port->mgr,
+                                                                   params[next_index].port,
+                                                                   vars[next_index].pbn);
+                               if (ret < 0)
+                                       return ret;
                        }
                }
 
                bpp_increased[next_index] = true;
                remaining_to_increase--;
        }
-       return true;
+       return 0;
 }
 
-static bool try_disable_dsc(struct drm_atomic_state *state,
-                           struct dc_link *dc_link,
-                           struct dsc_mst_fairness_params *params,
-                           struct dsc_mst_fairness_vars *vars,
-                           int count,
-                           int k)
+static int try_disable_dsc(struct drm_atomic_state *state,
+                          struct dc_link *dc_link,
+                          struct dsc_mst_fairness_params *params,
+                          struct dsc_mst_fairness_vars *vars,
+                          int count,
+                          int k)
 {
        int i;
        bool tried[MAX_PIPES];
@@ -810,6 +819,7 @@ static bool try_disable_dsc(struct drm_atomic_state *state,
        int max_kbps_increase;
        int next_index;
        int remaining_to_try = 0;
+       int ret;
 
        for (i = 0; i < count; i++) {
                if (vars[i + k].dsc_enabled
@@ -840,49 +850,52 @@ static bool try_disable_dsc(struct drm_atomic_state *state,
                        break;
 
                vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
-               if (drm_dp_atomic_find_time_slots(state,
-                                                 params[next_index].port->mgr,
-                                                 params[next_index].port,
-                                                 vars[next_index].pbn) < 0)
-                       return false;
+               ret = drm_dp_atomic_find_time_slots(state,
+                                                   params[next_index].port->mgr,
+                                                   params[next_index].port,
+                                                   vars[next_index].pbn);
+               if (ret < 0)
+                       return ret;
 
-               if (!drm_dp_mst_atomic_check(state)) {
+               ret = drm_dp_mst_atomic_check(state);
+               if (ret == 0) {
                        vars[next_index].dsc_enabled = false;
                        vars[next_index].bpp_x16 = 0;
                } else {
                        vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
-                       if (drm_dp_atomic_find_time_slots(state,
-                                                         params[next_index].port->mgr,
-                                                         params[next_index].port,
-                                                         vars[next_index].pbn) < 0)
-                               return false;
+                       ret = drm_dp_atomic_find_time_slots(state,
+                                                           params[next_index].port->mgr,
+                                                           params[next_index].port,
+                                                           vars[next_index].pbn);
+                       if (ret < 0)
+                               return ret;
                }
 
                tried[next_index] = true;
                remaining_to_try--;
        }
-       return true;
+       return 0;
 }
 
-static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
-                                            struct dc_state *dc_state,
-                                            struct dc_link *dc_link,
-                                            struct dsc_mst_fairness_vars *vars,
-                                            struct drm_dp_mst_topology_mgr *mgr,
-                                            int *link_vars_start_index)
+static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+                                           struct dc_state *dc_state,
+                                           struct dc_link *dc_link,
+                                           struct dsc_mst_fairness_vars *vars,
+                                           struct drm_dp_mst_topology_mgr *mgr,
+                                           int *link_vars_start_index)
 {
        struct dc_stream_state *stream;
        struct dsc_mst_fairness_params params[MAX_PIPES];
        struct amdgpu_dm_connector *aconnector;
        struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr);
        int count = 0;
-       int i, k;
+       int i, k, ret;
        bool debugfs_overwrite = false;
 
        memset(params, 0, sizeof(params));
 
        if (IS_ERR(mst_state))
-               return false;
+               return PTR_ERR(mst_state);
 
        mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link);
 #if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -933,7 +946,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 
        if (count == 0) {
                ASSERT(0);
-               return true;
+               return 0;
        }
 
        /* k is start index of vars for current phy link used by mst hub */
@@ -947,13 +960,17 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
                vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
                vars[i + k].dsc_enabled = false;
                vars[i + k].bpp_x16 = 0;
-               if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
-                                                 vars[i + k].pbn) < 0)
-                       return false;
+               ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
+                                                   vars[i + k].pbn);
+               if (ret < 0)
+                       return ret;
        }
-       if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) {
+       ret = drm_dp_mst_atomic_check(state);
+       if (ret == 0 && !debugfs_overwrite) {
                set_dsc_configs_from_fairness_vars(params, vars, count, k);
-               return true;
+               return 0;
+       } else if (ret != -ENOSPC) {
+               return ret;
        }
 
        /* Try max compression */
@@ -962,31 +979,36 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
                        vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
                        vars[i + k].dsc_enabled = true;
                        vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
-                       if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
-                                                         params[i].port, vars[i + k].pbn) < 0)
-                               return false;
+                       ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+                                                           params[i].port, vars[i + k].pbn);
+                       if (ret < 0)
+                               return ret;
                } else {
                        vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
                        vars[i + k].dsc_enabled = false;
                        vars[i + k].bpp_x16 = 0;
-                       if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
-                                                         params[i].port, vars[i + k].pbn) < 0)
-                               return false;
+                       ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+                                                           params[i].port, vars[i + k].pbn);
+                       if (ret < 0)
+                               return ret;
                }
        }
-       if (drm_dp_mst_atomic_check(state))
-               return false;
+       ret = drm_dp_mst_atomic_check(state);
+       if (ret != 0)
+               return ret;
 
        /* Optimize degree of compression */
-       if (!increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k))
-               return false;
+       ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k);
+       if (ret < 0)
+               return ret;
 
-       if (!try_disable_dsc(state, dc_link, params, vars, count, k))
-               return false;
+       ret = try_disable_dsc(state, dc_link, params, vars, count, k);
+       if (ret < 0)
+               return ret;
 
        set_dsc_configs_from_fairness_vars(params, vars, count, k);
 
-       return true;
+       return 0;
 }
 
 static bool is_dsc_need_re_compute(
@@ -1087,15 +1109,17 @@ static bool is_dsc_need_re_compute(
        return is_dsc_need_re_compute;
 }
 
-bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
-                                      struct dc_state *dc_state,
-                                      struct dsc_mst_fairness_vars *vars)
+int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+                                     struct dc_state *dc_state,
+                                     struct dsc_mst_fairness_vars *vars)
 {
        int i, j;
        struct dc_stream_state *stream;
        bool computed_streams[MAX_PIPES];
        struct amdgpu_dm_connector *aconnector;
+       struct drm_dp_mst_topology_mgr *mst_mgr;
        int link_vars_start_index = 0;
+       int ret = 0;
 
        for (i = 0; i < dc_state->stream_count; i++)
                computed_streams[i] = false;
@@ -1108,7 +1132,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
 
                aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
 
-               if (!aconnector || !aconnector->dc_sink)
+               if (!aconnector || !aconnector->dc_sink || !aconnector->port)
                        continue;
 
                if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
@@ -1118,19 +1142,16 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
                        continue;
 
                if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
-                       return false;
+                       return -EINVAL;
 
                if (!is_dsc_need_re_compute(state, dc_state, stream->link))
                        continue;
 
-               mutex_lock(&aconnector->mst_mgr.lock);
-               if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars,
-                                                     &aconnector->mst_mgr,
-                                                     &link_vars_start_index)) {
-                       mutex_unlock(&aconnector->mst_mgr.lock);
-                       return false;
-               }
-               mutex_unlock(&aconnector->mst_mgr.lock);
+               mst_mgr = aconnector->port->mgr;
+               ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
+                                                      &link_vars_start_index);
+               if (ret != 0)
+                       return ret;
 
                for (j = 0; j < dc_state->stream_count; j++) {
                        if (dc_state->streams[j]->link == stream->link)
@@ -1143,22 +1164,23 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
 
                if (stream->timing.flags.DSC == 1)
                        if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
-                               return false;
+                               return -EINVAL;
        }
 
-       return true;
+       return ret;
 }
 
-static bool
-       pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
-                                             struct dc_state *dc_state,
-                                             struct dsc_mst_fairness_vars *vars)
+static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+                                                struct dc_state *dc_state,
+                                                struct dsc_mst_fairness_vars *vars)
 {
        int i, j;
        struct dc_stream_state *stream;
        bool computed_streams[MAX_PIPES];
        struct amdgpu_dm_connector *aconnector;
+       struct drm_dp_mst_topology_mgr *mst_mgr;
        int link_vars_start_index = 0;
+       int ret = 0;
 
        for (i = 0; i < dc_state->stream_count; i++)
                computed_streams[i] = false;
@@ -1171,7 +1193,7 @@ static bool
 
                aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
 
-               if (!aconnector || !aconnector->dc_sink)
+               if (!aconnector || !aconnector->dc_sink || !aconnector->port)
                        continue;
 
                if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
@@ -1183,14 +1205,11 @@ static bool
                if (!is_dsc_need_re_compute(state, dc_state, stream->link))
                        continue;
 
-               mutex_lock(&aconnector->mst_mgr.lock);
-               if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars,
-                                                     &aconnector->mst_mgr,
-                                                     &link_vars_start_index)) {
-                       mutex_unlock(&aconnector->mst_mgr.lock);
-                       return false;
-               }
-               mutex_unlock(&aconnector->mst_mgr.lock);
+               mst_mgr = aconnector->port->mgr;
+               ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
+                                                      &link_vars_start_index);
+               if (ret != 0)
+                       return ret;
 
                for (j = 0; j < dc_state->stream_count; j++) {
                        if (dc_state->streams[j]->link == stream->link)
@@ -1198,7 +1217,7 @@ static bool
                }
        }
 
-       return true;
+       return ret;
 }
 
 static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state,
@@ -1253,9 +1272,9 @@ static bool is_dsc_precompute_needed(struct drm_atomic_state *state)
        return ret;
 }
 
-bool pre_validate_dsc(struct drm_atomic_state *state,
-                     struct dm_atomic_state **dm_state_ptr,
-                     struct dsc_mst_fairness_vars *vars)
+int pre_validate_dsc(struct drm_atomic_state *state,
+                    struct dm_atomic_state **dm_state_ptr,
+                    struct dsc_mst_fairness_vars *vars)
 {
        int i;
        struct dm_atomic_state *dm_state;
@@ -1264,11 +1283,12 @@ bool pre_validate_dsc(struct drm_atomic_state *state,
 
        if (!is_dsc_precompute_needed(state)) {
                DRM_INFO_ONCE("DSC precompute is not needed.\n");
-               return true;
+               return 0;
        }
-       if (dm_atomic_get_state(state, dm_state_ptr)) {
+       ret = dm_atomic_get_state(state, dm_state_ptr);
+       if (ret != 0) {
                DRM_INFO_ONCE("dm_atomic_get_state() failed\n");
-               return false;
+               return ret;
        }
        dm_state = *dm_state_ptr;
 
@@ -1280,7 +1300,7 @@ bool pre_validate_dsc(struct drm_atomic_state *state,
 
        local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL);
        if (!local_dc_state)
-               return false;
+               return -ENOMEM;
 
        for (i = 0; i < local_dc_state->stream_count; i++) {
                struct dc_stream_state *stream = dm_state->context->streams[i];
@@ -1316,9 +1336,9 @@ bool pre_validate_dsc(struct drm_atomic_state *state,
        if (ret != 0)
                goto clean_exit;
 
-       if (!pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars)) {
+       ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
+       if (ret != 0) {
                DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
-               ret = -EINVAL;
                goto clean_exit;
        }
 
@@ -1349,7 +1369,7 @@ clean_exit:
 
        kfree(local_dc_state);
 
-       return (ret == 0);
+       return ret;
 }
 
 static unsigned int kbps_from_pbn(unsigned int pbn)
@@ -1392,6 +1412,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
        unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
        unsigned int max_compressed_bw_in_kbps = 0;
        struct dc_dsc_bw_range bw_range = {0};
+       struct drm_dp_mst_topology_mgr *mst_mgr;
 
        /*
         * check if the mode could be supported if DSC pass-through is supported
@@ -1400,7 +1421,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
         */
        if (is_dsc_common_config_possible(stream, &bw_range) &&
            aconnector->port->passthrough_aux) {
-               mutex_lock(&aconnector->mst_mgr.lock);
+               mst_mgr = aconnector->port->mgr;
+               mutex_lock(&mst_mgr->lock);
 
                cur_link_settings = stream->link->verified_link_cap;
 
@@ -1413,7 +1435,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
                end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
                                            down_link_bw_in_kbps);
 
-               mutex_unlock(&aconnector->mst_mgr.lock);
+               mutex_unlock(&mst_mgr->lock);
 
                /*
                 * use the maximum dsc compression bandwidth as the required
index b92a7c5..97fd70d 100644 (file)
@@ -53,15 +53,15 @@ struct dsc_mst_fairness_vars {
        struct amdgpu_dm_connector *aconnector;
 };
 
-bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
-                                      struct dc_state *dc_state,
-                                      struct dsc_mst_fairness_vars *vars);
+int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
+                                     struct dc_state *dc_state,
+                                     struct dsc_mst_fairness_vars *vars);
 
 bool needs_dsc_aux_workaround(struct dc_link *link);
 
-bool pre_validate_dsc(struct drm_atomic_state *state,
-                     struct dm_atomic_state **dm_state_ptr,
-                     struct dsc_mst_fairness_vars *vars);
+int pre_validate_dsc(struct drm_atomic_state *state,
+                    struct dm_atomic_state **dm_state_ptr,
+                    struct dsc_mst_fairness_vars *vars);
 
 enum dc_status dm_dp_mst_is_port_support_mode(
        struct amdgpu_dm_connector *aconnector,
index ee0456b..e0c8d6f 100644 (file)
@@ -2393,6 +2393,26 @@ static enum bp_result get_vram_info_v25(
        return result;
 }
 
+static enum bp_result get_vram_info_v30(
+       struct bios_parser *bp,
+       struct dc_vram_info *info)
+{
+       struct atom_vram_info_header_v3_0 *info_v30;
+       enum bp_result result = BP_RESULT_OK;
+
+       info_v30 = GET_IMAGE(struct atom_vram_info_header_v3_0,
+                                               DATA_TABLES(vram_info));
+
+       if (info_v30 == NULL)
+               return BP_RESULT_BADBIOSTABLE;
+
+       info->num_chans = info_v30->channel_num;
+       info->dram_channel_width_bytes = (1 << info_v30->channel_width) / 8;
+
+       return result;
+}
+
+
 /*
  * get_integrated_info_v11
  *
@@ -3060,6 +3080,16 @@ static enum bp_result bios_parser_get_vram_info(
                        }
                        break;
 
+               case 3:
+                       switch (revision.minor) {
+                       case 0:
+                               result = get_vram_info_v30(bp, info);
+                               break;
+                       default:
+                               break;
+                       }
+                       break;
+
                default:
                        return result;
                }
index ef0795b..2db5956 100644 (file)
@@ -123,9 +123,10 @@ static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
        uint32_t result;
 
        result = dcn314_smu_wait_for_response(clk_mgr, 10, 200000);
-       ASSERT(result == VBIOSSMC_Result_OK);
 
-       smu_print("SMU response after wait: %d\n", result);
+       if (result != VBIOSSMC_Result_OK)
+               smu_print("SMU Response was not OK. SMU response after wait received is: %d\n",
+                               result);
 
        if (result == VBIOSSMC_Status_BUSY)
                return -1;
@@ -216,6 +217,12 @@ int dcn314_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
                        VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
                        khz_to_mhz_ceil(requested_dcfclk_khz));
 
+#ifdef DBG
+       smu_print("actual_dcfclk_set_mhz %d is set to : %d\n",
+                       actual_dcfclk_set_mhz,
+                       actual_dcfclk_set_mhz * 1000);
+#endif
+
        return actual_dcfclk_set_mhz * 1000;
 }
 
index 1b70b78..af63108 100644 (file)
@@ -359,7 +359,8 @@ static const struct dce_audio_registers audio_regs[] = {
        audio_regs(2),
        audio_regs(3),
        audio_regs(4),
-       audio_regs(5)
+       audio_regs(5),
+       audio_regs(6),
 };
 
 #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
index b9765b3..ef52e6b 100644 (file)
@@ -436,34 +436,48 @@ void dpp1_set_cursor_position(
                uint32_t height)
 {
        struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
-       int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
-       int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
+       int x_pos = pos->x - param->viewport.x;
+       int y_pos = pos->y - param->viewport.y;
+       int x_hotspot = pos->x_hotspot;
+       int y_hotspot = pos->y_hotspot;
+       int src_x_offset = x_pos - pos->x_hotspot;
+       int src_y_offset = y_pos - pos->y_hotspot;
+       int cursor_height = (int)height;
+       int cursor_width = (int)width;
        uint32_t cur_en = pos->enable ? 1 : 0;
 
-       // Cursor width/height and hotspots need to be rotated for offset calculation
+       // Transform cursor width / height and hotspots for offset calculations
        if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
-               swap(width, height);
+               swap(cursor_height, cursor_width);
+               swap(x_hotspot, y_hotspot);
+
                if (param->rotation == ROTATION_ANGLE_90) {
-                       src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
-                       src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
+                       // hotspot = (-y, x)
+                       src_x_offset = x_pos - (cursor_width - x_hotspot);
+                       src_y_offset = y_pos - y_hotspot;
+               } else if (param->rotation == ROTATION_ANGLE_270) {
+                       // hotspot = (y, -x)
+                       src_x_offset = x_pos - x_hotspot;
+                       src_y_offset = y_pos - (cursor_height - y_hotspot);
                }
        } else if (param->rotation == ROTATION_ANGLE_180) {
+               // hotspot = (-x, -y)
                if (!param->mirror)
-                       src_x_offset = pos->x - param->viewport.x;
+                       src_x_offset = x_pos - (cursor_width - x_hotspot);
 
-               src_y_offset = pos->y - param->viewport.y;
+               src_y_offset = y_pos - (cursor_height - y_hotspot);
        }
 
        if (src_x_offset >= (int)param->viewport.width)
                cur_en = 0;  /* not visible beyond right edge*/
 
-       if (src_x_offset + (int)width <= 0)
+       if (src_x_offset + cursor_width <= 0)
                cur_en = 0;  /* not visible beyond left edge*/
 
        if (src_y_offset >= (int)param->viewport.height)
                cur_en = 0;  /* not visible beyond bottom edge*/
 
-       if (src_y_offset + (int)height <= 0)
+       if (src_y_offset + cursor_height <= 0)
                cur_en = 0;  /* not visible beyond top edge*/
 
        REG_UPDATE(CURSOR0_CONTROL,
index 52e201e..a142a00 100644 (file)
@@ -1179,10 +1179,12 @@ void hubp1_cursor_set_position(
                const struct dc_cursor_mi_param *param)
 {
        struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
-       int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
-       int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
+       int x_pos = pos->x - param->viewport.x;
+       int y_pos = pos->y - param->viewport.y;
        int x_hotspot = pos->x_hotspot;
        int y_hotspot = pos->y_hotspot;
+       int src_x_offset = x_pos - pos->x_hotspot;
+       int src_y_offset = y_pos - pos->y_hotspot;
        int cursor_height = (int)hubp->curs_attr.height;
        int cursor_width = (int)hubp->curs_attr.width;
        uint32_t dst_x_offset;
@@ -1200,18 +1202,26 @@ void hubp1_cursor_set_position(
        if (hubp->curs_attr.address.quad_part == 0)
                return;
 
-       // Rotated cursor width/height and hotspots tweaks for offset calculation
+       // Transform cursor width / height and hotspots for offset calculations
        if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
                swap(cursor_height, cursor_width);
+               swap(x_hotspot, y_hotspot);
+
                if (param->rotation == ROTATION_ANGLE_90) {
-                       src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
-                       src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
+                       // hotspot = (-y, x)
+                       src_x_offset = x_pos - (cursor_width - x_hotspot);
+                       src_y_offset = y_pos - y_hotspot;
+               } else if (param->rotation == ROTATION_ANGLE_270) {
+                       // hotspot = (y, -x)
+                       src_x_offset = x_pos - x_hotspot;
+                       src_y_offset = y_pos - (cursor_height - y_hotspot);
                }
        } else if (param->rotation == ROTATION_ANGLE_180) {
+               // hotspot = (-x, -y)
                if (!param->mirror)
-                       src_x_offset = pos->x - param->viewport.x;
+                       src_x_offset = x_pos - (cursor_width - x_hotspot);
 
-               src_y_offset = pos->y - param->viewport.y;
+               src_y_offset = y_pos - (cursor_height - y_hotspot);
        }
 
        dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
@@ -1248,8 +1258,8 @@ void hubp1_cursor_set_position(
                        CURSOR_Y_POSITION, pos->y);
 
        REG_SET_2(CURSOR_HOT_SPOT, 0,
-                       CURSOR_HOT_SPOT_X, x_hotspot,
-                       CURSOR_HOT_SPOT_Y, y_hotspot);
+                       CURSOR_HOT_SPOT_X, pos->x_hotspot,
+                       CURSOR_HOT_SPOT_Y, pos->y_hotspot);
 
        REG_SET(CURSOR_DST_OFFSET, 0,
                        CURSOR_DST_X_OFFSET, dst_x_offset);
index 938dba5..4566bc7 100644 (file)
@@ -973,10 +973,12 @@ void hubp2_cursor_set_position(
                const struct dc_cursor_mi_param *param)
 {
        struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
-       int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
-       int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
+       int x_pos = pos->x - param->viewport.x;
+       int y_pos = pos->y - param->viewport.y;
        int x_hotspot = pos->x_hotspot;
        int y_hotspot = pos->y_hotspot;
+       int src_x_offset = x_pos - pos->x_hotspot;
+       int src_y_offset = y_pos - pos->y_hotspot;
        int cursor_height = (int)hubp->curs_attr.height;
        int cursor_width = (int)hubp->curs_attr.width;
        uint32_t dst_x_offset;
@@ -994,18 +996,26 @@ void hubp2_cursor_set_position(
        if (hubp->curs_attr.address.quad_part == 0)
                return;
 
-       // Rotated cursor width/height and hotspots tweaks for offset calculation
+       // Transform cursor width / height and hotspots for offset calculations
        if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
                swap(cursor_height, cursor_width);
+               swap(x_hotspot, y_hotspot);
+
                if (param->rotation == ROTATION_ANGLE_90) {
-                       src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
-                       src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
+                       // hotspot = (-y, x)
+                       src_x_offset = x_pos - (cursor_width - x_hotspot);
+                       src_y_offset = y_pos - y_hotspot;
+               } else if (param->rotation == ROTATION_ANGLE_270) {
+                       // hotspot = (y, -x)
+                       src_x_offset = x_pos - x_hotspot;
+                       src_y_offset = y_pos - (cursor_height - y_hotspot);
                }
        } else if (param->rotation == ROTATION_ANGLE_180) {
+               // hotspot = (-x, -y)
                if (!param->mirror)
-                       src_x_offset = pos->x - param->viewport.x;
+                       src_x_offset = x_pos - (cursor_width - x_hotspot);
 
-               src_y_offset = pos->y - param->viewport.y;
+               src_y_offset = y_pos - (cursor_height - y_hotspot);
        }
 
        dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
@@ -1042,8 +1052,8 @@ void hubp2_cursor_set_position(
                        CURSOR_Y_POSITION, pos->y);
 
        REG_SET_2(CURSOR_HOT_SPOT, 0,
-                       CURSOR_HOT_SPOT_X, x_hotspot,
-                       CURSOR_HOT_SPOT_Y, y_hotspot);
+                       CURSOR_HOT_SPOT_X, pos->x_hotspot,
+                       CURSOR_HOT_SPOT_Y, pos->y_hotspot);
 
        REG_SET(CURSOR_DST_OFFSET, 0,
                        CURSOR_DST_X_OFFSET, dst_x_offset);
@@ -1052,8 +1062,8 @@ void hubp2_cursor_set_position(
        hubp->pos.cur_ctl.bits.cur_enable = cur_en;
        hubp->pos.position.bits.x_pos = pos->x;
        hubp->pos.position.bits.y_pos = pos->y;
-       hubp->pos.hot_spot.bits.x_hot = x_hotspot;
-       hubp->pos.hot_spot.bits.y_hot = y_hotspot;
+       hubp->pos.hot_spot.bits.x_hot = pos->x_hotspot;
+       hubp->pos.hot_spot.bits.y_hot = pos->y_hotspot;
        hubp->pos.dst_offset.bits.dst_x_offset = dst_x_offset;
        /* Cursor Rectangle Cache
         * Cursor bitmaps have different hotspot values
index 84e1486..39a57bc 100644 (file)
@@ -87,6 +87,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
        .hubp_init = hubp3_init,
        .set_unbounded_requesting = hubp31_set_unbounded_requesting,
        .hubp_soft_reset = hubp31_soft_reset,
+       .hubp_set_flip_int = hubp1_set_flip_int,
        .hubp_in_blank = hubp1_in_blank,
        .program_extended_blank = hubp31_program_extended_blank,
 };
index 1bd7e0f..389a893 100644 (file)
@@ -96,6 +96,13 @@ static void dccg314_set_pixel_rate_div(
        struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
        enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;
 
+       // Don't program 0xF into the register field. Not valid since
+       // K1 / K2 field is only 1 / 2 bits wide
+       if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA) {
+               BREAK_TO_DEBUGGER();
+               return;
+       }
+
        dccg314_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
        if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA || (k1 == cur_k1 && k2 == cur_k2))
                return;
index 588c1c7..a074179 100644 (file)
@@ -348,10 +348,8 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
        two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
        odm_combine_factor = get_odm_config(pipe_ctx, NULL);
 
-       if (pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
-               return odm_combine_factor;
-
        if (is_dp_128b_132b_signal(pipe_ctx)) {
+               *k1_div = PIXEL_RATE_DIV_BY_1;
                *k2_div = PIXEL_RATE_DIV_BY_1;
        } else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
                *k1_div = PIXEL_RATE_DIV_BY_1;
@@ -359,7 +357,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
                        *k2_div = PIXEL_RATE_DIV_BY_2;
                else
                        *k2_div = PIXEL_RATE_DIV_BY_4;
-       } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+       } else if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
                if (two_pix_per_container) {
                        *k1_div = PIXEL_RATE_DIV_BY_1;
                        *k2_div = PIXEL_RATE_DIV_BY_2;
index 47eb162..7dd36e4 100644 (file)
@@ -237,7 +237,7 @@ static struct timing_generator_funcs dcn314_tg_funcs = {
                .clear_optc_underflow = optc1_clear_optc_underflow,
                .setup_global_swap_lock = NULL,
                .get_crc = optc1_get_crc,
-               .configure_crc = optc2_configure_crc,
+               .configure_crc = optc1_configure_crc,
                .set_dsc_config = optc3_set_dsc_config,
                .get_dsc_status = optc2_get_dsc_status,
                .set_dwb_source = NULL,
index e4daed4..df4f251 100644 (file)
@@ -96,8 +96,10 @@ static void dccg32_set_pixel_rate_div(
 
        // Don't program 0xF into the register field. Not valid since
        // K1 / K2 field is only 1 / 2 bits wide
-       if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA)
+       if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA) {
+               BREAK_TO_DEBUGGER();
                return;
+       }
 
        dccg32_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
        if (k1 == cur_k1 && k2 == cur_k2)
index cf5bd97..d0b46a3 100644 (file)
@@ -283,8 +283,7 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
                        using the max for calculation */
 
                if (hubp->curs_attr.width > 0) {
-                               // Round cursor width to next multiple of 64
-                               cursor_size = (((hubp->curs_attr.width + 63) / 64) * 64) * hubp->curs_attr.height;
+                               cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
 
                                switch (pipe->stream->cursor_attributes.color_format) {
                                case CURSOR_MODE_MONO:
@@ -309,9 +308,9 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
                                                cursor_size > 16384) {
                                        /* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1)
                                         */
-                                       cache_lines_used += (((hubp->curs_attr.width * hubp->curs_attr.height * cursor_bpp +
-                                                                               DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / DCN3_2_MALL_MBLK_SIZE_BYTES) *
-                                                                               DCN3_2_MALL_MBLK_SIZE_BYTES) / dc->caps.cache_line_size + 2;
+                                       cache_lines_used += (((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) /
+                                                       DCN3_2_MALL_MBLK_SIZE_BYTES) * DCN3_2_MALL_MBLK_SIZE_BYTES) /
+                                                       dc->caps.cache_line_size + 2;
                                }
                                break;
                        }
@@ -727,10 +726,7 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
                struct hubp *hubp = pipe->plane_res.hubp;
 
                if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
-                       //Round cursor width up to next multiple of 64
-                       int cursor_width = ((hubp->curs_attr.width + 63) / 64) * 64;
-                       int cursor_height = hubp->curs_attr.height;
-                       int cursor_size = cursor_width * cursor_height;
+                       int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
 
                        switch (hubp->curs_attr.color_format) {
                        case CURSOR_MODE_MONO:
@@ -1175,10 +1171,8 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
        two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
        odm_combine_factor = get_odm_config(pipe_ctx, NULL);
 
-       if (pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
-               return odm_combine_factor;
-
        if (is_dp_128b_132b_signal(pipe_ctx)) {
+               *k1_div = PIXEL_RATE_DIV_BY_1;
                *k2_div = PIXEL_RATE_DIV_BY_1;
        } else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
                *k1_div = PIXEL_RATE_DIV_BY_1;
index b03a781..fa37788 100644 (file)
@@ -111,7 +111,7 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
                        mall_alloc_width_blk_aligned = full_vp_width_blk_aligned;
 
                        /* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */
-                       mall_alloc_height_blk_aligned = (pipe->stream->timing.v_addressable - 1 + mblk_height - 1) /
+                       mall_alloc_height_blk_aligned = (pipe->plane_res.scl_data.viewport.height - 1 + mblk_height - 1) /
                                        mblk_height * mblk_height + mblk_height;
 
                        /* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c;
index 659323e..2abe396 100644 (file)
@@ -157,7 +157,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
        .dispclk_dppclk_vco_speed_mhz = 4300.0,
        .do_urgent_latency_adjustment = true,
        .urgent_latency_adjustment_fabric_clock_component_us = 1.0,
-       .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
+       .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
 };
 
 void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
@@ -211,7 +211,7 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
        /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
        if (clk_mgr->base.ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
                clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].valid = true;
-               clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 38;
+               clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50;
                clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us;
                clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
                clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
@@ -221,7 +221,7 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
                clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
                clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
                clk_mgr->base.bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz * 16;
-               clk_mgr->base.bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 38;
+               clk_mgr->base.bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50;
                clk_mgr->base.bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[1].memclk_mhz * 16;
                clk_mgr->base.bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9;
                clk_mgr->base.bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz * 16;
@@ -1803,6 +1803,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
                         */
                        context->bw_ctx.dml.soc.dram_clock_change_latency_us =
                                        dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+                       /* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so
+                        * prefetch is scheduled correctly to account for dummy pstate.
+                        */
+                       if (dummy_latency_index == 0)
+                               context->bw_ctx.dml.soc.fclk_change_latency_us =
+                                               dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
                        dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
                        maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
                        dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
@@ -1904,7 +1910,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
 
                if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] ==
                        dm_dram_clock_change_unsupported) {
-                       int min_dram_speed_mts_offset = dc->clk_mgr->bw_params->clk_table.num_entries - 1;
+                       int min_dram_speed_mts_offset = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1;
 
                        min_dram_speed_mts =
                                dc->clk_mgr->bw_params->clk_table.entries[min_dram_speed_mts_offset].memclk_mhz * 16;
@@ -1990,6 +1996,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
 
        context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
 
+       if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && dummy_latency_index == 0)
+               context->bw_ctx.dml.soc.fclk_change_latency_us =
+                               dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
+
        dcn32_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
 
        if (!pstate_en)
@@ -1997,8 +2007,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
                context->bw_ctx.dml.soc.dram_clock_change_latency_us =
                                dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
 
-       if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
+       if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
                dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(dc, context);
+               if (dummy_latency_index == 0)
+                       context->bw_ctx.dml.soc.fclk_change_latency_us =
+                                       dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
+       }
 }
 
 static void dcn32_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
index 244fd15..9afd9ba 100644 (file)
@@ -718,6 +718,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
 
        do {
                MaxTotalRDBandwidth = 0;
+               DestinationLineTimesForPrefetchLessThan2 = false;
+               VRatioPrefetchMoreThanMax = false;
 #ifdef __DML_VBA_DEBUG__
                dml_print("DML::%s: Start loop: VStartup = %d\n", __func__, mode_lib->vba.VStartupLines);
 #endif
index f82e14c..c8b28c8 100644 (file)
@@ -46,6 +46,8 @@
 // Prefetch schedule max vratio
 #define __DML_MAX_VRATIO_PRE__ 4.0
 
+#define __DML_VBA_MAX_DST_Y_PRE__    63.75
+
 #define BPP_INVALID 0
 #define BPP_BLENDED_PIPE 0xffffffff
 
index 635fc54..debe46b 100644 (file)
@@ -3475,7 +3475,6 @@ bool dml32_CalculatePrefetchSchedule(
        double  min_Lsw;
        double  Tsw_est1 = 0;
        double  Tsw_est3 = 0;
-       double  TPreMargin = 0;
 
        if (v->GPUVMEnable == true && v->HostVMEnable == true)
                HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
@@ -3669,6 +3668,7 @@ bool dml32_CalculatePrefetchSchedule(
        dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime -
                        (*DSTYAfterScaler + (double) *DSTXAfterScaler / (double) myPipe->HTotal);
 
+       dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, __DML_VBA_MAX_DST_Y_PRE__);
 #ifdef __DML_VBA_DEBUG__
        dml_print("DML::%s: HTotal = %d\n", __func__, myPipe->HTotal);
        dml_print("DML::%s: min_Lsw = %f\n", __func__, min_Lsw);
@@ -3701,8 +3701,6 @@ bool dml32_CalculatePrefetchSchedule(
 
        dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
        Tpre_rounded = dst_y_prefetch_equ * LineTime;
-
-       TPreMargin = Tpre_rounded - TPreReq;
 #ifdef __DML_VBA_DEBUG__
        dml_print("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, dst_y_prefetch_equ);
        dml_print("DML::%s: LineTime: %f\n", __func__, LineTime);
@@ -3730,7 +3728,8 @@ bool dml32_CalculatePrefetchSchedule(
        *VRatioPrefetchY = 0;
        *VRatioPrefetchC = 0;
        *RequiredPrefetchPixDataBWLuma = 0;
-       if (dst_y_prefetch_equ > 1 && TPreMargin > 0.0) {
+       if (dst_y_prefetch_equ > 1 &&
+                       (Tpre_rounded >= TPreReq || dst_y_prefetch_equ == __DML_VBA_MAX_DST_Y_PRE__)) {
                double PrefetchBandwidth1;
                double PrefetchBandwidth2;
                double PrefetchBandwidth3;
index 432b4ec..f4b1765 100644 (file)
@@ -126,9 +126,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
        .sr_enter_plus_exit_z8_time_us = 320,
        .writeback_latency_us = 12.0,
        .round_trip_ping_latency_dcfclk_cycles = 263,
-       .urgent_latency_pixel_data_only_us = 9.35,
-       .urgent_latency_pixel_mixed_with_vm_data_us = 9.35,
-       .urgent_latency_vm_data_only_us = 9.35,
+       .urgent_latency_pixel_data_only_us = 4,
+       .urgent_latency_pixel_mixed_with_vm_data_us = 4,
+       .urgent_latency_vm_data_only_us = 4,
        .fclk_change_latency_us = 20,
        .usr_retraining_latency_us = 2,
        .smn_latency_us = 2,
@@ -156,7 +156,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
        .dispclk_dppclk_vco_speed_mhz = 4300.0,
        .do_urgent_latency_adjustment = true,
        .urgent_latency_adjustment_fabric_clock_component_us = 1.0,
-       .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
+       .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
 };
 
 static void get_optimal_ntuple(struct _vcs_dpi_voltage_scaling_st *entry)
index 4fe75dd..b880f4d 100644 (file)
@@ -1156,22 +1156,21 @@ static int smu_smc_hw_setup(struct smu_context *smu)
        uint64_t features_supported;
        int ret = 0;
 
-       if (adev->in_suspend && smu_is_dpm_running(smu)) {
-               dev_info(adev->dev, "dpm has been enabled\n");
-               /* this is needed specifically */
-               switch (adev->ip_versions[MP1_HWIP][0]) {
-               case IP_VERSION(11, 0, 7):
-               case IP_VERSION(11, 0, 11):
-               case IP_VERSION(11, 5, 0):
-               case IP_VERSION(11, 0, 12):
+       switch (adev->ip_versions[MP1_HWIP][0]) {
+       case IP_VERSION(11, 0, 7):
+       case IP_VERSION(11, 0, 11):
+       case IP_VERSION(11, 5, 0):
+       case IP_VERSION(11, 0, 12):
+               if (adev->in_suspend && smu_is_dpm_running(smu)) {
+                       dev_info(adev->dev, "dpm has been enabled\n");
                        ret = smu_system_features_control(smu, true);
                        if (ret)
                                dev_err(adev->dev, "Failed system features control!\n");
-                       break;
-               default:
-                       break;
+                       return ret;
                }
-               return ret;
+               break;
+       default:
+               break;
        }
 
        ret = smu_init_display_count(smu, 0);
index e2fa3b0..f816b1d 100644 (file)
@@ -1388,6 +1388,14 @@ enum smu_cmn2asic_mapping_type {
        CMN2ASIC_MAPPING_WORKLOAD,
 };
 
+enum smu_baco_seq {
+       BACO_SEQ_BACO = 0,
+       BACO_SEQ_MSR,
+       BACO_SEQ_BAMACO,
+       BACO_SEQ_ULPS,
+       BACO_SEQ_COUNT,
+};
+
 #define MSG_MAP(msg, index, valid_in_vf) \
        [SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
 
index 25c08f9..d6b1393 100644 (file)
 
 // *** IMPORTANT ***
 // PMFW TEAM: Always increment the interface version on any change to this file
-#define SMU13_DRIVER_IF_VERSION  0x2C
+#define SMU13_DRIVER_IF_VERSION  0x35
 
 //Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x20
+#define PPTABLE_VERSION 0x27
 
 #define NUM_GFXCLK_DPM_LEVELS    16
 #define NUM_SOCCLK_DPM_LEVELS    8
@@ -96,7 +96,7 @@
 #define FEATURE_MEM_TEMP_READ_BIT             47
 #define FEATURE_ATHUB_MMHUB_PG_BIT            48
 #define FEATURE_SOC_PCC_BIT                   49
-#define FEATURE_SPARE_50_BIT                  50
+#define FEATURE_EDC_PWRBRK_BIT                50
 #define FEATURE_SPARE_51_BIT                  51
 #define FEATURE_SPARE_52_BIT                  52
 #define FEATURE_SPARE_53_BIT                  53
@@ -282,15 +282,15 @@ typedef enum {
 } I2cControllerPort_e;
 
 typedef enum {
-  I2C_CONTROLLER_NAME_VR_GFX = 0,
-  I2C_CONTROLLER_NAME_VR_SOC,
-  I2C_CONTROLLER_NAME_VR_VMEMP,
-  I2C_CONTROLLER_NAME_VR_VDDIO,
-  I2C_CONTROLLER_NAME_LIQUID0,
-  I2C_CONTROLLER_NAME_LIQUID1,
-  I2C_CONTROLLER_NAME_PLX,
-  I2C_CONTROLLER_NAME_OTHER,
-  I2C_CONTROLLER_NAME_COUNT,
+       I2C_CONTROLLER_NAME_VR_GFX = 0,
+       I2C_CONTROLLER_NAME_VR_SOC,
+       I2C_CONTROLLER_NAME_VR_VMEMP,
+       I2C_CONTROLLER_NAME_VR_VDDIO,
+       I2C_CONTROLLER_NAME_LIQUID0,
+       I2C_CONTROLLER_NAME_LIQUID1,
+       I2C_CONTROLLER_NAME_PLX,
+       I2C_CONTROLLER_NAME_FAN_INTAKE,
+       I2C_CONTROLLER_NAME_COUNT,
 } I2cControllerName_e;
 
 typedef enum {
@@ -302,6 +302,7 @@ typedef enum {
   I2C_CONTROLLER_THROTTLER_LIQUID0,
   I2C_CONTROLLER_THROTTLER_LIQUID1,
   I2C_CONTROLLER_THROTTLER_PLX,
+  I2C_CONTROLLER_THROTTLER_FAN_INTAKE,
   I2C_CONTROLLER_THROTTLER_INA3221,
   I2C_CONTROLLER_THROTTLER_COUNT,
 } I2cControllerThrottler_e;
@@ -309,8 +310,9 @@ typedef enum {
 typedef enum {
   I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5,
   I2C_CONTROLLER_PROTOCOL_VR_IR35217,
-  I2C_CONTROLLER_PROTOCOL_TMP_TMP102A,
+  I2C_CONTROLLER_PROTOCOL_TMP_MAX31875,
   I2C_CONTROLLER_PROTOCOL_INA3221,
+  I2C_CONTROLLER_PROTOCOL_TMP_MAX6604,
   I2C_CONTROLLER_PROTOCOL_COUNT,
 } I2cControllerProtocol_e;
 
@@ -690,6 +692,9 @@ typedef struct {
 #define PP_OD_FEATURE_UCLK_BIT      8
 #define PP_OD_FEATURE_ZERO_FAN_BIT      9
 #define PP_OD_FEATURE_TEMPERATURE_BIT 10
+#define PP_OD_FEATURE_POWER_FEATURE_CTRL_BIT 11
+#define PP_OD_FEATURE_ASIC_TDC_BIT 12
+#define PP_OD_FEATURE_COUNT 13
 
 typedef enum {
   PP_OD_POWER_FEATURE_ALWAYS_ENABLED,
@@ -697,6 +702,11 @@ typedef enum {
   PP_OD_POWER_FEATURE_ALWAYS_DISABLED,
 } PP_OD_POWER_FEATURE_e;
 
+typedef enum {
+  FAN_MODE_AUTO = 0,
+  FAN_MODE_MANUAL_LINEAR,
+} FanMode_e;
+
 typedef struct {
   uint32_t FeatureCtrlMask;
 
@@ -708,8 +718,8 @@ typedef struct {
   uint8_t                RuntimePwrSavingFeaturesCtrl;
 
   //Frequency changes
-  int16_t               GfxclkFmin;           // MHz
-  int16_t               GfxclkFmax;           // MHz
+  int16_t                GfxclkFmin;           // MHz
+  int16_t                GfxclkFmax;           // MHz
   uint16_t               UclkFmin;             // MHz
   uint16_t               UclkFmax;             // MHz
 
@@ -730,7 +740,12 @@ typedef struct {
   uint8_t                MaxOpTemp;
   uint8_t                Padding[4];
 
-  uint32_t               Spare[12];
+  uint16_t               GfxVoltageFullCtrlMode;
+  uint16_t               GfxclkFullCtrlMode;
+  uint16_t               UclkFullCtrlMode;
+  int16_t                AsicTdc;
+
+  uint32_t               Spare[10];
   uint32_t               MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
 } OverDriveTable_t;
 
@@ -748,8 +763,8 @@ typedef struct {
   uint8_t                IdlePwrSavingFeaturesCtrl;
   uint8_t                RuntimePwrSavingFeaturesCtrl;
 
-  uint16_t               GfxclkFmin;           // MHz
-  uint16_t               GfxclkFmax;           // MHz
+  int16_t                GfxclkFmin;           // MHz
+  int16_t                GfxclkFmax;           // MHz
   uint16_t               UclkFmin;             // MHz
   uint16_t               UclkFmax;             // MHz
 
@@ -769,7 +784,12 @@ typedef struct {
   uint8_t                MaxOpTemp;
   uint8_t                Padding[4];
 
-  uint32_t               Spare[12];
+  uint16_t               GfxVoltageFullCtrlMode;
+  uint16_t               GfxclkFullCtrlMode;
+  uint16_t               UclkFullCtrlMode;
+  int16_t                AsicTdc;
+
+  uint32_t               Spare[10];
 
 } OverDriveLimits_t;
 
@@ -903,7 +923,8 @@ typedef struct {
   uint16_t  FanStartTempMin;
   uint16_t  FanStartTempMax;
 
-  uint32_t Spare[12];
+  uint16_t  PowerMinPpt0[POWER_SOURCE_COUNT];
+  uint32_t  Spare[11];
 
 } MsgLimits_t;
 
@@ -1086,11 +1107,13 @@ typedef struct {
   uint32_t        GfxoffSpare[15];
 
   // GFX GPO
-  float           DfllBtcMasterScalerM;
+  uint32_t        DfllBtcMasterScalerM;
   int32_t         DfllBtcMasterScalerB;
-  float           DfllBtcSlaveScalerM;
+  uint32_t        DfllBtcSlaveScalerM;
   int32_t         DfllBtcSlaveScalerB;
-  uint32_t        GfxGpoSpare[12];
+  uint32_t        DfllPccAsWaitCtrl; //GDFLL_AS_WAIT_CTRL_PCC register value to be passed to RLC msg
+  uint32_t        DfllPccAsStepCtrl; //GDFLL_AS_STEP_CTRL_PCC register value to be passed to RLC msg
+  uint32_t        GfxGpoSpare[10];
 
   // GFX DCS
 
@@ -1106,7 +1129,10 @@ typedef struct {
   uint16_t        DcsTimeout;           //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin.
 
 
-  uint32_t        DcsSpare[16];
+  uint32_t        DcsSpare[14];
+
+  // UCLK section
+  uint16_t     ShadowFreqTableUclk[NUM_UCLK_DPM_LEVELS];     // In MHz
 
   // UCLK section
   uint8_t      UseStrobeModeOptimizations; //Set to indicate that FW should use strobe mode optimizations
@@ -1163,13 +1189,14 @@ typedef struct {
   uint16_t IntakeTempHighIntakeAcousticLimit;
   uint16_t IntakeTempAcouticLimitReleaseRate;
 
-  uint16_t FanStalledTempLimitOffset;
+  int16_t FanAbnormalTempLimitOffset;
   uint16_t FanStalledTriggerRpm;
-  uint16_t FanAbnormalTriggerRpm;
-  uint16_t FanPadding;
-
-  uint32_t     FanSpare[14];
+  uint16_t FanAbnormalTriggerRpmCoeff;
+  uint16_t FanAbnormalDetectionEnable;
 
+  uint8_t      FanIntakeSensorSupport;
+  uint8_t      FanIntakePadding[3];
+  uint32_t     FanSpare[13];
   // SECTION: VDD_GFX AVFS
 
   uint8_t      OverrideGfxAvfsFuses;
@@ -1193,7 +1220,6 @@ typedef struct {
   uint32_t   dGbV_dT_vmin;
   uint32_t   dGbV_dT_vmax;
 
-  //Unused: PMFW-9370
   uint32_t   V2F_vmin_range_low;
   uint32_t   V2F_vmin_range_high;
   uint32_t   V2F_vmax_range_low;
@@ -1238,8 +1264,21 @@ typedef struct {
   // SECTION: Advanced Options
   uint32_t          DebugOverrides;
 
+  // Section: Total Board Power idle vs active coefficients
+  uint8_t     TotalBoardPowerSupport;
+  uint8_t     TotalBoardPowerPadding[3];
+
+  int16_t     TotalIdleBoardPowerM;
+  int16_t     TotalIdleBoardPowerB;
+  int16_t     TotalBoardPowerM;
+  int16_t     TotalBoardPowerB;
+
+  QuadraticInt_t qFeffCoeffGameClock[POWER_SOURCE_COUNT];
+  QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT];
+  QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT];
+
   // SECTION: Sku Reserved
-  uint32_t         Spare[64];
+  uint32_t         Spare[43];
 
   // Padding for MMHUB - do not modify this
   uint32_t     MmHubPadding[8];
@@ -1304,7 +1343,8 @@ typedef struct {
   // SECTION: Clock Spread Spectrum
 
   // UCLK Spread Spectrum
-  uint16_t     UclkSpreadPadding;
+  uint8_t      UclkTrainingModeSpreadPercent; // Q4.4
+  uint8_t      UclkSpreadPadding;
   uint16_t     UclkSpreadFreq;      // kHz
 
   // UCLK Spread Spectrum
@@ -1317,11 +1357,7 @@ typedef struct {
 
   // Section: Memory Config
   uint8_t      DramWidth; // Width of interface to the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e
-  uint8_t      PaddingMem1[3];
-
-  // Section: Total Board Power
-  uint16_t     TotalBoardPower;     //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
-  uint16_t     BoardPowerPadding;
+  uint8_t      PaddingMem1[7];
 
   // SECTION: UMC feature flags
   uint8_t      HsrEnabled;
@@ -1423,8 +1459,11 @@ typedef struct {
   uint16_t Vcn1ActivityPercentage  ;
 
   uint32_t EnergyAccumulator;
-  uint16_t AverageSocketPower    ;
+  uint16_t AverageSocketPower;
+  uint16_t AverageTotalBoardPower;
+
   uint16_t AvgTemperature[TEMP_COUNT];
+  uint16_t AvgTemperatureFanIntake;
 
   uint8_t  PcieRate               ;
   uint8_t  PcieWidth              ;
@@ -1592,5 +1631,7 @@ typedef struct {
 #define IH_INTERRUPT_CONTEXT_ID_AUDIO_D0            0x5
 #define IH_INTERRUPT_CONTEXT_ID_AUDIO_D3            0x6
 #define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING  0x7
+#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL        0x8
+#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY        0x9
 
 #endif
index a921549..d466db6 100644 (file)
@@ -147,14 +147,6 @@ struct smu_11_5_power_context {
        uint32_t        max_fast_ppt_limit;
 };
 
-enum smu_v11_0_baco_seq {
-       BACO_SEQ_BACO = 0,
-       BACO_SEQ_MSR,
-       BACO_SEQ_BAMACO,
-       BACO_SEQ_ULPS,
-       BACO_SEQ_COUNT,
-};
-
 #if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
 
 int smu_v11_0_init_microcode(struct smu_context *smu);
@@ -257,7 +249,7 @@ int smu_v11_0_baco_enter(struct smu_context *smu);
 int smu_v11_0_baco_exit(struct smu_context *smu);
 
 int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
-                                     enum smu_v11_0_baco_seq baco_seq);
+                                     enum smu_baco_seq baco_seq);
 
 int smu_v11_0_mode1_reset(struct smu_context *smu);
 
index 80fb583..865d635 100644 (file)
@@ -31,7 +31,7 @@
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x35
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D
 
 #define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500  //500ms
@@ -124,14 +124,6 @@ struct smu_13_0_power_context {
        enum smu_13_0_power_state power_state;
 };
 
-enum smu_v13_0_baco_seq {
-       BACO_SEQ_BACO = 0,
-       BACO_SEQ_MSR,
-       BACO_SEQ_BAMACO,
-       BACO_SEQ_ULPS,
-       BACO_SEQ_COUNT,
-};
-
 #if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
 
 int smu_v13_0_init_microcode(struct smu_context *smu);
@@ -218,6 +210,9 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu);
 int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
                                               struct pp_smu_nv_clock_table *max_clocks);
 
+int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
+                                     enum smu_baco_seq baco_seq);
+
 bool smu_v13_0_baco_is_support(struct smu_context *smu);
 
 enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu);
index 6212fd2..697e98a 100644 (file)
@@ -379,6 +379,10 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
                    ((adev->pdev->device == 0x73BF) &&
                    (adev->pdev->revision == 0xCF)) ||
                    ((adev->pdev->device == 0x7422) &&
+                   (adev->pdev->revision == 0x00)) ||
+                   ((adev->pdev->device == 0x73A3) &&
+                   (adev->pdev->revision == 0x00)) ||
+                   ((adev->pdev->device == 0x73E3) &&
                    (adev->pdev->revision == 0x00)))
                        smu_baco->platform_support = false;
 
index dccbd9f..70b5607 100644 (file)
@@ -1576,7 +1576,7 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
 }
 
 int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
-                                     enum smu_v11_0_baco_seq baco_seq)
+                                     enum smu_baco_seq baco_seq)
 {
        return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
 }
index 43fb102..89f0f6e 100644 (file)
@@ -2230,6 +2230,15 @@ int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
        return ret;
 }
 
+int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
+                                     enum smu_baco_seq baco_seq)
+{
+       return smu_cmn_send_smc_msg_with_param(smu,
+                                              SMU_MSG_ArmD3,
+                                              baco_seq,
+                                              NULL);
+}
+
 bool smu_v13_0_baco_is_support(struct smu_context *smu)
 {
        struct smu_baco_context *smu_baco = &smu->smu_baco;
index 2952932..f0121d1 100644 (file)
@@ -120,6 +120,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
        MSG_MAP(Mode1Reset,                     PPSMC_MSG_Mode1Reset,                  0),
        MSG_MAP(PrepareMp1ForUnload,            PPSMC_MSG_PrepareMp1ForUnload,         0),
        MSG_MAP(DFCstateControl,                PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
+       MSG_MAP(ArmD3,                          PPSMC_MSG_ArmD3,                       0),
 };
 
 static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -1566,6 +1567,31 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
                                               NULL);
 }
 
+static int smu_v13_0_0_baco_enter(struct smu_context *smu)
+{
+       struct smu_baco_context *smu_baco = &smu->smu_baco;
+       struct amdgpu_device *adev = smu->adev;
+
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
+               return smu_v13_0_baco_set_armd3_sequence(smu,
+                               smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
+       else
+               return smu_v13_0_baco_enter(smu);
+}
+
+static int smu_v13_0_0_baco_exit(struct smu_context *smu)
+{
+       struct amdgpu_device *adev = smu->adev;
+
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
+               /* Wait for PMFW handling for the Dstate change */
+               usleep_range(10000, 11000);
+               return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+       } else {
+               return smu_v13_0_baco_exit(smu);
+       }
+}
+
 static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
@@ -1827,8 +1853,8 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
        .baco_is_support = smu_v13_0_baco_is_support,
        .baco_get_state = smu_v13_0_baco_get_state,
        .baco_set_state = smu_v13_0_baco_set_state,
-       .baco_enter = smu_v13_0_baco_enter,
-       .baco_exit = smu_v13_0_baco_exit,
+       .baco_enter = smu_v13_0_0_baco_enter,
+       .baco_exit = smu_v13_0_0_baco_exit,
        .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
        .mode1_reset = smu_v13_0_mode1_reset,
        .set_mp1_state = smu_v13_0_0_set_mp1_state,
index c4102cf..d74debc 100644 (file)
@@ -122,6 +122,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
        MSG_MAP(PrepareMp1ForUnload,            PPSMC_MSG_PrepareMp1ForUnload,         0),
        MSG_MAP(SetMGpuFanBoostLimitRpm,        PPSMC_MSG_SetMGpuFanBoostLimitRpm,     0),
        MSG_MAP(DFCstateControl,                PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
+       MSG_MAP(ArmD3,                          PPSMC_MSG_ArmD3,                       0),
 };
 
 static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -1578,6 +1579,31 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
        return ret;
 }
 
+static int smu_v13_0_7_baco_enter(struct smu_context *smu)
+{
+       struct smu_baco_context *smu_baco = &smu->smu_baco;
+       struct amdgpu_device *adev = smu->adev;
+
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
+               return smu_v13_0_baco_set_armd3_sequence(smu,
+                               smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
+       else
+               return smu_v13_0_baco_enter(smu);
+}
+
+static int smu_v13_0_7_baco_exit(struct smu_context *smu)
+{
+       struct amdgpu_device *adev = smu->adev;
+
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
+               /* Wait for PMFW handling for the Dstate change */
+               usleep_range(10000, 11000);
+               return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+       } else {
+               return smu_v13_0_baco_exit(smu);
+       }
+}
+
 static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
@@ -1655,8 +1681,8 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
        .baco_is_support = smu_v13_0_baco_is_support,
        .baco_get_state = smu_v13_0_baco_get_state,
        .baco_set_state = smu_v13_0_baco_set_state,
-       .baco_enter = smu_v13_0_baco_enter,
-       .baco_exit = smu_v13_0_baco_exit,
+       .baco_enter = smu_v13_0_7_baco_enter,
+       .baco_exit = smu_v13_0_7_baco_exit,
        .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
        .mode1_reset = smu_v13_0_mode1_reset,
        .set_mp1_state = smu_v13_0_7_set_mp1_state,
index 3ea53bb..bd61e20 100644 (file)
 ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
                              u8 offset, void *buffer, size_t size)
 {
+       u8 zero = 0;
+       char *tmpbuf = NULL;
+       /*
+        * As sub-addressing is not supported by all adaptors,
+        * always explicitly read from the start and discard
+        * any bytes that come before the requested offset.
+        * This way, no matter whether the adaptor supports it
+        * or not, we'll end up reading the proper data.
+        */
        struct i2c_msg msgs[] = {
                {
                        .addr = DP_DUAL_MODE_SLAVE_ADDRESS,
                        .flags = 0,
                        .len = 1,
-                       .buf = &offset,
+                       .buf = &zero,
                },
                {
                        .addr = DP_DUAL_MODE_SLAVE_ADDRESS,
                        .flags = I2C_M_RD,
-                       .len = size,
+                       .len = size + offset,
                        .buf = buffer,
                },
        };
        int ret;
 
+       if (offset) {
+               tmpbuf = kmalloc(size + offset, GFP_KERNEL);
+               if (!tmpbuf)
+                       return -ENOMEM;
+
+               msgs[1].buf = tmpbuf;
+       }
+
        ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
+       if (tmpbuf)
+               memcpy(buffer, tmpbuf + offset, size);
+
+       kfree(tmpbuf);
+
        if (ret < 0)
                return ret;
        if (ret != ARRAY_SIZE(msgs))
@@ -208,18 +230,6 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(const struct drm_device *dev,
        if (ret)
                return DRM_DP_DUAL_MODE_UNKNOWN;
 
-       /*
-        * Sigh. Some (maybe all?) type 1 adaptors are broken and ack
-        * the offset but ignore it, and instead they just always return
-        * data from the start of the HDMI ID buffer. So for a broken
-        * type 1 HDMI adaptor a single byte read will always give us
-        * 0x44, and for a type 1 DVI adaptor it should give 0x00
-        * (assuming it implements any registers). Fortunately neither
-        * of those values will match the type 2 signature of the
-        * DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with
-        * the type 2 adaptor detection safely even in the presence
-        * of broken type 1 adaptors.
-        */
        ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
                                    &adaptor_id, sizeof(adaptor_id));
        drm_dbg_kms(dev, "DP dual mode adaptor ID: %02x (err %zd)\n", adaptor_id, ret);
@@ -233,11 +243,10 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(const struct drm_device *dev,
                                return DRM_DP_DUAL_MODE_TYPE2_DVI;
                }
                /*
-                * If neither a proper type 1 ID nor a broken type 1 adaptor
-                * as described above, assume type 1, but let the user know
-                * that we may have misdetected the type.
+                * If not a proper type 1 ID, still assume type 1, but let
+                * the user know that we may have misdetected the type.
                 */
-               if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0])
+               if (!is_type1_adaptor(adaptor_id))
                        drm_err(dev, "Unexpected DP dual mode adaptor ID %02x\n", adaptor_id);
 
        }
@@ -343,10 +352,8 @@ EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output);
  * @enable: enable (as opposed to disable) the TMDS output buffers
  *
  * Set the state of the TMDS output buffers in the adaptor. For
- * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As
- * some type 1 adaptors have problems with registers (see comments
- * in drm_dp_dual_mode_detect()) we avoid touching the register,
- * making this function a no-op on type 1 adaptors.
+ * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register.
+ * Type1 adaptors do not support any register writes.
  *
  * Returns:
  * 0 on success, negative error code on failure
index ecd22c0..51a4668 100644 (file)
@@ -5186,7 +5186,7 @@ int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm
        mst_state = drm_atomic_get_mst_topology_state(state, mgr);
 
        if (IS_ERR(mst_state))
-               return -EINVAL;
+               return PTR_ERR(mst_state);
 
        list_for_each_entry(pos, &mst_state->payloads, next) {
 
index 8214a0b..203bf8d 100644 (file)
@@ -615,7 +615,7 @@ static int drm_dev_init(struct drm_device *dev,
        mutex_init(&dev->clientlist_mutex);
        mutex_init(&dev->master_mutex);
 
-       ret = drmm_add_action(dev, drm_dev_init_release, NULL);
+       ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
        if (ret)
                return ret;
 
index 7bb98e6..5ea5e26 100644 (file)
@@ -104,7 +104,8 @@ static inline void drm_vblank_flush_worker(struct drm_vblank_crtc *vblank)
 
 static inline void drm_vblank_destroy_worker(struct drm_vblank_crtc *vblank)
 {
-       kthread_destroy_worker(vblank->worker);
+       if (vblank->worker)
+               kthread_destroy_worker(vblank->worker);
 }
 
 int drm_vblank_worker_init(struct drm_vblank_crtc *vblank);
index 939d621..688c8af 100644 (file)
@@ -151,9 +151,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
        count = 0;
        connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
        drm_for_each_connector_iter(connector, &conn_iter) {
-               if (connector->registration_state != DRM_CONNECTOR_REGISTERED)
-                       continue;
-
                /* only expose writeback connectors if userspace understands them */
                if (!file_priv->writeback_connectors &&
                    (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK))
index 1e608b9..1a63da2 100644 (file)
@@ -2434,7 +2434,7 @@ intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
 {
        const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
 
-       if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_io == POWER_DOMAIN_INVALID)
+       if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
                return POWER_DOMAIN_PORT_DDI_IO_A;
 
        return domains->ddi_io + (int)(port - domains->port_start);
@@ -2445,7 +2445,7 @@ intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port po
 {
        const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
 
-       if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_lanes == POWER_DOMAIN_INVALID)
+       if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
                return POWER_DOMAIN_PORT_DDI_LANES_A;
 
        return domains->ddi_lanes + (int)(port - domains->port_start);
@@ -2471,7 +2471,7 @@ intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch
 {
        const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
 
-       if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID)
+       if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
                return POWER_DOMAIN_AUX_A;
 
        return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
@@ -2482,7 +2482,7 @@ intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch au
 {
        const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
 
-       if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_tbt == POWER_DOMAIN_INVALID)
+       if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
                return POWER_DOMAIN_AUX_TBT1;
 
        return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
index a4aa950..0d6d640 100644 (file)
@@ -612,6 +612,10 @@ static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
 
        WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);
 
+       err = ttm_bo_wait(bo, true, false);
+       if (err)
+               return err;
+
        err = i915_ttm_move_notify(bo);
        if (err)
                return err;
@@ -1013,9 +1017,6 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
        }
 
-       if (i915_ttm_cpu_maps_iomem(bo->resource))
-               wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
-
        if (!i915_ttm_resource_mappable(bo->resource)) {
                int err = -ENODEV;
                int i;
@@ -1042,6 +1043,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
                }
        }
 
+       if (i915_ttm_cpu_maps_iomem(bo->resource))
+               wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+
        if (drm_dev_enter(dev, &idx)) {
                ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
                                               TTM_BO_VM_NUM_PREFAULT);
index 7a45e53..714221f 100644 (file)
@@ -664,8 +664,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
                return -ESRCH;
        }
 
-       kvm_get_kvm(vgpu->vfio_device.kvm);
-
        if (__kvmgt_vgpu_exist(vgpu))
                return -EEXIST;
 
@@ -676,6 +674,7 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
 
        vgpu->track_node.track_write = kvmgt_page_track_write;
        vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
+       kvm_get_kvm(vgpu->vfio_device.kvm);
        kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
                                         &vgpu->track_node);
 
@@ -715,15 +714,14 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
 
        kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
                                           &vgpu->track_node);
+       kvm_put_kvm(vgpu->vfio_device.kvm);
+
        kvmgt_protect_table_destroy(vgpu);
        gvt_cache_destroy(vgpu);
 
        intel_vgpu_release_msi_eventfd_ctx(vgpu);
 
        vgpu->attached = false;
-
-       if (vgpu->vfio_device.kvm)
-               kvm_put_kvm(vgpu->vfio_device.kvm);
 }
 
 static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
index 011be7f..bc8fb4e 100644 (file)
@@ -112,11 +112,6 @@ int lima_devfreq_init(struct lima_device *ldev)
        unsigned long cur_freq;
        int ret;
        const char *regulator_names[] = { "mali", NULL };
-       const char *clk_names[] = { "core", NULL };
-       struct dev_pm_opp_config config = {
-               .regulator_names = regulator_names,
-               .clk_names = clk_names,
-       };
 
        if (!device_property_present(dev, "operating-points-v2"))
                /* Optional, continue without devfreq */
@@ -124,7 +119,15 @@ int lima_devfreq_init(struct lima_device *ldev)
 
        spin_lock_init(&ldevfreq->lock);
 
-       ret = devm_pm_opp_set_config(dev, &config);
+       /*
+        * clkname is set separately so it is not affected by the optional
+        * regulator setting which may return error.
+        */
+       ret = devm_pm_opp_set_clkname(dev, "core");
+       if (ret)
+               return ret;
+
+       ret = devm_pm_opp_set_regulators(dev, regulator_names);
        if (ret) {
                /* Continue if the optional regulator is missing */
                if (ret != -ENODEV)
index 2944228..8a3b685 100644 (file)
@@ -2500,6 +2500,7 @@ static const struct display_timing logictechno_lt161010_2nh_timing = {
 static const struct panel_desc logictechno_lt161010_2nh = {
        .timings = &logictechno_lt161010_2nh_timing,
        .num_timings = 1,
+       .bpc = 6,
        .size = {
                .width = 154,
                .height = 86,
@@ -2529,6 +2530,7 @@ static const struct display_timing logictechno_lt170410_2whc_timing = {
 static const struct panel_desc logictechno_lt170410_2whc = {
        .timings = &logictechno_lt170410_2whc_timing,
        .num_timings = 1,
+       .bpc = 8,
        .size = {
                .width = 217,
                .height = 136,
index 6748ec1..a1f909d 100644 (file)
@@ -1093,6 +1093,10 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev)
        struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
        struct iommu_domain *domain;
 
+       /* Our IOMMU usage policy doesn't currently play well with GART */
+       if (of_machine_is_compatible("nvidia,tegra20"))
+               return false;
+
        /*
         * If the Tegra DRM clients are backed by an IOMMU, push buffers are
         * likely to be allocated beyond the 32-bit boundary if sufficient
index 4419e81..0a6347c 100644 (file)
@@ -197,8 +197,8 @@ vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
        struct drm_private_state *priv_state;
 
        priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
-       if (IS_ERR(priv_state))
-               return ERR_CAST(priv_state);
+       if (!priv_state)
+               return ERR_PTR(-EINVAL);
 
        return to_vc4_hvs_state(priv_state);
 }
@@ -210,8 +210,8 @@ vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
        struct drm_private_state *priv_state;
 
        priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
-       if (IS_ERR(priv_state))
-               return ERR_CAST(priv_state);
+       if (!priv_state)
+               return ERR_PTR(-EINVAL);
 
        return to_vc4_hvs_state(priv_state);
 }
index 0cd3f97..f60ea24 100644 (file)
@@ -292,6 +292,10 @@ static void host1x_setup_virtualization_tables(struct host1x *host)
 
 static bool host1x_wants_iommu(struct host1x *host1x)
 {
+       /* Our IOMMU usage policy doesn't currently play well with GART */
+       if (of_machine_is_compatible("nvidia,tegra20"))
+               return false;
+
        /*
         * If we support addressing a maximum of 32 bits of physical memory
         * and if the host1x firewall is enabled, there's no need to enable
index 5b12040..cc23b90 100644 (file)
@@ -533,13 +533,17 @@ static void vmbus_add_channel_work(struct work_struct *work)
         * Add the new device to the bus. This will kick off device-driver
         * binding which eventually invokes the device driver's AddDevice()
         * method.
+        *
+        * If vmbus_device_register() fails, the 'device_obj' is freed in
+        * vmbus_device_release() as called by device_unregister() in the
+        * error path of vmbus_device_register(). In the outside error
+        * path, there's no need to free it.
         */
        ret = vmbus_device_register(newchannel->device_obj);
 
        if (ret != 0) {
                pr_err("unable to add child device object (relid %d)\n",
                        newchannel->offermsg.child_relid);
-               kfree(newchannel->device_obj);
                goto err_deq_chan;
        }
 
index 8b2e413..e592c48 100644 (file)
@@ -2082,6 +2082,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
        ret = device_register(&child_device_obj->device);
        if (ret) {
                pr_err("Unable to register child device\n");
+               put_device(&child_device_obj->device);
                return ret;
        }
 
index ad8fce3..490c342 100644 (file)
@@ -869,18 +869,6 @@ static int bma400_init(struct bma400_data *data)
        unsigned int val;
        int ret;
 
-       /* Try to read chip_id register. It must return 0x90. */
-       ret = regmap_read(data->regmap, BMA400_CHIP_ID_REG, &val);
-       if (ret) {
-               dev_err(data->dev, "Failed to read chip id register\n");
-               return ret;
-       }
-
-       if (val != BMA400_ID_REG_VAL) {
-               dev_err(data->dev, "Chip ID mismatch\n");
-               return -ENODEV;
-       }
-
        data->regulators[BMA400_VDD_REGULATOR].supply = "vdd";
        data->regulators[BMA400_VDDIO_REGULATOR].supply = "vddio";
        ret = devm_regulator_bulk_get(data->dev,
@@ -906,6 +894,18 @@ static int bma400_init(struct bma400_data *data)
        if (ret)
                return ret;
 
+       /* Try to read chip_id register. It must return 0x90. */
+       ret = regmap_read(data->regmap, BMA400_CHIP_ID_REG, &val);
+       if (ret) {
+               dev_err(data->dev, "Failed to read chip id register\n");
+               return ret;
+       }
+
+       if (val != BMA400_ID_REG_VAL) {
+               dev_err(data->dev, "Chip ID mismatch\n");
+               return -ENODEV;
+       }
+
        ret = bma400_get_power_mode(data);
        if (ret) {
                dev_err(data->dev, "Failed to get the initial power-mode\n");
index 33e2515..870f4cb 100644 (file)
@@ -2307,11 +2307,9 @@ static int at91_adc_temp_sensor_init(struct at91_adc_state *st,
        clb->p6 = buf[AT91_ADC_TS_CLB_IDX_P6];
 
        /*
-        * We prepare here the conversion to milli and also add constant
-        * factor (5 degrees Celsius) to p1 here to avoid doing it on
-        * hotpath.
+        * We prepare here the conversion to milli to avoid doing it on hotpath.
         */
-       clb->p1 = clb->p1 * 1000 + 5000;
+       clb->p1 = clb->p1 * 1000;
 
 free_buf:
        kfree(buf);
index 532daaa..366e252 100644 (file)
@@ -634,8 +634,10 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *idev,
        trig->ops = &at91_adc_trigger_ops;
 
        ret = iio_trigger_register(trig);
-       if (ret)
+       if (ret) {
+               iio_trigger_free(trig);
                return NULL;
+       }
 
        return trig;
 }
index 30a31f1..88e947f 100644 (file)
@@ -57,7 +57,8 @@ static struct iio_map mp2629_adc_maps[] = {
        MP2629_MAP(SYSTEM_VOLT, "system-volt"),
        MP2629_MAP(INPUT_VOLT, "input-volt"),
        MP2629_MAP(BATT_CURRENT, "batt-current"),
-       MP2629_MAP(INPUT_CURRENT, "input-current")
+       MP2629_MAP(INPUT_CURRENT, "input-current"),
+       { }
 };
 
 static int mp2629_read_raw(struct iio_dev *indio_dev,
@@ -74,7 +75,7 @@ static int mp2629_read_raw(struct iio_dev *indio_dev,
                if (ret)
                        return ret;
 
-               if (chan->address == MP2629_INPUT_VOLT)
+               if (chan->channel == MP2629_INPUT_VOLT)
                        rval &= GENMASK(6, 0);
                *val = rval;
                return IIO_VAL_INT;
index 307557a..52744dd 100644 (file)
@@ -632,7 +632,7 @@ static int bno055_set_regmask(struct bno055_priv *priv, int val, int val2,
                        return -EINVAL;
                }
                delta = abs(tbl_val - req_val);
-               if (delta < best_delta || first) {
+               if (first || delta < best_delta) {
                        best_delta = delta;
                        hwval = i;
                        first = false;
index cbc9349..550b75b 100644 (file)
@@ -25,13 +25,6 @@ enum {
        MS5607,
 };
 
-struct ms5611_chip_info {
-       u16 prom[MS5611_PROM_WORDS_NB];
-
-       int (*temp_and_pressure_compensate)(struct ms5611_chip_info *chip_info,
-                                           s32 *temp, s32 *pressure);
-};
-
 /*
  * OverSampling Rate descriptor.
  * Warning: cmd MUST be kept aligned on a word boundary (see
@@ -50,12 +43,15 @@ struct ms5611_state {
        const struct ms5611_osr *pressure_osr;
        const struct ms5611_osr *temp_osr;
 
+       u16 prom[MS5611_PROM_WORDS_NB];
+
        int (*reset)(struct ms5611_state *st);
        int (*read_prom_word)(struct ms5611_state *st, int index, u16 *word);
        int (*read_adc_temp_and_pressure)(struct ms5611_state *st,
                                          s32 *temp, s32 *pressure);
 
-       struct ms5611_chip_info *chip_info;
+       int (*compensate_temp_and_pressure)(struct ms5611_state *st, s32 *temp,
+                                         s32 *pressure);
        struct regulator *vdd;
 };
 
index 717521d..c564a1d 100644 (file)
@@ -85,7 +85,7 @@ static int ms5611_read_prom(struct iio_dev *indio_dev)
        struct ms5611_state *st = iio_priv(indio_dev);
 
        for (i = 0; i < MS5611_PROM_WORDS_NB; i++) {
-               ret = st->read_prom_word(st, i, &st->chip_info->prom[i]);
+               ret = st->read_prom_word(st, i, &st->prom[i]);
                if (ret < 0) {
                        dev_err(&indio_dev->dev,
                                "failed to read prom at %d\n", i);
@@ -93,7 +93,7 @@ static int ms5611_read_prom(struct iio_dev *indio_dev)
                }
        }
 
-       if (!ms5611_prom_is_valid(st->chip_info->prom, MS5611_PROM_WORDS_NB)) {
+       if (!ms5611_prom_is_valid(st->prom, MS5611_PROM_WORDS_NB)) {
                dev_err(&indio_dev->dev, "PROM integrity check failed\n");
                return -ENODEV;
        }
@@ -114,21 +114,20 @@ static int ms5611_read_temp_and_pressure(struct iio_dev *indio_dev,
                return ret;
        }
 
-       return st->chip_info->temp_and_pressure_compensate(st->chip_info,
-                                                          temp, pressure);
+       return st->compensate_temp_and_pressure(st, temp, pressure);
 }
 
-static int ms5611_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info,
+static int ms5611_temp_and_pressure_compensate(struct ms5611_state *st,
                                               s32 *temp, s32 *pressure)
 {
        s32 t = *temp, p = *pressure;
        s64 off, sens, dt;
 
-       dt = t - (chip_info->prom[5] << 8);
-       off = ((s64)chip_info->prom[2] << 16) + ((chip_info->prom[4] * dt) >> 7);
-       sens = ((s64)chip_info->prom[1] << 15) + ((chip_info->prom[3] * dt) >> 8);
+       dt = t - (st->prom[5] << 8);
+       off = ((s64)st->prom[2] << 16) + ((st->prom[4] * dt) >> 7);
+       sens = ((s64)st->prom[1] << 15) + ((st->prom[3] * dt) >> 8);
 
-       t = 2000 + ((chip_info->prom[6] * dt) >> 23);
+       t = 2000 + ((st->prom[6] * dt) >> 23);
        if (t < 2000) {
                s64 off2, sens2, t2;
 
@@ -154,17 +153,17 @@ static int ms5611_temp_and_pressure_compensate(struct ms5611_chip_info *chip_inf
        return 0;
 }
 
-static int ms5607_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info,
+static int ms5607_temp_and_pressure_compensate(struct ms5611_state *st,
                                               s32 *temp, s32 *pressure)
 {
        s32 t = *temp, p = *pressure;
        s64 off, sens, dt;
 
-       dt = t - (chip_info->prom[5] << 8);
-       off = ((s64)chip_info->prom[2] << 17) + ((chip_info->prom[4] * dt) >> 6);
-       sens = ((s64)chip_info->prom[1] << 16) + ((chip_info->prom[3] * dt) >> 7);
+       dt = t - (st->prom[5] << 8);
+       off = ((s64)st->prom[2] << 17) + ((st->prom[4] * dt) >> 6);
+       sens = ((s64)st->prom[1] << 16) + ((st->prom[3] * dt) >> 7);
 
-       t = 2000 + ((chip_info->prom[6] * dt) >> 23);
+       t = 2000 + ((st->prom[6] * dt) >> 23);
        if (t < 2000) {
                s64 off2, sens2, t2, tmp;
 
@@ -342,15 +341,6 @@ static int ms5611_write_raw(struct iio_dev *indio_dev,
 
 static const unsigned long ms5611_scan_masks[] = {0x3, 0};
 
-static struct ms5611_chip_info chip_info_tbl[] = {
-       [MS5611] = {
-               .temp_and_pressure_compensate = ms5611_temp_and_pressure_compensate,
-       },
-       [MS5607] = {
-               .temp_and_pressure_compensate = ms5607_temp_and_pressure_compensate,
-       }
-};
-
 static const struct iio_chan_spec ms5611_channels[] = {
        {
                .type = IIO_PRESSURE,
@@ -433,7 +423,20 @@ int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
        struct ms5611_state *st = iio_priv(indio_dev);
 
        mutex_init(&st->lock);
-       st->chip_info = &chip_info_tbl[type];
+
+       switch (type) {
+       case MS5611:
+               st->compensate_temp_and_pressure =
+                       ms5611_temp_and_pressure_compensate;
+               break;
+       case MS5607:
+               st->compensate_temp_and_pressure =
+                       ms5607_temp_and_pressure_compensate;
+               break;
+       default:
+               return -EINVAL;
+       }
+
        st->temp_osr =
                &ms5611_avail_temp_osr[ARRAY_SIZE(ms5611_avail_temp_osr) - 1];
        st->pressure_osr =
index 432e912..a0a7205 100644 (file)
@@ -91,7 +91,7 @@ static int ms5611_spi_probe(struct spi_device *spi)
        spi_set_drvdata(spi, indio_dev);
 
        spi->mode = SPI_MODE_0;
-       spi->max_speed_hz = 20000000;
+       spi->max_speed_hz = min(spi->max_speed_hz, 20000000U);
        spi->bits_per_word = 8;
        ret = spi_setup(spi);
        if (ret < 0)
index d6c5e96..6b05eed 100644 (file)
@@ -203,9 +203,13 @@ static int iio_sysfs_trigger_remove(int id)
 
 static int __init iio_sysfs_trig_init(void)
 {
+       int ret;
        device_initialize(&iio_sysfs_trig_dev);
        dev_set_name(&iio_sysfs_trig_dev, "iio_sysfs_trigger");
-       return device_add(&iio_sysfs_trig_dev);
+       ret = device_add(&iio_sysfs_trig_dev);
+       if (ret)
+               put_device(&iio_sysfs_trig_dev);
+       return ret;
 }
 module_init(iio_sysfs_trig_init);
 
index b86de13..84b8752 100644 (file)
@@ -273,22 +273,22 @@ int iforce_init_device(struct device *parent, u16 bustype,
  * Get device info.
  */
 
-       if (!iforce_get_id_packet(iforce, 'M', buf, &len) || len < 3)
+       if (!iforce_get_id_packet(iforce, 'M', buf, &len) && len >= 3)
                input_dev->id.vendor = get_unaligned_le16(buf + 1);
        else
                dev_warn(&iforce->dev->dev, "Device does not respond to id packet M\n");
 
-       if (!iforce_get_id_packet(iforce, 'P', buf, &len) || len < 3)
+       if (!iforce_get_id_packet(iforce, 'P', buf, &len) && len >= 3)
                input_dev->id.product = get_unaligned_le16(buf + 1);
        else
                dev_warn(&iforce->dev->dev, "Device does not respond to id packet P\n");
 
-       if (!iforce_get_id_packet(iforce, 'B', buf, &len) || len < 3)
+       if (!iforce_get_id_packet(iforce, 'B', buf, &len) && len >= 3)
                iforce->device_memory.end = get_unaligned_le16(buf + 1);
        else
                dev_warn(&iforce->dev->dev, "Device does not respond to id packet B\n");
 
-       if (!iforce_get_id_packet(iforce, 'N', buf, &len) || len < 2)
+       if (!iforce_get_id_packet(iforce, 'N', buf, &len) && len >= 2)
                ff_effects = buf[1];
        else
                dev_warn(&iforce->dev->dev, "Device does not respond to id packet N\n");
index 4804761..0948938 100644 (file)
 #include <linux/gpio.h>
 #include <linux/platform_device.h>
 
+static bool use_low_level_irq;
+module_param(use_low_level_irq, bool, 0444);
+MODULE_PARM_DESC(use_low_level_irq, "Use low-level triggered IRQ instead of edge triggered");
+
 struct soc_button_info {
        const char *name;
        int acpi_index;
@@ -74,6 +78,13 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = {
                },
        },
        {
+               /* Acer Switch V 10 SW5-017, same issue as Acer Switch 10 SW5-012. */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "SW5-017"),
+               },
+       },
+       {
                /*
                 * Acer One S1003. _LID method messes with power-button GPIO
                 * IRQ settings, leading to a non working power-button.
@@ -164,7 +175,8 @@ soc_button_device_create(struct platform_device *pdev,
                }
 
                /* See dmi_use_low_level_irq[] comment */
-               if (!autorepeat && dmi_check_system(dmi_use_low_level_irq)) {
+               if (!autorepeat && (use_low_level_irq ||
+                                   dmi_check_system(dmi_use_low_level_irq))) {
                        irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
                        gpio_keys[n_buttons].irq = irq;
                        gpio_keys[n_buttons].gpio = -ENOENT;
index fa021af..b0f7764 100644 (file)
@@ -192,6 +192,7 @@ static const char * const smbus_pnp_ids[] = {
        "SYN3221", /* HP 15-ay000 */
        "SYN323d", /* HP Spectre X360 13-w013dx */
        "SYN3257", /* HP Envy 13-ad105ng */
+       "SYN3286", /* HP Laptop 15-da3001TU */
        NULL
 };
 
index 0778dc0..46f8a69 100644 (file)
@@ -115,18 +115,18 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
                .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_NEVER)
        },
        {
-               /* ASUS ZenBook UX425UA */
+               /* ASUS ZenBook UX425UA/QA */
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425"),
                },
                .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER)
        },
        {
-               /* ASUS ZenBook UM325UA */
+               /* ASUS ZenBook UM325UA/QA */
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325"),
                },
                .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER)
        },
index f948649..6dac7c1 100644 (file)
@@ -1543,8 +1543,6 @@ static int i8042_probe(struct platform_device *dev)
 {
        int error;
 
-       i8042_platform_device = dev;
-
        if (i8042_reset == I8042_RESET_ALWAYS) {
                error = i8042_controller_selftest();
                if (error)
@@ -1582,7 +1580,6 @@ static int i8042_probe(struct platform_device *dev)
        i8042_free_aux_ports(); /* in case KBD failed but AUX not */
        i8042_free_irqs();
        i8042_controller_reset(false);
-       i8042_platform_device = NULL;
 
        return error;
 }
@@ -1592,7 +1589,6 @@ static int i8042_remove(struct platform_device *dev)
        i8042_unregister_ports();
        i8042_free_irqs();
        i8042_controller_reset(false);
-       i8042_platform_device = NULL;
 
        return 0;
 }
index a33cc79..c281e49 100644 (file)
@@ -1158,6 +1158,7 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
        input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0);
        input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
 
+retry_read_config:
        /* Read configuration and apply touchscreen parameters */
        goodix_read_config(ts);
 
@@ -1165,6 +1166,16 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
        touchscreen_parse_properties(ts->input_dev, true, &ts->prop);
 
        if (!ts->prop.max_x || !ts->prop.max_y || !ts->max_touch_num) {
+               if (!ts->reset_controller_at_probe &&
+                   ts->irq_pin_access_method != IRQ_PIN_ACCESS_NONE) {
+                       dev_info(&ts->client->dev, "Config not set, resetting controller\n");
+                       /* Retry after a controller reset */
+                       ts->reset_controller_at_probe = true;
+                       error = goodix_reset(ts);
+                       if (error)
+                               return error;
+                       goto retry_read_config;
+               }
                dev_err(&ts->client->dev,
                        "Invalid config (%d, %d, %d), using defaults\n",
                        ts->prop.max_x, ts->prop.max_y, ts->max_touch_num);
index 48cdcd0..996a8b5 100644 (file)
@@ -959,11 +959,9 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 
                        domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
                        pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
-                       if (domain_use_first_level(domain)) {
-                               pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
-                               if (iommu_is_dma_domain(&domain->domain))
-                                       pteval |= DMA_FL_PTE_ACCESS;
-                       }
+                       if (domain_use_first_level(domain))
+                               pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
+
                        if (cmpxchg64(&pte->val, 0ULL, pteval))
                                /* Someone else set it while we were thinking; use theirs. */
                                free_pgtable_page(tmp_page);
index c30ddac..e13d7e5 100644 (file)
@@ -642,7 +642,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
         * Since it is a second level only translation setup, we should
         * set SRE bit as well (addresses are expected to be GPAs).
         */
-       if (pasid != PASID_RID2PASID)
+       if (pasid != PASID_RID2PASID && ecap_srs(iommu->ecap))
                pasid_set_sre(pte);
        pasid_set_present(pte);
        spin_unlock(&iommu->lock);
@@ -685,7 +685,8 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
         * We should set SRE bit as well since the addresses are expected
         * to be GPAs.
         */
-       pasid_set_sre(pte);
+       if (ecap_srs(iommu->ecap))
+               pasid_set_sre(pte);
        pasid_set_present(pte);
        spin_unlock(&iommu->lock);
 
index 7ea0100..90ee56d 100644 (file)
@@ -222,7 +222,7 @@ mISDN_register_device(struct mISDNdevice *dev,
 
        err = get_free_devid();
        if (err < 0)
-               goto error1;
+               return err;
        dev->id = err;
 
        device_initialize(&dev->dev);
index c3b2c99..cfbcd9e 100644 (file)
@@ -77,6 +77,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
        if (!entry)
                return -ENOMEM;
 
+       INIT_LIST_HEAD(&entry->list);
        entry->elem = elem;
 
        entry->dev.class = elements_class;
@@ -107,7 +108,7 @@ err2:
        device_unregister(&entry->dev);
        return ret;
 err1:
-       kfree(entry);
+       put_device(&entry->dev);
        return ret;
 }
 EXPORT_SYMBOL(mISDN_dsp_element_register);
index 9c5ef81..bb786c3 100644 (file)
@@ -1858,6 +1858,8 @@ bad:
        dm_io_client_destroy(c->dm_io);
 bad_dm_io:
        mutex_destroy(&c->lock);
+       if (c->no_sleep)
+               static_branch_dec(&no_sleep_enabled);
        kfree(c);
 bad_client:
        return ERR_PTR(r);
index 159c680..2653516 100644 (file)
@@ -3630,6 +3630,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
        limits->physical_block_size =
                max_t(unsigned, limits->physical_block_size, cc->sector_size);
        limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
+       limits->dma_alignment = limits->logical_block_size - 1;
 }
 
 static struct target_type crypt_target = {
index aaf2472..e97e9f9 100644 (file)
@@ -263,6 +263,7 @@ struct dm_integrity_c {
 
        struct completion crypto_backoff;
 
+       bool wrote_to_journal;
        bool journal_uptodate;
        bool just_formatted;
        bool recalculate_flag;
@@ -2375,6 +2376,8 @@ static void integrity_commit(struct work_struct *w)
        if (!commit_sections)
                goto release_flush_bios;
 
+       ic->wrote_to_journal = true;
+
        i = commit_start;
        for (n = 0; n < commit_sections; n++) {
                for (j = 0; j < ic->journal_section_entries; j++) {
@@ -2591,10 +2594,6 @@ static void integrity_writer(struct work_struct *w)
 
        unsigned prev_free_sectors;
 
-       /* the following test is not needed, but it tests the replay code */
-       if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
-               return;
-
        spin_lock_irq(&ic->endio_wait.lock);
        write_start = ic->committed_section;
        write_sections = ic->n_committed_sections;
@@ -3101,10 +3100,17 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
        drain_workqueue(ic->commit_wq);
 
        if (ic->mode == 'J') {
-               if (ic->meta_dev)
-                       queue_work(ic->writer_wq, &ic->writer_work);
+               queue_work(ic->writer_wq, &ic->writer_work);
                drain_workqueue(ic->writer_wq);
                dm_integrity_flush_buffers(ic, true);
+               if (ic->wrote_to_journal) {
+                       init_journal(ic, ic->free_section,
+                                    ic->journal_sections - ic->free_section, ic->commit_seq);
+                       if (ic->free_section) {
+                               init_journal(ic, 0, ic->free_section,
+                                            next_commit_seq(ic->commit_seq));
+                       }
+               }
        }
 
        if (ic->mode == 'B') {
@@ -3132,6 +3138,8 @@ static void dm_integrity_resume(struct dm_target *ti)
 
        DEBUG_print("resume\n");
 
+       ic->wrote_to_journal = false;
+
        if (ic->provided_data_sectors != old_provided_data_sectors) {
                if (ic->provided_data_sectors > old_provided_data_sectors &&
                    ic->mode == 'B' &&
@@ -3370,6 +3378,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
                limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
                limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
                blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
+               limits->dma_alignment = limits->logical_block_size - 1;
        }
 }
 
index 6b3f867..3bfc158 100644 (file)
@@ -655,7 +655,7 @@ static void list_version_get_needed(struct target_type *tt, void *needed_param)
     size_t *needed = needed_param;
 
     *needed += sizeof(struct dm_target_versions);
-    *needed += strlen(tt->name);
+    *needed += strlen(tt->name) + 1;
     *needed += ALIGN_MASK;
 }
 
@@ -720,7 +720,7 @@ static int __list_versions(struct dm_ioctl *param, size_t param_size, const char
        iter_info.old_vers = NULL;
        iter_info.vers = vers;
        iter_info.flags = 0;
-       iter_info.end = (char *)vers+len;
+       iter_info.end = (char *)vers + needed;
 
        /*
         * Now loop through filling out the names & versions.
index 20fd688..178e13a 100644 (file)
@@ -875,6 +875,7 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limit
        limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev);
        limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev);
        limits->io_min = limits->physical_block_size;
+       limits->dma_alignment = limits->logical_block_size - 1;
 }
 
 #if IS_ENABLED(CONFIG_FS_DAX)
index e71068f..844264e 100644 (file)
@@ -854,6 +854,7 @@ static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
        u32 context_id = vmci_get_context_id();
        struct vmci_event_qp ev;
 
+       memset(&ev, 0, sizeof(ev));
        ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
        ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                          VMCI_CONTEXT_RESOURCE_ID);
@@ -1467,6 +1468,7 @@ static int qp_notify_peer(bool attach,
         * kernel.
         */
 
+       memset(&ev, 0, sizeof(ev));
        ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
        ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                          VMCI_CONTEXT_RESOURCE_ID);
index 95fa8fb..c5de202 100644 (file)
@@ -1134,7 +1134,13 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
                mmc_power_cycle(host, ocr);
        } else {
                bit = fls(ocr) - 1;
-               ocr &= 3 << bit;
+               /*
+                * The bit variable represents the highest voltage bit set in
+                * the OCR register.
+                * To keep a range of 2 values (e.g. 3.2V/3.3V and 3.3V/3.4V),
+                * we must shift the mask '3' with (bit - 1).
+                */
+               ocr &= 3 << (bit - 1);
                if (bit != host->ios.vdd)
                        dev_warn(mmc_dev(host), "exceeding card's volts\n");
        }
index 34ea1ac..28dc650 100644 (file)
@@ -1749,6 +1749,8 @@ static int amd_probe(struct sdhci_pci_chip *chip)
                }
        }
 
+       pci_dev_put(smbus_dev);
+
        if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
                chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
 
index ad457cd..bca1d09 100644 (file)
@@ -32,6 +32,7 @@
 #define O2_SD_CAPS             0xE0
 #define O2_SD_ADMA1            0xE2
 #define O2_SD_ADMA2            0xE7
+#define O2_SD_MISC_CTRL2       0xF0
 #define O2_SD_INF_MOD          0xF1
 #define O2_SD_MISC_CTRL4       0xFC
 #define O2_SD_MISC_CTRL                0x1C0
@@ -877,6 +878,12 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
                /* Set Tuning Windows to 5 */
                pci_write_config_byte(chip->pdev,
                                O2_SD_TUNING_CTRL, 0x55);
+               //Adjust 1st and 2nd CD debounce time
+               pci_read_config_dword(chip->pdev, O2_SD_MISC_CTRL2, &scratch_32);
+               scratch_32 &= 0xFFE7FFFF;
+               scratch_32 |= 0x00180000;
+               pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL2, scratch_32);
+               pci_write_config_dword(chip->pdev, O2_SD_DETECT_SETTING, 1);
                /* Lock WP */
                ret = pci_read_config_byte(chip->pdev,
                                           O2_SD_LOCK_WP, &scratch);
index 34d9a7a..c94bf48 100644 (file)
@@ -26,6 +26,7 @@ config MTD_ONENAND_OMAP2
        tristate "OneNAND on OMAP2/OMAP3 support"
        depends on ARCH_OMAP2 || ARCH_OMAP3 || (COMPILE_TEST && ARM)
        depends on OF || COMPILE_TEST
+       depends on OMAP_GPMC
        help
          Support for a OneNAND flash device connected to an OMAP2/OMAP3 SoC
          via the GPMC memory controller.
index 33f2c98..c3cc660 100644 (file)
@@ -5834,7 +5834,7 @@ nand_match_ecc_req(struct nand_chip *chip,
        int req_step = requirements->step_size;
        int req_strength = requirements->strength;
        int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
-       int best_step, best_strength, best_ecc_bytes;
+       int best_step = 0, best_strength = 0, best_ecc_bytes = 0;
        int best_ecc_bytes_total = INT_MAX;
        int i, j;
 
@@ -5915,7 +5915,7 @@ nand_maximize_ecc(struct nand_chip *chip,
        int step_size, strength, nsteps, ecc_bytes, corr;
        int best_corr = 0;
        int best_step = 0;
-       int best_strength, best_ecc_bytes;
+       int best_strength = 0, best_ecc_bytes = 0;
        int i, j;
 
        for (i = 0; i < caps->nstepinfos; i++) {
index 8f80019..198a447 100644 (file)
@@ -3167,16 +3167,18 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
 
        ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
        if (ret)
-               nand_cleanup(chip);
+               goto err;
 
        if (nandc->props->use_codeword_fixup) {
                ret = qcom_nand_host_parse_boot_partitions(nandc, host, dn);
-               if (ret) {
-                       nand_cleanup(chip);
-                       return ret;
-               }
+               if (ret)
+                       goto err;
        }
 
+       return 0;
+
+err:
+       nand_cleanup(chip);
        return ret;
 }
 
index 24150c9..dc3253b 100644 (file)
@@ -113,6 +113,7 @@ static int com20020_probe(struct pcmcia_device *p_dev)
        struct com20020_dev *info;
        struct net_device *dev;
        struct arcnet_local *lp;
+       int ret = -ENOMEM;
 
        dev_dbg(&p_dev->dev, "com20020_attach()\n");
 
@@ -142,12 +143,18 @@ static int com20020_probe(struct pcmcia_device *p_dev)
        info->dev = dev;
        p_dev->priv = info;
 
-       return com20020_config(p_dev);
+       ret = com20020_config(p_dev);
+       if (ret)
+               goto fail_config;
+
+       return 0;
 
+fail_config:
+       free_arcdev(dev);
 fail_alloc_dev:
        kfree(info);
 fail_alloc_info:
-       return -ENOMEM;
+       return ret;
 } /* com20020_attach */
 
 static void com20020_detach(struct pcmcia_device *link)
index e84c49b..f298b9b 100644 (file)
@@ -3231,16 +3231,23 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
                       struct slave *slave)
 {
        struct slave *curr_active_slave, *curr_arp_slave;
-       struct icmp6hdr *hdr = icmp6_hdr(skb);
        struct in6_addr *saddr, *daddr;
+       struct {
+               struct ipv6hdr ip6;
+               struct icmp6hdr icmp6;
+       } *combined, _combined;
 
        if (skb->pkt_type == PACKET_OTHERHOST ||
-           skb->pkt_type == PACKET_LOOPBACK ||
-           hdr->icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
+           skb->pkt_type == PACKET_LOOPBACK)
+               goto out;
+
+       combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
+       if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP ||
+           combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
                goto out;
 
-       saddr = &ipv6_hdr(skb)->saddr;
-       daddr = &ipv6_hdr(skb)->daddr;
+       saddr = &combined->ip6.saddr;
+       daddr = &combined->ip6.daddr;
 
        slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n",
                  __func__, slave->dev->name, bond_slave_state(slave),
index 215dd17..4059fcc 100644 (file)
@@ -256,6 +256,9 @@ static int sja1105_base_tx_mdio_read(struct mii_bus *bus, int phy, int reg)
        u32 tmp;
        int rc;
 
+       if (reg & MII_ADDR_C45)
+               return -EOPNOTSUPP;
+
        rc = sja1105_xfer_u32(priv, SPI_READ, regs->mdio_100base_tx + reg,
                              &tmp, NULL);
        if (rc < 0)
@@ -272,6 +275,9 @@ static int sja1105_base_tx_mdio_write(struct mii_bus *bus, int phy, int reg,
        const struct sja1105_regs *regs = priv->info->regs;
        u32 tmp = val;
 
+       if (reg & MII_ADDR_C45)
+               return -EOPNOTSUPP;
+
        return sja1105_xfer_u32(priv, SPI_WRITE, regs->mdio_100base_tx + reg,
                                &tmp, NULL);
 }
index 7633b22..711d5b5 100644 (file)
@@ -990,6 +990,7 @@ static int tse_shutdown(struct net_device *dev)
        int ret;
 
        phylink_stop(priv->phylink);
+       phylink_disconnect_phy(priv->phylink);
        netif_stop_queue(dev);
        napi_disable(&priv->napi);
 
index d350eee..5a454b5 100644 (file)
@@ -4543,13 +4543,19 @@ static struct pci_driver ena_pci_driver = {
 
 static int __init ena_init(void)
 {
+       int ret;
+
        ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
        if (!ena_wq) {
                pr_err("Failed to create workqueue\n");
                return -ENOMEM;
        }
 
-       return pci_register_driver(&ena_pci_driver);
+       ret = pci_register_driver(&ena_pci_driver);
+       if (ret)
+               destroy_workqueue(ena_wq);
+
+       return ret;
 }
 
 static void __exit ena_cleanup(void)
index cc932b3..4a1efe9 100644 (file)
@@ -1427,7 +1427,7 @@ static int ag71xx_open(struct net_device *ndev)
        if (ret) {
                netif_err(ag, link, ndev, "phylink_of_phy_connect filed with err: %i\n",
                          ret);
-               goto err;
+               return ret;
        }
 
        max_frame_len = ag71xx_max_frame_len(ndev->mtu);
@@ -1448,6 +1448,7 @@ static int ag71xx_open(struct net_device *ndev)
 
 err:
        ag71xx_rings_cleanup(ag);
+       phylink_disconnect_phy(ag->phylink);
        return ret;
 }
 
index 5fb3af5..3038386 100644 (file)
@@ -1568,7 +1568,6 @@ void bgmac_enet_remove(struct bgmac *bgmac)
        phy_disconnect(bgmac->net_dev->phydev);
        netif_napi_del(&bgmac->napi);
        bgmac_dma_free(bgmac);
-       free_netdev(bgmac->net_dev);
 }
 EXPORT_SYMBOL_GPL(bgmac_enet_remove);
 
index 11d15cd..77d4cb4 100644 (file)
@@ -795,16 +795,20 @@ static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
 {
-       struct pci_dev *dev;
        struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+       struct pci_dev *dev;
+       bool pending;
 
        if (!vf)
                return false;
 
        dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn);
-       if (dev)
-               return bnx2x_is_pcie_pending(dev);
-       return false;
+       if (!dev)
+               return false;
+       pending = bnx2x_is_pcie_pending(dev);
+       pci_dev_put(dev);
+
+       return pending;
 }
 
 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
index c78b6e9..9f8a6ce 100644 (file)
@@ -14037,8 +14037,16 @@ static struct pci_driver bnxt_pci_driver = {
 
 static int __init bnxt_init(void)
 {
+       int err;
+
        bnxt_debug_init();
-       return pci_register_driver(&bnxt_pci_driver);
+       err = pci_register_driver(&bnxt_pci_driver);
+       if (err) {
+               bnxt_debug_exit();
+               return err;
+       }
+
+       return 0;
 }
 
 static void __exit bnxt_exit(void)
index d312bd5..98793b2 100644 (file)
@@ -1794,13 +1794,10 @@ static int liquidio_open(struct net_device *netdev)
 
        ifstate_set(lio, LIO_IFSTATE_RUNNING);
 
-       if (OCTEON_CN23XX_PF(oct)) {
-               if (!oct->msix_on)
-                       if (setup_tx_poll_fn(netdev))
-                               return -1;
-       } else {
-               if (setup_tx_poll_fn(netdev))
-                       return -1;
+       if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) {
+               ret = setup_tx_poll_fn(netdev);
+               if (ret)
+                       goto err_poll;
        }
 
        netif_tx_start_all_queues(netdev);
@@ -1813,7 +1810,7 @@ static int liquidio_open(struct net_device *netdev)
        /* tell Octeon to start forwarding packets to host */
        ret = send_rx_ctrl_cmd(lio, 1);
        if (ret)
-               return ret;
+               goto err_rx_ctrl;
 
        /* start periodical statistics fetch */
        INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
@@ -1824,6 +1821,27 @@ static int liquidio_open(struct net_device *netdev)
        dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
                 netdev->name);
 
+       return 0;
+
+err_rx_ctrl:
+       if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on)
+               cleanup_tx_poll_fn(netdev);
+err_poll:
+       if (lio->ptp_clock) {
+               ptp_clock_unregister(lio->ptp_clock);
+               lio->ptp_clock = NULL;
+       }
+
+       if (oct->props[lio->ifidx].napi_enabled == 1) {
+               list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+                       napi_disable(napi);
+
+               oct->props[lio->ifidx].napi_enabled = 0;
+
+               if (OCTEON_CN23XX_PF(oct))
+                       oct->droq[0]->ops.poll_mode = 0;
+       }
+
        return ret;
 }
 
index 2f6484d..7eb2ddb 100644 (file)
@@ -1436,8 +1436,10 @@ static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
                return AE_OK;
        }
 
-       if (strncmp(string.pointer, bgx_sel, 4))
+       if (strncmp(string.pointer, bgx_sel, 4)) {
+               kfree(string.pointer);
                return AE_OK;
+       }
 
        acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
                            bgx_acpi_register_phy, NULL, bgx, NULL);
index a523ddd..de7105a 100644 (file)
@@ -798,8 +798,10 @@ static int dm9051_loop_rx(struct board_info *db)
                }
 
                ret = dm9051_stop_mrcmd(db);
-               if (ret)
+               if (ret) {
+                       dev_kfree_skb(skb);
                        return ret;
+               }
 
                skb->protocol = eth_type_trans(skb, db->ndev);
                if (db->ndev->features & NETIF_F_RXCSUM)
index 48fb391..13d5ff4 100644 (file)
@@ -542,6 +542,27 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
        return (budget != 0);
 }
 
+static bool tsnep_tx_pending(struct tsnep_tx *tx)
+{
+       unsigned long flags;
+       struct tsnep_tx_entry *entry;
+       bool pending = false;
+
+       spin_lock_irqsave(&tx->lock, flags);
+
+       if (tx->read != tx->write) {
+               entry = &tx->entry[tx->read];
+               if ((__le32_to_cpu(entry->desc_wb->properties) &
+                    TSNEP_TX_DESC_OWNER_MASK) ==
+                   (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
+                       pending = true;
+       }
+
+       spin_unlock_irqrestore(&tx->lock, flags);
+
+       return pending;
+}
+
 static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
                         int queue_index, struct tsnep_tx *tx)
 {
@@ -821,6 +842,19 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
        return done;
 }
 
+static bool tsnep_rx_pending(struct tsnep_rx *rx)
+{
+       struct tsnep_rx_entry *entry;
+
+       entry = &rx->entry[rx->read];
+       if ((__le32_to_cpu(entry->desc_wb->properties) &
+            TSNEP_DESC_OWNER_COUNTER_MASK) ==
+           (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
+               return true;
+
+       return false;
+}
+
 static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
                         int queue_index, struct tsnep_rx *rx)
 {
@@ -866,6 +900,17 @@ static void tsnep_rx_close(struct tsnep_rx *rx)
        tsnep_rx_ring_cleanup(rx);
 }
 
+static bool tsnep_pending(struct tsnep_queue *queue)
+{
+       if (queue->tx && tsnep_tx_pending(queue->tx))
+               return true;
+
+       if (queue->rx && tsnep_rx_pending(queue->rx))
+               return true;
+
+       return false;
+}
+
 static int tsnep_poll(struct napi_struct *napi, int budget)
 {
        struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
@@ -886,9 +931,19 @@ static int tsnep_poll(struct napi_struct *napi, int budget)
        if (!complete)
                return budget;
 
-       if (likely(napi_complete_done(napi, done)))
+       if (likely(napi_complete_done(napi, done))) {
                tsnep_enable_irq(queue->adapter, queue->irq_mask);
 
+               /* reschedule if work is already pending, prevent rotten packets
+                * which are transmitted or received after polling but before
+                * interrupt enable
+                */
+               if (tsnep_pending(queue)) {
+                       tsnep_disable_irq(queue->adapter, queue->irq_mask);
+                       napi_schedule(napi);
+               }
+       }
+
        return min(done, budget - 1);
 }
 
index f8c06c3..8671591 100644 (file)
@@ -2058,7 +2058,7 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
        /* enable Tx ints by setting pkt thr to 1 */
        enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
 
-       tbmr = ENETC_TBMR_EN;
+       tbmr = ENETC_TBMR_EN | ENETC_TBMR_SET_PRIO(tx_ring->prio);
        if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
                tbmr |= ENETC_TBMR_VIH;
 
@@ -2461,7 +2461,8 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
                /* Reset all ring priorities to 0 */
                for (i = 0; i < priv->num_tx_rings; i++) {
                        tx_ring = priv->tx_ring[i];
-                       enetc_set_bdr_prio(hw, tx_ring->index, 0);
+                       tx_ring->prio = 0;
+                       enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
                }
 
                return 0;
@@ -2480,7 +2481,8 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
         */
        for (i = 0; i < num_tc; i++) {
                tx_ring = priv->tx_ring[i];
-               enetc_set_bdr_prio(hw, tx_ring->index, i);
+               tx_ring->prio = i;
+               enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
        }
 
        /* Reset the number of netdev queues based on the TC count */
index 161930a..c6d8cc1 100644 (file)
@@ -95,6 +95,7 @@ struct enetc_bdr {
                void __iomem *rcir;
        };
        u16 index;
+       u16 prio;
        int bd_count; /* # of BDs */
        int next_to_use;
        int next_to_clean;
index a842e19..fcebb54 100644 (file)
@@ -137,6 +137,7 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
        struct tc_taprio_qopt_offload *taprio = type_data;
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
        struct enetc_hw *hw = &priv->si->hw;
+       struct enetc_bdr *tx_ring;
        int err;
        int i;
 
@@ -145,16 +146,20 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
                if (priv->tx_ring[i]->tsd_enable)
                        return -EBUSY;
 
-       for (i = 0; i < priv->num_tx_rings; i++)
-               enetc_set_bdr_prio(hw, priv->tx_ring[i]->index,
-                                  taprio->enable ? i : 0);
+       for (i = 0; i < priv->num_tx_rings; i++) {
+               tx_ring = priv->tx_ring[i];
+               tx_ring->prio = taprio->enable ? i : 0;
+               enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+       }
 
        err = enetc_setup_taprio(ndev, taprio);
-
-       if (err)
-               for (i = 0; i < priv->num_tx_rings; i++)
-                       enetc_set_bdr_prio(hw, priv->tx_ring[i]->index,
-                                          taprio->enable ? 0 : i);
+       if (err) {
+               for (i = 0; i < priv->num_tx_rings; i++) {
+                       tx_ring = priv->tx_ring[i];
+                       tx_ring->prio = taprio->enable ? 0 : i;
+                       enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+               }
+       }
 
        return err;
 }
index 0179fc2..17137de 100644 (file)
@@ -819,7 +819,6 @@ struct hnae3_knic_private_info {
        const struct hnae3_dcb_ops *dcb_ops;
 
        u16 int_rl_setting;
-       enum pkt_hash_types rss_type;
        void __iomem *io_base;
 };
 
index e23729a..ae27365 100644 (file)
@@ -191,23 +191,6 @@ u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle)
        return HCLGE_COMM_RSS_KEY_SIZE;
 }
 
-void hclge_comm_get_rss_type(struct hnae3_handle *nic,
-                            struct hclge_comm_rss_tuple_cfg *rss_tuple_sets)
-{
-       if (rss_tuple_sets->ipv4_tcp_en ||
-           rss_tuple_sets->ipv4_udp_en ||
-           rss_tuple_sets->ipv4_sctp_en ||
-           rss_tuple_sets->ipv6_tcp_en ||
-           rss_tuple_sets->ipv6_udp_en ||
-           rss_tuple_sets->ipv6_sctp_en)
-               nic->kinfo.rss_type = PKT_HASH_TYPE_L4;
-       else if (rss_tuple_sets->ipv4_fragment_en ||
-                rss_tuple_sets->ipv6_fragment_en)
-               nic->kinfo.rss_type = PKT_HASH_TYPE_L3;
-       else
-               nic->kinfo.rss_type = PKT_HASH_TYPE_NONE;
-}
-
 int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
                               const u8 hfunc, u8 *hash_algo)
 {
@@ -344,9 +327,6 @@ int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic,
        req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
        req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
 
-       if (is_pf)
-               hclge_comm_get_rss_type(nic, &rss_cfg->rss_tuple_sets);
-
        ret = hclge_comm_cmd_send(hw, &desc, 1);
        if (ret)
                dev_err(&hw->cmq.csq.pdev->dev,
index 946d166..92af3d2 100644 (file)
@@ -95,8 +95,6 @@ struct hclge_comm_rss_tc_mode_cmd {
 };
 
 u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle);
-void hclge_comm_get_rss_type(struct hnae3_handle *nic,
-                            struct hclge_comm_rss_tuple_cfg *rss_tuple_sets);
 void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
                                   struct hclge_comm_rss_cfg *rss_cfg);
 int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
index 4cb2421..0285779 100644 (file)
@@ -105,26 +105,28 @@ static const struct pci_device_id hns3_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
 
-#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t) \
+#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t, h) \
        {       ptype, \
                l, \
                CHECKSUM_##s, \
                HNS3_L3_TYPE_##t, \
-               1 }
+               1, \
+               h}
 
 #define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \
-               { ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0 }
+               { ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0, \
+                 PKT_HASH_TYPE_NONE }
 
 static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
        HNS3_RX_PTYPE_UNUSED_ENTRY(0),
-       HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP),
-       HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP),
-       HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP),
-       HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL),
-       HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL),
-       HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL),
-       HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM),
-       HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL),
+       HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP, PKT_HASH_TYPE_NONE),
+       HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP, PKT_HASH_TYPE_NONE),
+       HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP, PKT_HASH_TYPE_NONE),
+       HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+       HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+       HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+       HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM, PKT_HASH_TYPE_NONE),
+       HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
        HNS3_RX_PTYPE_UNUSED_ENTRY(9),
        HNS3_RX_PTYPE_UNUSED_ENTRY(10),
        HNS3_RX_PTYPE_UNUSED_ENTRY(11),
@@ -132,36 +134,36 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
        HNS3_RX_PTYPE_UNUSED_ENTRY(13),
        HNS3_RX_PTYPE_UNUSED_ENTRY(14),
        HNS3_RX_PTYPE_UNUSED_ENTRY(15),
-       HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL),
-       HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4),
-       HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4),
-       HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4),
-       HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4),
-       HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4),
-       HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4),
-       HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4),
-       HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4),
-       HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4),
+       HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+       HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE),
+       HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE),
+       HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+       HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+       HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4, PKT_HASH_TYPE_NONE),
+       HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+       HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
        HNS3_RX_PTYPE_UNUSED_ENTRY(26),
        HNS3_RX_PTYPE_UNUSED_ENTRY(27),
        HNS3_RX_PTYPE_UNUSED_ENTRY(28),
-       HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL),
-       HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL),
-       HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4),
-       HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4),
-       HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4),
-       HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4),
-       HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4),
-       HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4),
-       HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4),
+       HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+       HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+       HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+       HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+       HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+       HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
        HNS3_RX_PTYPE_UNUSED_ENTRY(38),
-       HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6),
-       HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6),
-       HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6),
-       HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6),
-       HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6),
-       HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6),
-       HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6),
+       HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+       HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+       HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+       HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
        HNS3_RX_PTYPE_UNUSED_ENTRY(46),
        HNS3_RX_PTYPE_UNUSED_ENTRY(47),
        HNS3_RX_PTYPE_UNUSED_ENTRY(48),
@@ -227,35 +229,35 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
        HNS3_RX_PTYPE_UNUSED_ENTRY(108),
        HNS3_RX_PTYPE_UNUSED_ENTRY(109),
        HNS3_RX_PTYPE_UNUSED_ENTRY(110),
-       HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6),
-       HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6),
-       HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6),
-       HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6),
-       HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6),
-       HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6),
-       HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6),
-       HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6),
-       HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6),
+       HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+       HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+       HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+       HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
        HNS3_RX_PTYPE_UNUSED_ENTRY(120),
        HNS3_RX_PTYPE_UNUSED_ENTRY(121),
        HNS3_RX_PTYPE_UNUSED_ENTRY(122),
-       HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL),
-       HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL),
-       HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4),
-       HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4),
-       HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4),
-       HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4),
-       HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4),
-       HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4),
-       HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4),
+       HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+       HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+       HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+       HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+       HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+       HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
        HNS3_RX_PTYPE_UNUSED_ENTRY(132),
-       HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6),
-       HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6),
-       HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6),
-       HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6),
-       HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6),
-       HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6),
-       HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6),
+       HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+       HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+       HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+       HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+       HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
        HNS3_RX_PTYPE_UNUSED_ENTRY(140),
        HNS3_RX_PTYPE_UNUSED_ENTRY(141),
        HNS3_RX_PTYPE_UNUSED_ENTRY(142),
@@ -3776,8 +3778,8 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
                desc_cb->reuse_flag = 1;
        } else if (frag_size <= ring->rx_copybreak) {
                ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb);
-               if (ret)
-                       goto out;
+               if (!ret)
+                       return;
        }
 
 out:
@@ -4171,15 +4173,35 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
 }
 
 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
-                                    struct sk_buff *skb, u32 rss_hash)
+                                    struct sk_buff *skb, u32 rss_hash,
+                                    u32 l234info, u32 ol_info)
 {
-       struct hnae3_handle *handle = ring->tqp->handle;
-       enum pkt_hash_types rss_type;
+       enum pkt_hash_types rss_type = PKT_HASH_TYPE_NONE;
+       struct net_device *netdev = ring_to_netdev(ring);
+       struct hns3_nic_priv *priv = netdev_priv(netdev);
 
-       if (rss_hash)
-               rss_type = handle->kinfo.rss_type;
-       else
-               rss_type = PKT_HASH_TYPE_NONE;
+       if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
+               u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
+                                           HNS3_RXD_PTYPE_S);
+
+               rss_type = hns3_rx_ptype_tbl[ptype].hash_type;
+       } else {
+               int l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+                                             HNS3_RXD_L3ID_S);
+               int l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
+                                             HNS3_RXD_L4ID_S);
+
+               if (l3_type == HNS3_L3_TYPE_IPV4 ||
+                   l3_type == HNS3_L3_TYPE_IPV6) {
+                       if (l4_type == HNS3_L4_TYPE_UDP ||
+                           l4_type == HNS3_L4_TYPE_TCP ||
+                           l4_type == HNS3_L4_TYPE_SCTP)
+                               rss_type = PKT_HASH_TYPE_L4;
+                       else if (l4_type == HNS3_L4_TYPE_IGMP ||
+                                l4_type == HNS3_L4_TYPE_ICMP)
+                               rss_type = PKT_HASH_TYPE_L3;
+               }
+       }
 
        skb_set_hash(skb, rss_hash, rss_type);
 }
@@ -4282,7 +4304,8 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
 
        ring->tqp_vector->rx_group.total_bytes += len;
 
-       hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
+       hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash),
+                                l234info, ol_info);
        return 0;
 }
 
index 133a054..294a14b 100644 (file)
@@ -404,6 +404,7 @@ struct hns3_rx_ptype {
        u32 ip_summed : 2;
        u32 l3_type : 4;
        u32 valid : 1;
+       u32 hash_type: 3;
 };
 
 struct ring_stats {
index 987271d..4e54f91 100644 (file)
@@ -3443,6 +3443,7 @@ static int hclge_update_tp_port_info(struct hclge_dev *hdev)
        hdev->hw.mac.autoneg = cmd.base.autoneg;
        hdev->hw.mac.speed = cmd.base.speed;
        hdev->hw.mac.duplex = cmd.base.duplex;
+       linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising);
 
        return 0;
 }
@@ -4859,7 +4860,6 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
                return ret;
        }
 
-       hclge_comm_get_rss_type(&vport->nic, &hdev->rss_cfg.rss_tuple_sets);
        return 0;
 }
 
@@ -11587,9 +11587,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
        if (ret)
                goto err_msi_irq_uninit;
 
-       if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
-           !hnae3_dev_phy_imp_supported(hdev)) {
-               ret = hclge_mac_mdio_config(hdev);
+       if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
+               if (hnae3_dev_phy_imp_supported(hdev))
+                       ret = hclge_update_tp_port_info(hdev);
+               else
+                       ret = hclge_mac_mdio_config(hdev);
+
                if (ret)
                        goto err_msi_irq_uninit;
        }
index e1f54a2..2d6906a 100644 (file)
@@ -1474,8 +1474,15 @@ static struct pci_driver hinic_driver = {
 
 static int __init hinic_module_init(void)
 {
+       int ret;
+
        hinic_dbg_register_debugfs(HINIC_DRV_NAME);
-       return pci_register_driver(&hinic_driver);
+
+       ret = pci_register_driver(&hinic_driver);
+       if (ret)
+               hinic_dbg_unregister_debugfs();
+
+       return ret;
 }
 
 static void __exit hinic_module_exit(void)
index 3f6187c..0d1bab4 100644 (file)
@@ -298,7 +298,6 @@ struct iavf_adapter {
 #define IAVF_FLAG_QUEUES_DISABLED              BIT(17)
 #define IAVF_FLAG_SETUP_NETDEV_FEATURES                BIT(18)
 #define IAVF_FLAG_REINIT_MSIX_NEEDED           BIT(20)
-#define IAVF_FLAG_INITIAL_MAC_SET              BIT(23)
 /* duplicates for common code */
 #define IAVF_FLAG_DCB_ENABLED                  0
        /* flags for admin queue service task */
index 3fc5723..d746529 100644 (file)
@@ -1087,12 +1087,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
        if (ret)
                return ret;
 
-       /* If this is an initial set MAC during VF spawn do not wait */
-       if (adapter->flags & IAVF_FLAG_INITIAL_MAC_SET) {
-               adapter->flags &= ~IAVF_FLAG_INITIAL_MAC_SET;
-               return 0;
-       }
-
        ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
                                               iavf_is_mac_set_handled(netdev, addr->sa_data),
                                               msecs_to_jiffies(2500));
@@ -2605,8 +2599,6 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
                ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
        }
 
-       adapter->flags |= IAVF_FLAG_INITIAL_MAC_SET;
-
        adapter->tx_desc_count = IAVF_DEFAULT_TXD;
        adapter->rx_desc_count = IAVF_DEFAULT_RXD;
        err = iavf_init_interrupt_scheme(adapter);
@@ -2921,7 +2913,6 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
        iavf_free_queues(adapter);
        memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
        iavf_shutdown_adminq(&adapter->hw);
-       adapter->netdev->flags &= ~IFF_UP;
        adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
        iavf_change_state(adapter, __IAVF_DOWN);
        wake_up(&adapter->down_waitqueue);
@@ -3021,6 +3012,11 @@ static void iavf_reset_task(struct work_struct *work)
                iavf_disable_vf(adapter);
                mutex_unlock(&adapter->client_lock);
                mutex_unlock(&adapter->crit_lock);
+               if (netif_running(netdev)) {
+                       rtnl_lock();
+                       dev_close(netdev);
+                       rtnl_unlock();
+               }
                return; /* Do not attempt to reinit. It's dead, Jim. */
        }
 
@@ -3033,6 +3029,7 @@ continue_reset:
 
        if (running) {
                netif_carrier_off(netdev);
+               netif_tx_stop_all_queues(netdev);
                adapter->link_up = false;
                iavf_napi_disable_all(adapter);
        }
@@ -3172,6 +3169,16 @@ reset_err:
 
        mutex_unlock(&adapter->client_lock);
        mutex_unlock(&adapter->crit_lock);
+
+       if (netif_running(netdev)) {
+               /* Close device to ensure that Tx queues will not be started
+                * during netif_device_attach() at the end of the reset task.
+                */
+               rtnl_lock();
+               dev_close(netdev);
+               rtnl_unlock();
+       }
+
        dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
 reset_finish:
        rtnl_lock();
@@ -5035,23 +5042,21 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
 static void iavf_remove(struct pci_dev *pdev)
 {
        struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
-       struct net_device *netdev = adapter->netdev;
        struct iavf_fdir_fltr *fdir, *fdirtmp;
        struct iavf_vlan_filter *vlf, *vlftmp;
+       struct iavf_cloud_filter *cf, *cftmp;
        struct iavf_adv_rss *rss, *rsstmp;
        struct iavf_mac_filter *f, *ftmp;
-       struct iavf_cloud_filter *cf, *cftmp;
-       struct iavf_hw *hw = &adapter->hw;
+       struct net_device *netdev;
+       struct iavf_hw *hw;
        int err;
 
-       /* When reboot/shutdown is in progress no need to do anything
-        * as the adapter is already REMOVE state that was set during
-        * iavf_shutdown() callback.
-        */
-       if (adapter->state == __IAVF_REMOVE)
+       netdev = adapter->netdev;
+       hw = &adapter->hw;
+
+       if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
                return;
 
-       set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
        /* Wait until port initialization is complete.
         * There are flows where register/unregister netdev may race.
         */
index 0f67187..ca28984 100644 (file)
@@ -3145,15 +3145,15 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
  */
 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
 {
-       irqreturn_t ret = IRQ_HANDLED;
        struct ice_pf *pf = data;
-       bool irq_handled;
 
-       irq_handled = ice_ptp_process_ts(pf);
-       if (!irq_handled)
-               ret = IRQ_WAKE_THREAD;
+       if (ice_is_reset_in_progress(pf->state))
+               return IRQ_HANDLED;
 
-       return ret;
+       while (!ice_ptp_process_ts(pf))
+               usleep_range(50, 100);
+
+       return IRQ_HANDLED;
 }
 
 /**
index 011b727..0f66846 100644 (file)
@@ -614,11 +614,14 @@ static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
  * 2) extend the 40b timestamp value to get a 64bit timestamp
  * 3) send that timestamp to the stack
  *
- * After looping, if we still have waiting SKBs, return true. This may cause us
- * effectively poll even when not strictly necessary. We do this because it's
- * possible a new timestamp was requested around the same time as the interrupt.
- * In some cases hardware might not interrupt us again when the timestamp is
- * captured.
+ * Returns true if all timestamps were handled, and false if any slots remain
+ * without a timestamp.
+ *
+ * After looping, if we still have waiting SKBs, return false. This may cause
+ * us effectively poll even when not strictly necessary. We do this because
+ * it's possible a new timestamp was requested around the same time as the
+ * interrupt. In some cases hardware might not interrupt us again when the
+ * timestamp is captured.
  *
  * Note that we only take the tracking lock when clearing the bit and when
  * checking if we need to re-queue this task. The only place where bits can be
@@ -641,7 +644,7 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
        u8 idx;
 
        if (!tx->init)
-               return false;
+               return true;
 
        ptp_port = container_of(tx, struct ice_ptp_port, tx);
        pf = ptp_port_to_pf(ptp_port);
@@ -2381,10 +2384,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
  */
 bool ice_ptp_process_ts(struct ice_pf *pf)
 {
-       if (pf->ptp.port.tx.init)
-               return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
-
-       return false;
+       return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
 }
 
 static void ice_ptp_periodic_work(struct kthread_work *work)
index eb0fb81..b399bdb 100644 (file)
@@ -7350,6 +7350,7 @@ static int mvpp2_get_sram(struct platform_device *pdev,
                          struct mvpp2 *priv)
 {
        struct resource *res;
+       void __iomem *base;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
        if (!res) {
@@ -7360,9 +7361,12 @@ static int mvpp2_get_sram(struct platform_device *pdev,
                return 0;
        }
 
-       priv->cm3_base = devm_ioremap_resource(&pdev->dev, res);
+       base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
 
-       return PTR_ERR_OR_ZERO(priv->cm3_base);
+       priv->cm3_base = base;
+       return 0;
 }
 
 static int mvpp2_probe(struct platform_device *pdev)
index 9089adc..b45dd7f 100644 (file)
@@ -521,14 +521,12 @@ static int octep_open(struct net_device *netdev)
        octep_oq_dbell_init(oct);
 
        ret = octep_get_link_status(oct);
-       if (ret)
+       if (ret > 0)
                octep_link_up(netdev);
 
        return 0;
 
 set_queues_err:
-       octep_napi_disable(oct);
-       octep_napi_delete(oct);
        octep_clean_irqs(oct);
 setup_irq_err:
        octep_free_oqs(oct);
@@ -958,7 +956,7 @@ int octep_device_setup(struct octep_device *oct)
        ret = octep_ctrl_mbox_init(ctrl_mbox);
        if (ret) {
                dev_err(&pdev->dev, "Failed to initialize control mbox\n");
-               return -1;
+               goto unsupported_dev;
        }
        oct->ctrl_mbox_ifstats_offset = OCTEP_CTRL_MBOX_SZ(ctrl_mbox->h2fq.elem_sz,
                                                           ctrl_mbox->h2fq.elem_cnt,
@@ -968,6 +966,10 @@ int octep_device_setup(struct octep_device *oct)
        return 0;
 
 unsupported_dev:
+       for (i = 0; i < OCTEP_MMIO_REGIONS; i++)
+               iounmap(oct->mmio[i].hw_addr);
+
+       kfree(oct->conf);
        return -1;
 }
 
@@ -1070,7 +1072,11 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        netdev->max_mtu = OCTEP_MAX_MTU;
        netdev->mtu = OCTEP_DEFAULT_MTU;
 
-       octep_get_mac_addr(octep_dev, octep_dev->mac_addr);
+       err = octep_get_mac_addr(octep_dev, octep_dev->mac_addr);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to get mac address\n");
+               goto register_dev_err;
+       }
        eth_hw_addr_set(netdev, octep_dev->mac_addr);
 
        err = register_netdev(netdev);
index 6b4f640..993ac18 100644 (file)
@@ -32,7 +32,6 @@ config OCTEONTX2_PF
        tristate "Marvell OcteonTX2 NIC Physical Function driver"
        select OCTEONTX2_MBOX
        select NET_DEVLINK
-       depends on MACSEC || !MACSEC
        depends on (64BIT && COMPILE_TEST) || ARM64
        select DIMLIB
        depends on PCI
index 4a343f8..c0bedf4 100644 (file)
@@ -951,7 +951,7 @@ static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction d
                else
                        event.intr_mask = (dir == MCS_RX) ?
                                          MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
-                                         MCS_BBE_RX_PLFIFO_OVERFLOW_INT;
+                                         MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
 
                /* Notify the lmac_id info which ran into BBE fatal error */
                event.lmac_id = i & 0x3ULL;
index a1970eb..f66dde2 100644 (file)
@@ -880,6 +880,8 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
                sprintf(lmac, "LMAC%d", lmac_id);
                seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
                           dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
+
+               pci_dev_put(pdev);
        }
        return 0;
 }
@@ -2566,6 +2568,7 @@ static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
                }
        }
 
+       pci_dev_put(pdev);
        return 0;
 }
 
index 7646bb2..a62c1b3 100644 (file)
@@ -4985,6 +4985,8 @@ static int nix_setup_ipolicers(struct rvu *rvu,
                ipolicer->ref_count = devm_kcalloc(rvu->dev,
                                                   ipolicer->band_prof.max,
                                                   sizeof(u16), GFP_KERNEL);
+               if (!ipolicer->ref_count)
+                       return -ENOMEM;
        }
 
        /* Set policer timeunit to 2us ie  (19 + 1) * 100 nsec = 2us */
index b04fb22..ae50d56 100644 (file)
@@ -62,15 +62,18 @@ int rvu_sdp_init(struct rvu *rvu)
                pfvf->sdp_info = devm_kzalloc(rvu->dev,
                                              sizeof(struct sdp_node_info),
                                              GFP_KERNEL);
-               if (!pfvf->sdp_info)
+               if (!pfvf->sdp_info) {
+                       pci_dev_put(pdev);
                        return -ENOMEM;
+               }
 
                dev_info(rvu->dev, "SDP PF number:%d\n", sdp_pf_num[i]);
 
-               put_device(&pdev->dev);
                i++;
        }
 
+       pci_dev_put(pdev);
+
        return 0;
 }
 
index 24f9d60..47796e4 100644 (file)
@@ -746,6 +746,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
        return 0;
 
 err_sfp_bind:
+       unregister_netdev(dev);
 err_register_netdev:
        prestera_port_list_del(port);
 err_port_init:
index 7cd3815..1d36619 100644 (file)
@@ -2378,8 +2378,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
                                data + NET_SKB_PAD + eth->ip_align,
                                ring->buf_size, DMA_FROM_DEVICE);
                        if (unlikely(dma_mapping_error(eth->dma_dev,
-                                                      dma_addr)))
+                                                      dma_addr))) {
+                               skb_free_frag(data);
                                return -ENOMEM;
+                       }
                }
                rxd->rxd1 = (unsigned int)dma_addr;
                ring->data[i] = data;
@@ -2996,8 +2998,10 @@ static int mtk_open(struct net_device *dev)
                int i;
 
                err = mtk_start_dma(eth);
-               if (err)
+               if (err) {
+                       phylink_disconnect_phy(mac->phylink);
                        return err;
+               }
 
                for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
                        mtk_ppe_start(eth->ppe[i]);
@@ -4143,13 +4147,13 @@ static int mtk_probe(struct platform_device *pdev)
                                                   eth->soc->offload_version, i);
                        if (!eth->ppe[i]) {
                                err = -ENOMEM;
-                               goto err_free_dev;
+                               goto err_deinit_ppe;
                        }
                }
 
                err = mtk_eth_offload_init(eth);
                if (err)
-                       goto err_free_dev;
+                       goto err_deinit_ppe;
        }
 
        for (i = 0; i < MTK_MAX_DEVS; i++) {
@@ -4159,7 +4163,7 @@ static int mtk_probe(struct platform_device *pdev)
                err = register_netdev(eth->netdev[i]);
                if (err) {
                        dev_err(eth->dev, "error bringing up device\n");
-                       goto err_deinit_mdio;
+                       goto err_deinit_ppe;
                } else
                        netif_info(eth, probe, eth->netdev[i],
                                   "mediatek frame engine at 0x%08lx, irq %d\n",
@@ -4177,7 +4181,8 @@ static int mtk_probe(struct platform_device *pdev)
 
        return 0;
 
-err_deinit_mdio:
+err_deinit_ppe:
+       mtk_ppe_deinit(eth);
        mtk_mdio_cleanup(eth);
 err_free_dev:
        mtk_free_dev(eth);
index 2d8ca99..784ecb2 100644 (file)
@@ -737,7 +737,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
                                  MTK_PPE_ENTRIES * soc->foe_entry_size,
                                  &ppe->foe_phys, GFP_KERNEL);
        if (!foe)
-               return NULL;
+               goto err_free_l2_flows;
 
        ppe->foe_table = foe;
 
@@ -745,11 +745,26 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
                        sizeof(*ppe->foe_flow);
        ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
        if (!ppe->foe_flow)
-               return NULL;
+               goto err_free_l2_flows;
 
        mtk_ppe_debugfs_init(ppe, index);
 
        return ppe;
+
+err_free_l2_flows:
+       rhashtable_destroy(&ppe->l2_flows);
+       return NULL;
+}
+
+void mtk_ppe_deinit(struct mtk_eth *eth)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
+               if (!eth->ppe[i])
+                       return;
+               rhashtable_destroy(&eth->ppe[i]->l2_flows);
+       }
 }
 
 static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
index 0b7a67a..a09c325 100644 (file)
@@ -304,6 +304,7 @@ struct mtk_ppe {
 
 struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
                             int version, int index);
+void mtk_ppe_deinit(struct mtk_eth *eth);
 void mtk_ppe_start(struct mtk_ppe *ppe);
 int mtk_ppe_stop(struct mtk_ppe *ppe);
 
index b149e60..48cfaa7 100644 (file)
@@ -697,7 +697,8 @@ static int mlx4_create_zones(struct mlx4_dev *dev,
                        err = mlx4_bitmap_init(*bitmap + k, 1,
                                               MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
                                               0);
-                       mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
+                       if (!err)
+                               mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
                }
 
                if (err)
index 2e0d59c..74bd05e 100644 (file)
@@ -45,6 +45,8 @@
 #include "mlx5_core.h"
 #include "lib/eq.h"
 #include "lib/tout.h"
+#define CREATE_TRACE_POINTS
+#include "diag/cmd_tracepoint.h"
 
 enum {
        CMD_IF_REV = 5,
@@ -785,27 +787,14 @@ EXPORT_SYMBOL(mlx5_cmd_out_err);
 static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
 {
        u16 opcode, op_mod;
-       u32 syndrome;
-       u8  status;
        u16 uid;
-       int err;
-
-       syndrome = MLX5_GET(mbox_out, out, syndrome);
-       status = MLX5_GET(mbox_out, out, status);
 
        opcode = MLX5_GET(mbox_in, in, opcode);
        op_mod = MLX5_GET(mbox_in, in, op_mod);
        uid    = MLX5_GET(mbox_in, in, uid);
 
-       err = cmd_status_to_err(status);
-
        if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
                mlx5_cmd_out_err(dev, opcode, op_mod, out);
-       else
-               mlx5_core_dbg(dev,
-                       "%s(0x%x) op_mod(0x%x) uid(%d) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
-                       mlx5_command_str(opcode), opcode, op_mod, uid,
-                       cmd_status_str(status), status, syndrome, err);
 }
 
 int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
@@ -1016,6 +1005,7 @@ static void cmd_work_handler(struct work_struct *work)
                cmd_ent_get(ent);
        set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
 
+       cmd_ent_get(ent); /* for the _real_ FW event on completion */
        /* Skip sending command to fw if internal error */
        if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
                ent->ret = -ENXIO;
@@ -1023,7 +1013,6 @@ static void cmd_work_handler(struct work_struct *work)
                return;
        }
 
-       cmd_ent_get(ent); /* for the _real_ FW event on completion */
        /* ring doorbell after the descriptor is valid */
        mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
        wmb();
@@ -1672,8 +1661,8 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
                                cmd_ent_put(ent); /* timeout work was canceled */
 
                        if (!forced || /* Real FW completion */
-                           pci_channel_offline(dev->pdev) || /* FW is inaccessible */
-                           dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+                            mlx5_cmd_is_down(dev) || /* No real FW completion is expected */
+                            !opcode_allowed(cmd, ent->op))
                                cmd_ent_put(ent);
 
                        ent->ts2 = ktime_get_ns();
@@ -1892,6 +1881,16 @@ out_in:
        return err;
 }
 
+static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
+{
+       u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
+       u8 status = MLX5_GET(mbox_out, out, status);
+
+       trace_mlx5_cmd(mlx5_command_str(opcode), opcode, op_mod,
+                      cmd_status_str(status), status, syndrome,
+                      cmd_status_to_err(status));
+}
+
 static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
                           u32 syndrome, int err)
 {
@@ -1914,7 +1913,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
 }
 
 /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
-static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *out)
+static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op_mod, void *out)
 {
        u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
        u8 status = MLX5_GET(mbox_out, out, status);
@@ -1922,8 +1921,10 @@ static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *
        if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */
                err = -EIO;
 
-       if (!err && status != MLX5_CMD_STAT_OK)
+       if (!err && status != MLX5_CMD_STAT_OK) {
                err = -EREMOTEIO;
+               mlx5_cmd_err_trace(dev, opcode, op_mod, out);
+       }
 
        cmd_status_log(dev, opcode, status, syndrome, err);
        return err;
@@ -1951,9 +1952,9 @@ int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int
 {
        int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
        u16 opcode = MLX5_GET(mbox_in, in, opcode);
+       u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
 
-       err = cmd_status_err(dev, err, opcode, out);
-       return err;
+       return cmd_status_err(dev, err, opcode, op_mod, out);
 }
 EXPORT_SYMBOL(mlx5_cmd_do);
 
@@ -1997,8 +1998,9 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
 {
        int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
        u16 opcode = MLX5_GET(mbox_in, in, opcode);
+       u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
 
-       err = cmd_status_err(dev, err, opcode, out);
+       err = cmd_status_err(dev, err, opcode, op_mod, out);
        return mlx5_cmd_check(dev, err, in, out);
 }
 EXPORT_SYMBOL(mlx5_cmd_exec_polling);
@@ -2034,7 +2036,7 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
        struct mlx5_async_ctx *ctx;
 
        ctx = work->ctx;
-       status = cmd_status_err(ctx->dev, status, work->opcode, work->out);
+       status = cmd_status_err(ctx->dev, status, work->opcode, work->op_mod, work->out);
        work->user_callback(status, work);
        if (atomic_dec_and_test(&ctx->num_inflight))
                complete(&ctx->inflight_done);
@@ -2049,6 +2051,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
        work->ctx = ctx;
        work->user_callback = callback;
        work->opcode = MLX5_GET(mbox_in, in, opcode);
+       work->op_mod = MLX5_GET(mbox_in, in, op_mod);
        work->out = out;
        if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
                return -EIO;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h
new file mode 100644 (file)
index 0000000..406ebe1
--- /dev/null
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+#if !defined(_MLX5_CMD_TP_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLX5_CMD_TP_H_
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+TRACE_EVENT(mlx5_cmd,
+           TP_PROTO(const char *command_str, u16 opcode, u16 op_mod,
+                    const char *status_str, u8 status, u32 syndrome, int err),
+           TP_ARGS(command_str, opcode, op_mod, status_str, status, syndrome, err),
+           TP_STRUCT__entry(__string(command_str, command_str)
+                            __field(u16, opcode)
+                            __field(u16, op_mod)
+                           __string(status_str, status_str)
+                           __field(u8, status)
+                           __field(u32, syndrome)
+                           __field(int, err)
+                           ),
+           TP_fast_assign(__assign_str(command_str, command_str);
+                       __entry->opcode = opcode;
+                       __entry->op_mod = op_mod;
+                       __assign_str(status_str, status_str);
+                       __entry->status = status;
+                       __entry->syndrome = syndrome;
+                       __entry->err = err;
+           ),
+           TP_printk("%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)",
+                     __get_str(command_str), __entry->opcode, __entry->op_mod,
+                     __get_str(status_str), __entry->status, __entry->syndrome,
+                     __entry->err)
+);
+
+#endif /* _MLX5_CMD_TP_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ./diag
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cmd_tracepoint
+#include <trace/define_trace.h>
index 978a2bb..2183138 100644 (file)
@@ -638,7 +638,7 @@ static void mlx5_tracer_handle_timestamp_trace(struct mlx5_fw_tracer *tracer,
                        trace_timestamp = (timestamp_event.timestamp & MASK_52_7) |
                                          (str_frmt->timestamp & MASK_6_0);
                else
-                       trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) - 1) |
+                       trace_timestamp = ((timestamp_event.timestamp - 1) & MASK_52_7) |
                                          (str_frmt->timestamp & MASK_6_0);
 
                mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp);
index 5aff979..ff73d25 100644 (file)
@@ -224,15 +224,16 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
        list_for_each_entry(flow, flow_list, tmp_list) {
                if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
                        continue;
-               spec = &flow->attr->parse_attr->spec;
-
-               /* update from encap rule to slow path rule */
-               rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
 
                attr = mlx5e_tc_get_encap_attr(flow);
                esw_attr = attr->esw_attr;
                /* mark the flow's encap dest as non-valid */
                esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
+               esw_attr->dests[flow->tmp_entry_index].pkt_reformat = NULL;
+
+               /* update from encap rule to slow path rule */
+               spec = &flow->attr->parse_attr->spec;
+               rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
 
                if (IS_ERR(rule)) {
                        err = PTR_ERR(rule);
@@ -251,6 +252,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
        /* we know that the encap is valid */
        e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
        mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
+       e->pkt_reformat = NULL;
 }
 
 static void mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow,
@@ -762,8 +764,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
                       struct net_device *mirred_dev,
                       int out_index,
                       struct netlink_ext_ack *extack,
-                      struct net_device **encap_dev,
-                      bool *encap_valid)
+                      struct net_device **encap_dev)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_tc_flow_parse_attr *parse_attr;
@@ -878,9 +879,8 @@ attach_flow:
        if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
                attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat;
                attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
-               *encap_valid = true;
        } else {
-               *encap_valid = false;
+               flow_flag_set(flow, SLOW);
        }
        mutex_unlock(&esw->offloads.encap_tbl_lock);
 
index d542b84..8ad273d 100644 (file)
@@ -17,8 +17,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
                       struct net_device *mirred_dev,
                       int out_index,
                       struct netlink_ext_ack *extack,
-                      struct net_device **encap_dev,
-                      bool *encap_valid);
+                      struct net_device **encap_dev);
 
 int mlx5e_attach_decap(struct mlx5e_priv *priv,
                       struct mlx5e_tc_flow *flow,
index 2ef36cb..3dc6c98 100644 (file)
@@ -368,15 +368,15 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
        obj_attrs.aso_pdn = macsec->aso.pdn;
        obj_attrs.epn_state = sa->epn_state;
 
-       if (is_tx) {
-               obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci);
-               key = &ctx->sa.tx_sa->key;
-       } else {
-               obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
-               key = &ctx->sa.rx_sa->key;
+       key = (is_tx) ? &ctx->sa.tx_sa->key : &ctx->sa.rx_sa->key;
+
+       if (sa->epn_state.epn_enabled) {
+               obj_attrs.ssci = (is_tx) ? cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci) :
+                                          cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
+
+               memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
        }
 
-       memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
        obj_attrs.replay_window = ctx->secy->replay_window;
        obj_attrs.replay_protect = ctx->secy->replay_protect;
 
@@ -1155,7 +1155,7 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
                                continue;
 
                        if (rx_sa->active) {
-                               err = mlx5e_macsec_init_sa(ctx, rx_sa, false, false);
+                               err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
                                if (err)
                                        goto out;
                        }
@@ -1536,6 +1536,8 @@ static void macsec_async_event(struct work_struct *work)
 
        async_work = container_of(work, struct mlx5e_macsec_async_work, work);
        macsec = async_work->macsec;
+       mutex_lock(&macsec->lock);
+
        mdev = async_work->mdev;
        obj_id = async_work->obj_id;
        macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
@@ -1557,6 +1559,7 @@ static void macsec_async_event(struct work_struct *work)
 
 out_async_work:
        kfree(async_work);
+       mutex_unlock(&macsec->lock);
 }
 
 static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
index 24aa25d..1728e19 100644 (file)
@@ -35,7 +35,6 @@
 #include "en.h"
 #include "en/port.h"
 #include "en/params.h"
-#include "en/xsk/pool.h"
 #include "en/ptp.h"
 #include "lib/clock.h"
 #include "en/fs_ethtool.h"
@@ -412,15 +411,8 @@ void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
                                struct ethtool_channels *ch)
 {
        mutex_lock(&priv->state_lock);
-
        ch->max_combined   = priv->max_nch;
        ch->combined_count = priv->channels.params.num_channels;
-       if (priv->xsk.refcnt) {
-               /* The upper half are XSK queues. */
-               ch->max_combined *= 2;
-               ch->combined_count *= 2;
-       }
-
        mutex_unlock(&priv->state_lock);
 }
 
@@ -454,16 +446,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
 
        mutex_lock(&priv->state_lock);
 
-       /* Don't allow changing the number of channels if there is an active
-        * XSK, because the numeration of the XSK and regular RQs will change.
-        */
-       if (priv->xsk.refcnt) {
-               err = -EINVAL;
-               netdev_err(priv->netdev, "%s: AF_XDP is active, cannot change the number of channels\n",
-                          __func__);
-               goto out;
-       }
-
        /* Don't allow changing the number of channels if HTB offload is active,
         * because the numeration of the QoS SQs will change, while per-queue
         * qdiscs are attached.
index e3a4f01..5e41dfd 100644 (file)
@@ -206,10 +206,11 @@ static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
 static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_mode)
 {
        u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
+       u32 sz;
 
-       WARN_ON_ONCE(entries * umr_entry_size % MLX5_OCTWORD);
+       sz = ALIGN(entries * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);
 
-       return entries * umr_entry_size / MLX5_OCTWORD;
+       return sz / MLX5_OCTWORD;
 }
 
 static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
index 5a6aa61..bd9936a 100644 (file)
@@ -1634,7 +1634,6 @@ set_encap_dests(struct mlx5e_priv *priv,
                struct mlx5e_tc_flow *flow,
                struct mlx5_flow_attr *attr,
                struct netlink_ext_ack *extack,
-               bool *encap_valid,
                bool *vf_tun)
 {
        struct mlx5e_tc_flow_parse_attr *parse_attr;
@@ -1651,7 +1650,6 @@ set_encap_dests(struct mlx5e_priv *priv,
        parse_attr = attr->parse_attr;
        esw_attr = attr->esw_attr;
        *vf_tun = false;
-       *encap_valid = true;
 
        for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
                struct net_device *out_dev;
@@ -1668,7 +1666,7 @@ set_encap_dests(struct mlx5e_priv *priv,
                        goto out;
                }
                err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
-                                        extack, &encap_dev, encap_valid);
+                                        extack, &encap_dev);
                dev_put(out_dev);
                if (err)
                        goto out;
@@ -1732,8 +1730,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
        struct mlx5e_tc_flow_parse_attr *parse_attr;
        struct mlx5_flow_attr *attr = flow->attr;
        struct mlx5_esw_flow_attr *esw_attr;
-       bool vf_tun, encap_valid;
        u32 max_prio, max_chain;
+       bool vf_tun;
        int err = 0;
 
        parse_attr = attr->parse_attr;
@@ -1823,7 +1821,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                esw_attr->int_port = int_port;
        }
 
-       err = set_encap_dests(priv, flow, attr, extack, &encap_valid, &vf_tun);
+       err = set_encap_dests(priv, flow, attr, extack, &vf_tun);
        if (err)
                goto err_out;
 
@@ -1853,7 +1851,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
         * (1) there's no error
         * (2) there's an encap action and we don't have valid neigh
         */
-       if (!encap_valid || flow_flag_test(flow, SLOW))
+       if (flow_flag_test(flow, SLOW))
                flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
        else
                flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
@@ -3759,7 +3757,7 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
        struct mlx5e_post_act *post_act = get_post_action(flow->priv);
        struct mlx5_flow_attr *attr, *next_attr = NULL;
        struct mlx5e_post_act_handle *handle;
-       bool vf_tun, encap_valid = true;
+       bool vf_tun;
        int err;
 
        /* This is going in reverse order as needed.
@@ -3781,13 +3779,10 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
                if (list_is_last(&attr->list, &flow->attrs))
                        break;
 
-               err = set_encap_dests(flow->priv, flow, attr, extack, &encap_valid, &vf_tun);
+               err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun);
                if (err)
                        goto out_free;
 
-               if (!encap_valid)
-                       flow_flag_set(flow, SLOW);
-
                err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
                if (err)
                        goto out_free;
index 728ca9f..3fda75f 100644 (file)
@@ -433,7 +433,7 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
                    mlx5_lag_mpesw_is_activated(esw->dev))
                        dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
        }
-       if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
+       if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
                if (pkt_reformat) {
                        flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
                        flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
index 9d908a0..1e46f9a 100644 (file)
@@ -9,7 +9,8 @@ enum {
        MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
        MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
        MLX5_FW_RESET_FLAGS_PENDING_COMP,
-       MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS
+       MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
+       MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED
 };
 
 struct mlx5_fw_reset {
@@ -406,7 +407,7 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
        err = mlx5_pci_link_toggle(dev);
        if (err) {
                mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err);
-               goto done;
+               set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags);
        }
 
        mlx5_enter_error_state(dev, true);
@@ -482,6 +483,10 @@ int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
                goto out;
        }
        err = fw_reset->ret;
+       if (test_and_clear_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags)) {
+               mlx5_unload_one_devl_locked(dev);
+               mlx5_load_one_devl_locked(dev, false);
+       }
 out:
        clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
        return err;
index a9f4ede..be1307a 100644 (file)
@@ -228,9 +228,8 @@ static void mlx5_ldev_free(struct kref *ref)
        if (ldev->nb.notifier_call)
                unregister_netdevice_notifier_net(&init_net, &ldev->nb);
        mlx5_lag_mp_cleanup(ldev);
-       mlx5_lag_mpesw_cleanup(ldev);
-       cancel_work_sync(&ldev->mpesw_work);
        destroy_workqueue(ldev->wq);
+       mlx5_lag_mpesw_cleanup(ldev);
        mutex_destroy(&ldev->lock);
        kfree(ldev);
 }
index ce2ce8c..f30ac2d 100644 (file)
@@ -50,6 +50,19 @@ struct lag_tracker {
        enum netdev_lag_hash hash_type;
 };
 
+enum mpesw_op {
+       MLX5_MPESW_OP_ENABLE,
+       MLX5_MPESW_OP_DISABLE,
+};
+
+struct mlx5_mpesw_work_st {
+       struct work_struct work;
+       struct mlx5_lag    *lag;
+       enum mpesw_op      op;
+       struct completion  comp;
+       int result;
+};
+
 /* LAG data of a ConnectX card.
  * It serves both its phys functions.
  */
@@ -66,7 +79,6 @@ struct mlx5_lag {
        struct lag_tracker        tracker;
        struct workqueue_struct   *wq;
        struct delayed_work       bond_work;
-       struct work_struct        mpesw_work;
        struct notifier_block     nb;
        struct lag_mp             lag_mp;
        struct mlx5_lag_port_sel  port_sel;
index f643202..c17e8f1 100644 (file)
@@ -7,63 +7,95 @@
 #include "eswitch.h"
 #include "lib/mlx5.h"
 
-void mlx5_mpesw_work(struct work_struct *work)
+static int add_mpesw_rule(struct mlx5_lag *ldev)
 {
-       struct mlx5_lag *ldev = container_of(work, struct mlx5_lag, mpesw_work);
+       struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+       int err;
 
-       mutex_lock(&ldev->lock);
-       mlx5_disable_lag(ldev);
-       mutex_unlock(&ldev->lock);
-}
+       if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
+               return 0;
 
-static void mlx5_lag_disable_mpesw(struct mlx5_core_dev *dev)
-{
-       struct mlx5_lag *ldev = dev->priv.lag;
+       if (ldev->mode != MLX5_LAG_MODE_NONE) {
+               err = -EINVAL;
+               goto out_err;
+       }
 
-       if (!queue_work(ldev->wq, &ldev->mpesw_work))
-               mlx5_core_warn(dev, "failed to queue work\n");
+       err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
+       if (err) {
+               mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
+               goto out_err;
+       }
+
+       return 0;
+
+out_err:
+       atomic_dec(&ldev->lag_mpesw.mpesw_rule_count);
+       return err;
 }
 
-void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
+static void del_mpesw_rule(struct mlx5_lag *ldev)
 {
-       struct mlx5_lag *ldev = dev->priv.lag;
+       if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
+           ldev->mode == MLX5_LAG_MODE_MPESW)
+               mlx5_disable_lag(ldev);
+}
 
-       if (!ldev)
-               return;
+static void mlx5_mpesw_work(struct work_struct *work)
+{
+       struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
+       struct mlx5_lag *ldev = mpesww->lag;
 
        mutex_lock(&ldev->lock);
-       if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
-           ldev->mode == MLX5_LAG_MODE_MPESW)
-               mlx5_lag_disable_mpesw(dev);
+       if (mpesww->op == MLX5_MPESW_OP_ENABLE)
+               mpesww->result = add_mpesw_rule(ldev);
+       else if (mpesww->op == MLX5_MPESW_OP_DISABLE)
+               del_mpesw_rule(ldev);
        mutex_unlock(&ldev->lock);
+
+       complete(&mpesww->comp);
 }
 
-int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
+static int mlx5_lag_mpesw_queue_work(struct mlx5_core_dev *dev,
+                                    enum mpesw_op op)
 {
        struct mlx5_lag *ldev = dev->priv.lag;
+       struct mlx5_mpesw_work_st *work;
        int err = 0;
 
        if (!ldev)
                return 0;
 
-       mutex_lock(&ldev->lock);
-       if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
-               goto out;
+       work = kzalloc(sizeof(*work), GFP_KERNEL);
+       if (!work)
+               return -ENOMEM;
 
-       if (ldev->mode != MLX5_LAG_MODE_NONE) {
+       INIT_WORK(&work->work, mlx5_mpesw_work);
+       init_completion(&work->comp);
+       work->op = op;
+       work->lag = ldev;
+
+       if (!queue_work(ldev->wq, &work->work)) {
+               mlx5_core_warn(dev, "failed to queue mpesw work\n");
                err = -EINVAL;
                goto out;
        }
-
-       err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
-       if (err)
-               mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
-
+       wait_for_completion(&work->comp);
+       err = work->result;
 out:
-       mutex_unlock(&ldev->lock);
+       kfree(work);
        return err;
 }
 
+void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
+{
+       mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_DISABLE);
+}
+
+int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
+{
+       return mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_ENABLE);
+}
+
 int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
 {
        struct mlx5_lag *ldev = mdev->priv.lag;
@@ -71,12 +103,9 @@ int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
        if (!netif_is_bond_master(out_dev) || !ldev)
                return 0;
 
-       mutex_lock(&ldev->lock);
-       if (ldev->mode == MLX5_LAG_MODE_MPESW) {
-               mutex_unlock(&ldev->lock);
+       if (ldev->mode == MLX5_LAG_MODE_MPESW)
                return -EOPNOTSUPP;
-       }
-       mutex_unlock(&ldev->lock);
+
        return 0;
 }
 
@@ -90,11 +119,10 @@ bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev)
 
 void mlx5_lag_mpesw_init(struct mlx5_lag *ldev)
 {
-       INIT_WORK(&ldev->mpesw_work, mlx5_mpesw_work);
        atomic_set(&ldev->lag_mpesw.mpesw_rule_count, 0);
 }
 
 void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev)
 {
-       cancel_delayed_work_sync(&ldev->bond_work);
+       WARN_ON(atomic_read(&ldev->lag_mpesw.mpesw_rule_count));
 }
index be4abcb..88e8daf 100644 (file)
@@ -12,7 +12,6 @@ struct lag_mpesw {
        atomic_t mpesw_rule_count;
 };
 
-void mlx5_mpesw_work(struct work_struct *work);
 int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev);
 bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev);
 #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
index 283c4cc..e58775a 100644 (file)
@@ -1798,7 +1798,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
        res = state == pci_channel_io_perm_failure ?
                PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
 
-       mlx5_pci_trace(dev, "Exit, result = %d, %s\n",  res, result2str(res));
+       mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Exit, result = %d, %s\n",
+                      __func__, dev->state, dev->pci_status, res, result2str(res));
        return res;
 }
 
@@ -1837,7 +1838,8 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        int err;
 
-       mlx5_pci_trace(dev, "Enter\n");
+       mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Enter\n",
+                      __func__, dev->state, dev->pci_status);
 
        err = mlx5_pci_enable_device(dev);
        if (err) {
@@ -1859,7 +1861,8 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
 
        res = PCI_ERS_RESULT_RECOVERED;
 out:
-       mlx5_pci_trace(dev, "Exit, err = %d, result = %d, %s\n", err, res, result2str(res));
+       mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Exit, err = %d, result = %d, %s\n",
+                      __func__, dev->state, dev->pci_status, err, res, result2str(res));
        return res;
 }
 
index 7da012f..8e2abba 100644 (file)
@@ -18,6 +18,10 @@ struct mlx5_sf_dev_table {
        phys_addr_t base_address;
        u64 sf_bar_length;
        struct notifier_block nb;
+       struct mutex table_lock; /* Serializes sf life cycle and vhca state change handler */
+       struct workqueue_struct *active_wq;
+       struct work_struct work;
+       u8 stop_active_wq:1;
        struct mlx5_core_dev *dev;
 };
 
@@ -168,6 +172,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
                return 0;
 
        sf_index = event->function_id - base_id;
+       mutex_lock(&table->table_lock);
        sf_dev = xa_load(&table->devices, sf_index);
        switch (event->new_vhca_state) {
        case MLX5_VHCA_STATE_INVALID:
@@ -191,6 +196,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
        default:
                break;
        }
+       mutex_unlock(&table->table_lock);
        return 0;
 }
 
@@ -215,6 +221,78 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
        return 0;
 }
 
+static void mlx5_sf_dev_add_active_work(struct work_struct *work)
+{
+       struct mlx5_sf_dev_table *table = container_of(work, struct mlx5_sf_dev_table, work);
+       u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
+       struct mlx5_core_dev *dev = table->dev;
+       u16 max_functions;
+       u16 function_id;
+       u16 sw_func_id;
+       int err = 0;
+       u8 state;
+       int i;
+
+       max_functions = mlx5_sf_max_functions(dev);
+       function_id = MLX5_CAP_GEN(dev, sf_base_id);
+       for (i = 0; i < max_functions; i++, function_id++) {
+               if (table->stop_active_wq)
+                       return;
+               err = mlx5_cmd_query_vhca_state(dev, function_id, out, sizeof(out));
+               if (err)
+                       /* A failure of specific vhca doesn't mean others will
+                        * fail as well.
+                        */
+                       continue;
+               state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
+               if (state != MLX5_VHCA_STATE_ACTIVE)
+                       continue;
+
+               sw_func_id = MLX5_GET(query_vhca_state_out, out, vhca_state_context.sw_function_id);
+               mutex_lock(&table->table_lock);
+               /* Don't probe device which is already probe */
+               if (!xa_load(&table->devices, i))
+                       mlx5_sf_dev_add(dev, i, function_id, sw_func_id);
+               /* There is a race where SF got inactive after the query
+                * above. e.g.: the query returns that the state of the
+                * SF is active, and after that the eswitch manager set it to
+                * inactive.
+                * This case cannot be managed in SW, since the probing of the
+                * SF is on one system, and the inactivation is on a different
+                * system.
+                * If the inactive is done after the SF perform init_hca(),
+                * the SF will fully probe and then removed. If it was
+                * done before init_hca(), the SF probe will fail.
+                */
+               mutex_unlock(&table->table_lock);
+       }
+}
+
+/* In case SFs are generated externally, probe active SFs */
+static int mlx5_sf_dev_queue_active_work(struct mlx5_sf_dev_table *table)
+{
+       if (MLX5_CAP_GEN(table->dev, eswitch_manager))
+               return 0; /* the table is local */
+
+       /* Use a workqueue to probe active SFs, which are in large
+        * quantity and may take up to minutes to probe.
+        */
+       table->active_wq = create_singlethread_workqueue("mlx5_active_sf");
+       if (!table->active_wq)
+               return -ENOMEM;
+       INIT_WORK(&table->work, &mlx5_sf_dev_add_active_work);
+       queue_work(table->active_wq, &table->work);
+       return 0;
+}
+
+static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table)
+{
+       if (table->active_wq) {
+               table->stop_active_wq = true;
+               destroy_workqueue(table->active_wq);
+       }
+}
+
 void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
 {
        struct mlx5_sf_dev_table *table;
@@ -240,11 +318,17 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
        table->base_address = pci_resource_start(dev->pdev, 2);
        table->max_sfs = max_sfs;
        xa_init(&table->devices);
+       mutex_init(&table->table_lock);
        dev->priv.sf_dev_table = table;
 
        err = mlx5_vhca_event_notifier_register(dev, &table->nb);
        if (err)
                goto vhca_err;
+
+       err = mlx5_sf_dev_queue_active_work(table);
+       if (err)
+               goto add_active_err;
+
        err = mlx5_sf_dev_vhca_arm_all(table);
        if (err)
                goto arm_err;
@@ -252,6 +336,8 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
        return;
 
 arm_err:
+       mlx5_sf_dev_destroy_active_work(table);
+add_active_err:
        mlx5_vhca_event_notifier_unregister(dev, &table->nb);
 vhca_err:
        table->max_sfs = 0;
@@ -279,7 +365,9 @@ void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
        if (!table)
                return;
 
+       mlx5_sf_dev_destroy_active_work(table);
        mlx5_vhca_event_notifier_unregister(dev, &table->nb);
+       mutex_destroy(&table->table_lock);
 
        /* Now that event handler is not running, it is safe to destroy
         * the sf device without race.
index 4efccd9..1290b2d 100644 (file)
@@ -3470,6 +3470,8 @@ mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
        u16 vid;
 
        vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
+       if (!vxlan_fdb_info->offloaded)
+               return;
 
        bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
        if (!bridge_device)
index fea4254..06811c6 100644 (file)
@@ -716,6 +716,9 @@ int lan966x_stats_init(struct lan966x *lan966x)
        snprintf(queue_name, sizeof(queue_name), "%s-stats",
                 dev_name(lan966x->dev));
        lan966x->stats_queue = create_singlethread_workqueue(queue_name);
+       if (!lan966x->stats_queue)
+               return -ENOMEM;
+
        INIT_DELAYED_WORK(&lan966x->stats_work, lan966x_check_stats_work);
        queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work,
                           LAN966X_STATS_CHECK_DELAY);
index 6b0febc..01f3a3a 100644 (file)
@@ -1253,6 +1253,9 @@ int sparx_stats_init(struct sparx5 *sparx5)
        snprintf(queue_name, sizeof(queue_name), "%s-stats",
                 dev_name(sparx5->dev));
        sparx5->stats_queue = create_singlethread_workqueue(queue_name);
+       if (!sparx5->stats_queue)
+               return -ENOMEM;
+
        INIT_DELAYED_WORK(&sparx5->stats_work, sparx5_check_stats_work);
        queue_delayed_work(sparx5->stats_queue, &sparx5->stats_work,
                           SPX5_STATS_CHECK_DELAY);
index 62a325e..eeac04b 100644 (file)
@@ -659,6 +659,9 @@ static int sparx5_start(struct sparx5 *sparx5)
        snprintf(queue_name, sizeof(queue_name), "%s-mact",
                 dev_name(sparx5->dev));
        sparx5->mact_queue = create_singlethread_workqueue(queue_name);
+       if (!sparx5->mact_queue)
+               return -ENOMEM;
+
        INIT_DELAYED_WORK(&sparx5->mact_work, sparx5_mact_pull_work);
        queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
                           SPX5_MACT_PULL_DELAY);
index 19516cc..d078156 100644 (file)
@@ -104,7 +104,7 @@ static int sparx5_port_open(struct net_device *ndev)
        err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
        if (err) {
                netdev_err(ndev, "Could not attach to PHY\n");
-               return err;
+               goto err_connect;
        }
 
        phylink_start(port->phylink);
@@ -116,10 +116,20 @@ static int sparx5_port_open(struct net_device *ndev)
                        err = sparx5_serdes_set(port->sparx5, port, &port->conf);
                else
                        err = phy_power_on(port->serdes);
-               if (err)
+               if (err) {
                        netdev_err(ndev, "%s failed\n", __func__);
+                       goto out_power;
+               }
        }
 
+       return 0;
+
+out_power:
+       phylink_stop(port->phylink);
+       phylink_disconnect_phy(port->phylink);
+err_connect:
+       sparx5_port_enable(port, false);
+
        return err;
 }
 
index e05429c..dc2c375 100644 (file)
@@ -90,13 +90,10 @@ static int sparx5_tc_setup_qdisc_ets(struct net_device *ndev,
                        }
                }
 
-               sparx5_tc_ets_add(port, params);
-               break;
+               return sparx5_tc_ets_add(port, params);
        case TC_ETS_DESTROY:
 
-               sparx5_tc_ets_del(port);
-
-               break;
+               return sparx5_tc_ets_del(port);
        case TC_ETS_GRAFT:
                return -EOPNOTSUPP;
 
index 405786c..cb08d7b 100644 (file)
@@ -341,7 +341,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
                return ret;
 
        attrs.split = eth_port.is_split;
-       attrs.splittable = !attrs.split;
+       attrs.splittable = eth_port.port_lanes > 1 && !attrs.split;
        attrs.lanes = eth_port.port_lanes;
        attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
        attrs.phys.port_number = eth_port.label_port;
index 22a5d24..991059d 100644 (file)
@@ -1432,6 +1432,9 @@ nfp_port_get_module_info(struct net_device *netdev,
        u8 data;
 
        port = nfp_port_from_netdev(netdev);
+       if (!port)
+               return -EOPNOTSUPP;
+
        /* update port state to get latest interface */
        set_bit(NFP_PORT_CHANGED, &port->flags);
        eth_port = nfp_port_get_eth_port(port);
@@ -1477,15 +1480,15 @@ nfp_port_get_module_info(struct net_device *netdev,
 
                if (data < 0x3) {
                        modinfo->type = ETH_MODULE_SFF_8436;
-                       modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+                       modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
                } else {
                        modinfo->type = ETH_MODULE_SFF_8636;
-                       modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+                       modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
                }
                break;
        case NFP_INTERFACE_QSFP28:
                modinfo->type = ETH_MODULE_SFF_8636;
-               modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+               modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
                break;
        default:
                netdev_err(netdev, "Unsupported module 0x%x detected\n",
index 3f2c301..28b7cec 100644 (file)
@@ -1143,6 +1143,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
                buffer_info->dma = 0;
                buffer_info->time_stamp = 0;
                tx_ring->next_to_use = ring_num;
+               dev_kfree_skb_any(skb);
                return;
        }
        buffer_info->mapped = true;
@@ -2459,6 +2460,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
        unregister_netdev(netdev);
 
        pch_gbe_phy_hw_reset(&adapter->hw);
+       pci_dev_put(adapter->ptp_pdev);
 
        free_netdev(netdev);
 }
@@ -2533,7 +2535,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
        /* setup the private structure */
        ret = pch_gbe_sw_init(adapter);
        if (ret)
-               goto err_free_netdev;
+               goto err_put_dev;
 
        /* Initialize PHY */
        ret = pch_gbe_init_phy(adapter);
@@ -2591,6 +2593,8 @@ static int pch_gbe_probe(struct pci_dev *pdev,
 
 err_free_adapter:
        pch_gbe_phy_hw_reset(&adapter->hw);
+err_put_dev:
+       pci_dev_put(adapter->ptp_pdev);
 err_free_netdev:
        free_netdev(netdev);
        return ret;
index 56f93b0..5456c2b 100644 (file)
@@ -687,8 +687,14 @@ int ionic_port_reset(struct ionic *ionic)
 
 static int __init ionic_init_module(void)
 {
+       int ret;
+
        ionic_debugfs_create();
-       return ionic_bus_register_driver();
+       ret = ionic_bus_register_driver();
+       if (ret)
+               ionic_debugfs_destroy();
+
+       return ret;
 }
 
 static void __exit ionic_cleanup_module(void)
index 76072f8..0d57ffc 100644 (file)
@@ -2471,6 +2471,7 @@ static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
                                             skb_shinfo(skb)->nr_frags);
        if (tx_cb->seg_count == -1) {
                netdev_err(ndev, "%s: invalid segment count!\n", __func__);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
index 88fa295..ddcc325 100644 (file)
@@ -218,6 +218,7 @@ netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb,
                   skb->len, skb->data_len, channel->channel);
        if (!efx->n_channels || !efx->n_tx_channels || !channel) {
                netif_stop_queue(net_dev);
+               dev_kfree_skb_any(skb);
                goto err;
        }
 
index 8273e6a..6b43da7 100644 (file)
@@ -6548,6 +6548,9 @@ void stmmac_xdp_release(struct net_device *dev)
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 chan;
 
+       /* Ensure tx function is not running */
+       netif_tx_disable(dev);
+
        /* Disable NAPI process */
        stmmac_disable_all_queues(priv);
 
index de94921..025e0c1 100644 (file)
@@ -98,6 +98,7 @@ struct ipvl_port {
        struct sk_buff_head     backlog;
        int                     count;
        struct ida              ida;
+       netdevice_tracker       dev_tracker;
 };
 
 struct ipvl_skb_cb {
index 54c94a6..796a38f 100644 (file)
@@ -83,6 +83,7 @@ static int ipvlan_port_create(struct net_device *dev)
        if (err)
                goto err;
 
+       netdev_hold(dev, &port->dev_tracker, GFP_KERNEL);
        return 0;
 
 err:
@@ -95,6 +96,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
        struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
        struct sk_buff *skb;
 
+       netdev_put(dev, &port->dev_tracker);
        if (port->mode == IPVLAN_MODE_L3S)
                ipvlan_l3s_unregister(port);
        netdev_rx_handler_unregister(dev);
index 85376d2..f41f67b 100644 (file)
@@ -3835,7 +3835,6 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
        if (macsec_is_offloaded(macsec)) {
                const struct macsec_ops *ops;
                struct macsec_context ctx;
-               int ret;
 
                ops = macsec_get_ops(netdev_priv(dev), &ctx);
                if (!ops) {
index 578897a..b8cc55b 100644 (file)
@@ -141,7 +141,7 @@ static struct macvlan_source_entry *macvlan_hash_lookup_source(
        u32 idx = macvlan_eth_hash(addr);
        struct hlist_head *h = &vlan->port->vlan_source_hash[idx];
 
-       hlist_for_each_entry_rcu(entry, h, hlist) {
+       hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
                if (ether_addr_equal_64bits(entry->addr, addr) &&
                    entry->vlan == vlan)
                        return entry;
@@ -1647,7 +1647,7 @@ static int macvlan_fill_info_macaddr(struct sk_buff *skb,
        struct hlist_head *h = &vlan->port->vlan_source_hash[i];
        struct macvlan_source_entry *entry;
 
-       hlist_for_each_entry_rcu(entry, h, hlist) {
+       hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
                if (entry->vlan != vlan)
                        continue;
                if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr))
index 0762c73..1d67a3c 100644 (file)
@@ -43,6 +43,7 @@
 enum {
        MCTP_I2C_FLOW_STATE_NEW = 0,
        MCTP_I2C_FLOW_STATE_ACTIVE,
+       MCTP_I2C_FLOW_STATE_INVALID,
 };
 
 /* List of all struct mctp_i2c_client
@@ -374,12 +375,18 @@ mctp_i2c_get_tx_flow_state(struct mctp_i2c_dev *midev, struct sk_buff *skb)
         */
        if (!key->valid) {
                state = MCTP_I2C_TX_FLOW_INVALID;
-
-       } else if (key->dev_flow_state == MCTP_I2C_FLOW_STATE_NEW) {
-               key->dev_flow_state = MCTP_I2C_FLOW_STATE_ACTIVE;
-               state = MCTP_I2C_TX_FLOW_NEW;
        } else {
-               state = MCTP_I2C_TX_FLOW_EXISTING;
+               switch (key->dev_flow_state) {
+               case MCTP_I2C_FLOW_STATE_NEW:
+                       key->dev_flow_state = MCTP_I2C_FLOW_STATE_ACTIVE;
+                       state = MCTP_I2C_TX_FLOW_NEW;
+                       break;
+               case MCTP_I2C_FLOW_STATE_ACTIVE:
+                       state = MCTP_I2C_TX_FLOW_EXISTING;
+                       break;
+               default:
+                       state = MCTP_I2C_TX_FLOW_INVALID;
+               }
        }
 
        spin_unlock_irqrestore(&key->lock, flags);
@@ -617,21 +624,31 @@ static void mctp_i2c_release_flow(struct mctp_dev *mdev,
 
 {
        struct mctp_i2c_dev *midev = netdev_priv(mdev->dev);
+       bool queue_release = false;
        unsigned long flags;
 
        spin_lock_irqsave(&midev->lock, flags);
-       midev->release_count++;
-       spin_unlock_irqrestore(&midev->lock, flags);
-
-       /* Ensure we have a release operation queued, through the fake
-        * marker skb
+       /* if we have seen the flow/key previously, we need to pair the
+        * original lock with a release
         */
-       spin_lock(&midev->tx_queue.lock);
-       if (!midev->unlock_marker.next)
-               __skb_queue_tail(&midev->tx_queue, &midev->unlock_marker);
-       spin_unlock(&midev->tx_queue.lock);
+       if (key->dev_flow_state == MCTP_I2C_FLOW_STATE_ACTIVE) {
+               midev->release_count++;
+               queue_release = true;
+       }
+       key->dev_flow_state = MCTP_I2C_FLOW_STATE_INVALID;
+       spin_unlock_irqrestore(&midev->lock, flags);
 
-       wake_up(&midev->tx_wq);
+       if (queue_release) {
+               /* Ensure we have a release operation queued, through the fake
+                * marker skb
+                */
+               spin_lock(&midev->tx_queue.lock);
+               if (!midev->unlock_marker.next)
+                       __skb_queue_tail(&midev->tx_queue,
+                                        &midev->unlock_marker);
+               spin_unlock(&midev->tx_queue.lock);
+               wake_up(&midev->tx_wq);
+       }
 }
 
 static const struct net_device_ops mctp_i2c_ops = {
index 0b1b6f6..0b9d379 100644 (file)
@@ -343,6 +343,8 @@ static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev)
 
        kfree_skb(mhi_netdev->skbagg_head);
 
+       free_netdev(ndev);
+
        dev_set_drvdata(&mhi_dev->dev, NULL);
 }
 
index a7880c7..68e56e4 100644 (file)
@@ -1683,6 +1683,7 @@ void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev)
                                  ARRAY_SIZE(nsim_devlink_params));
        devl_resources_unregister(devlink);
        kfree(nsim_dev->vfconfigs);
+       kfree(nsim_dev->fa_cookie);
        devl_unlock(devlink);
        devlink_free(devlink);
        dev_set_drvdata(&nsim_bus_dev->dev, NULL);
index 349b7b1..d499659 100644 (file)
@@ -870,8 +870,10 @@ static int at803x_probe(struct phy_device *phydev)
                        .wolopts = 0,
                };
 
-               if (ccr < 0)
+               if (ccr < 0) {
+                       ret = ccr;
                        goto err;
+               }
                mode_cfg = ccr & AT803X_MODE_CFG_MASK;
 
                switch (mode_cfg) {
index 417527f..7446d5c 100644 (file)
@@ -682,6 +682,13 @@ static int dp83867_of_init(struct phy_device *phydev)
         */
        dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN / 2;
 
+       /* For non-OF device, the RX and TX FIFO depths are taken from
+        * default value. So, we init RX & TX FIFO depths here
+        * so that it is configured correctly later in dp83867_config_init();
+        */
+       dp83867->tx_fifo_depth = DP83867_PHYCR_FIFO_DEPTH_4_B_NIB;
+       dp83867->rx_fifo_depth = DP83867_PHYCR_FIFO_DEPTH_4_B_NIB;
+
        return 0;
 }
 #endif /* CONFIG_OF_MDIO */
index 2810f4f..0d706ee 100644 (file)
@@ -2015,14 +2015,16 @@ static int m88e1510_loopback(struct phy_device *phydev, bool enable)
                if (err < 0)
                        return err;
 
-               /* FIXME: Based on trial and error test, it seem 1G need to have
-                * delay between soft reset and loopback enablement.
-                */
-               if (phydev->speed == SPEED_1000)
-                       msleep(1000);
+               err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
+                                BMCR_LOOPBACK);
 
-               return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
-                                 BMCR_LOOPBACK);
+               if (!err) {
+                       /* It takes some time for PHY device to switch
+                        * into/out-of loopback mode.
+                        */
+                       msleep(1000);
+               }
+               return err;
        } else {
                err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, 0);
                if (err < 0)
index 83fcaeb..a52ee2b 100644 (file)
@@ -1391,12 +1391,21 @@ static int __init tbnet_init(void)
        tb_property_add_immediate(tbnet_dir, "prtcstns", flags);
 
        ret = tb_register_property_dir("network", tbnet_dir);
-       if (ret) {
-               tb_property_free_dir(tbnet_dir);
-               return ret;
-       }
+       if (ret)
+               goto err_free_dir;
+
+       ret = tb_register_service_driver(&tbnet_driver);
+       if (ret)
+               goto err_unregister;
 
-       return tb_register_service_driver(&tbnet_driver);
+       return 0;
+
+err_unregister:
+       tb_unregister_property_dir("network", tbnet_dir);
+err_free_dir:
+       tb_property_free_dir(tbnet_dir);
+
+       return ret;
 }
 module_init(tbnet_init);
 
index 8d5cbda..0897fdb 100644 (file)
@@ -1915,6 +1915,7 @@ static const struct driver_info cdc_ncm_zlp_info = {
        .status = cdc_ncm_status,
        .rx_fixup = cdc_ncm_rx_fixup,
        .tx_fixup = cdc_ncm_tx_fixup,
+       .set_rx_mode = usbnet_cdc_update_filter,
 };
 
 /* Same as cdc_ncm_info, but with FLAG_WWAN */
index 26c34a7..554d4e2 100644 (file)
@@ -1357,6 +1357,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
        {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x103a, 0)}, /* Telit LE910C4-WWX */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */
@@ -1422,6 +1423,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)},    /* Foxconn T77W968 LTE */
        {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)},    /* Foxconn T77W968 LTE with eSIM support*/
        {QMI_FIXED_INTF(0x2692, 0x9025, 4)},    /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
+       {QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */
 
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
index bfb58c9..32d2c60 100644 (file)
@@ -66,6 +66,7 @@ struct smsc95xx_priv {
        spinlock_t mac_cr_lock;
        u8 features;
        u8 suspend_flags;
+       bool is_internal_phy;
        struct irq_chip irqchip;
        struct irq_domain *irqdomain;
        struct fwnode_handle *irqfwnode;
@@ -252,6 +253,43 @@ done:
        mutex_unlock(&dev->phy_mutex);
 }
 
+static int smsc95xx_mdiobus_reset(struct mii_bus *bus)
+{
+       struct smsc95xx_priv *pdata;
+       struct usbnet *dev;
+       u32 val;
+       int ret;
+
+       dev = bus->priv;
+       pdata = dev->driver_priv;
+
+       if (pdata->is_internal_phy)
+               return 0;
+
+       mutex_lock(&dev->phy_mutex);
+
+       ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
+       if (ret < 0)
+               goto reset_out;
+
+       val |= PM_CTL_PHY_RST_;
+
+       ret = smsc95xx_write_reg(dev, PM_CTRL, val);
+       if (ret < 0)
+               goto reset_out;
+
+       /* Driver has no knowledge at this point about the external PHY.
+        * The 802.3 specifies that the reset process shall
+        * be completed within 0.5 s.
+        */
+       fsleep(500000);
+
+reset_out:
+       mutex_unlock(&dev->phy_mutex);
+
+       return 0;
+}
+
 static int smsc95xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
 {
        struct usbnet *dev = bus->priv;
@@ -1052,7 +1090,6 @@ static void smsc95xx_handle_link_change(struct net_device *net)
 static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
 {
        struct smsc95xx_priv *pdata;
-       bool is_internal_phy;
        char usb_path[64];
        int ret, phy_irq;
        u32 val;
@@ -1133,13 +1170,14 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
        if (ret < 0)
                goto free_mdio;
 
-       is_internal_phy = !(val & HW_CFG_PSEL_);
-       if (is_internal_phy)
+       pdata->is_internal_phy = !(val & HW_CFG_PSEL_);
+       if (pdata->is_internal_phy)
                pdata->mdiobus->phy_mask = ~(1u << SMSC95XX_INTERNAL_PHY_ID);
 
        pdata->mdiobus->priv = dev;
        pdata->mdiobus->read = smsc95xx_mdiobus_read;
        pdata->mdiobus->write = smsc95xx_mdiobus_write;
+       pdata->mdiobus->reset = smsc95xx_mdiobus_reset;
        pdata->mdiobus->name = "smsc95xx-mdiobus";
        pdata->mdiobus->parent = &dev->udev->dev;
 
@@ -1160,7 +1198,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
        }
 
        pdata->phydev->irq = phy_irq;
-       pdata->phydev->is_internal = is_internal_phy;
+       pdata->phydev->is_internal = pdata->is_internal_phy;
 
        /* detect device revision as different features may be available */
        ret = smsc95xx_read_reg(dev, ID_REV, &val);
index 7106932..86e5245 100644 (file)
@@ -3949,12 +3949,11 @@ static int virtnet_probe(struct virtio_device *vdev)
        return 0;
 
 free_unregister_netdev:
-       virtio_reset_device(vdev);
-
        unregister_netdev(dev);
 free_failover:
        net_failover_destroy(vi->failover);
 free_vqs:
+       virtio_reset_device(vdev);
        cancel_delayed_work_sync(&vi->refill);
        free_receive_page_frags(vi);
        virtnet_del_vqs(vi);
index d3d34d1..5bf5a93 100644 (file)
@@ -249,7 +249,7 @@ static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev)
        if (object->integer.value == 3)
                sleep_state = IPC_PCIE_D3L2;
 
-       kfree(object);
+       ACPI_FREE(object);
 
 default_ret:
        return sleep_state;
index 3458af3..7d0f5e4 100644 (file)
@@ -165,6 +165,8 @@ static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name)
                return -EFAULT;
        }
 
+       kfree(buffer.pointer);
+
 #endif
        return 0;
 }
index 24436c9..9760082 100644 (file)
@@ -112,8 +112,10 @@ static int nfcmrvl_i2c_nci_send(struct nfcmrvl_private *priv,
        struct nfcmrvl_i2c_drv_data *drv_data = priv->drv_data;
        int ret;
 
-       if (test_bit(NFCMRVL_PHY_ERROR, &priv->flags))
+       if (test_bit(NFCMRVL_PHY_ERROR, &priv->flags)) {
+               kfree_skb(skb);
                return -EREMOTEIO;
+       }
 
        ret = i2c_master_send(drv_data->i2c, skb->data, skb->len);
 
index 580cb6e..66b1986 100644 (file)
@@ -73,11 +73,15 @@ static int nxp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
        struct nxp_nci_info *info = nci_get_drvdata(ndev);
        int r;
 
-       if (!info->phy_ops->write)
+       if (!info->phy_ops->write) {
+               kfree_skb(skb);
                return -EOPNOTSUPP;
+       }
 
-       if (info->mode != NXP_NCI_MODE_NCI)
+       if (info->mode != NXP_NCI_MODE_NCI) {
+               kfree_skb(skb);
                return -EINVAL;
+       }
 
        r = info->phy_ops->write(info->phy_id, skb);
        if (r < 0) {
index 0270e05..aec3568 100644 (file)
@@ -105,6 +105,7 @@ static int s3fwrn5_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
        mutex_lock(&info->mutex);
 
        if (s3fwrn5_get_mode(info) != S3FWRN5_MODE_NCI) {
+               kfree_skb(skb);
                mutex_unlock(&info->mutex);
                return -EINVAL;
        }
index 7764b1a..ec87dd2 100644 (file)
@@ -312,6 +312,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
        int r = 0;
        struct device *dev = &ndev->nfc_dev->dev;
        struct nfc_evt_transaction *transaction;
+       u32 aid_len;
+       u8 params_len;
 
        pr_debug("connectivity gate event: %x\n", event);
 
@@ -325,26 +327,47 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
                 * Description  Tag     Length
                 * AID          81      5 to 16
                 * PARAMETERS   82      0 to 255
+                *
+                * The key differences are aid storage length is variably sized
+                * in the packet, but fixed in nfc_evt_transaction, and that
+                * the aid_len is u8 in the packet, but u32 in the structure,
+                * and the tags in the packet are not included in
+                * nfc_evt_transaction.
+                *
+                * size(b):  1          1       5-16 1             1           0-255
+                * offset:   0          1       2    aid_len + 2   aid_len + 3 aid_len + 4
+                * mem name: aid_tag(M) aid_len aid  params_tag(M) params_len  params
+                * example:  0x81       5-16    X    0x82          0-255       X
                 */
-               if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
-                   skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
+               if (skb->len < 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
                        return -EPROTO;
 
-               transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL);
-               if (!transaction)
-                       return -ENOMEM;
+               aid_len = skb->data[1];
 
-               transaction->aid_len = skb->data[1];
-               memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
+               if (skb->len < aid_len + 4 ||
+                   aid_len > sizeof(transaction->aid))
+                       return -EPROTO;
 
-               /* Check next byte is PARAMETERS tag (82) */
-               if (skb->data[transaction->aid_len + 2] !=
-                   NFC_EVT_TRANSACTION_PARAMS_TAG)
+               params_len = skb->data[aid_len + 3];
+
+               /* Verify PARAMETERS tag is (82), and final check that there is
+                * enough space in the packet to read everything.
+                */
+               if (skb->data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG ||
+                   skb->len < aid_len + 4 + params_len)
                        return -EPROTO;
 
-               transaction->params_len = skb->data[transaction->aid_len + 3];
-               memcpy(transaction->params, skb->data +
-                      transaction->aid_len + 4, transaction->params_len);
+               transaction = devm_kzalloc(dev, sizeof(*transaction) +
+                                          params_len, GFP_KERNEL);
+               if (!transaction)
+                       return -ENOMEM;
+
+               transaction->aid_len = aid_len;
+               transaction->params_len = params_len;
+
+               memcpy(transaction->aid, &skb->data[2], aid_len);
+               memcpy(transaction->params, &skb->data[aid_len + 4],
+                      params_len);
 
                r = nfc_se_transaction(ndev->nfc_dev, host, transaction);
                break;
index 02b5578..f433551 100644 (file)
@@ -3489,6 +3489,8 @@ static const struct pci_device_id nvme_id_table[] = {
                                NVME_QUIRK_IGNORE_DEV_SUBNQN, },
         { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */
                .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN },
+        { PCI_DEVICE(0x1344, 0x6001),   /* Micron Nitro NVMe */
+                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1c5c, 0x1504),   /* SK Hynix PC400 */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x1c5c, 0x174a),   /* SK Hynix P31 SSD */
@@ -3519,6 +3521,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x2646, 0x501E),   /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+       { PCI_DEVICE(0x1f40, 0x5236),   /* Netac Technologies Co. NV7000 NVMe SSD */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1e4B, 0x1001),   /* MAXIO MAP1001 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1e4B, 0x1002),   /* MAXIO MAP1002 */
index c4113b4..4dcddcf 100644 (file)
@@ -45,9 +45,11 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
        if (!dhchap_secret)
                return -ENOMEM;
        if (set_ctrl) {
+               kfree(host->dhchap_ctrl_secret);
                host->dhchap_ctrl_secret = strim(dhchap_secret);
                host->dhchap_ctrl_key_hash = key_hash;
        } else {
+               kfree(host->dhchap_secret);
                host->dhchap_secret = strim(dhchap_secret);
                host->dhchap_key_hash = key_hash;
        }
index f6732fd..377bf34 100644 (file)
@@ -203,7 +203,7 @@ static int lan9662_otp_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id lan9662_otp_match[] = {
-       { .compatible = "microchip,lan9662-otp", },
+       { .compatible = "microchip,lan9662-otpc", },
        { },
 };
 MODULE_DEVICE_TABLE(of, lan9662_otp_match);
index 8e72d1b..4fdbdcc 100644 (file)
@@ -135,7 +135,7 @@ static int u_boot_env_parse(struct u_boot_env *priv)
                break;
        case U_BOOT_FORMAT_REDUNDANT:
                crc32_offset = offsetof(struct u_boot_env_image_redundant, crc32);
-               crc32_data_offset = offsetof(struct u_boot_env_image_redundant, mark);
+               crc32_data_offset = offsetof(struct u_boot_env_image_redundant, data);
                data_offset = offsetof(struct u_boot_env_image_redundant, data);
                break;
        }
index 7c45927..5784dc2 100644 (file)
@@ -468,7 +468,7 @@ static size_t parport_pc_fifo_write_block_pio(struct parport *port,
        const unsigned char *bufp = buf;
        size_t left = length;
        unsigned long expire = jiffies + port->physport->cad->timeout;
-       const int fifo = FIFO(port);
+       const unsigned long fifo = FIFO(port);
        int poll_for = 8; /* 80 usecs */
        const struct parport_pc_private *priv = port->physport->private_data;
        const int fifo_depth = priv->fifo_depth;
index ba64284..f1ec893 100644 (file)
@@ -1613,7 +1613,7 @@ out:
 }
 
 static u32 hv_compose_msi_req_v1(
-       struct pci_create_interrupt *int_pkt, const struct cpumask *affinity,
+       struct pci_create_interrupt *int_pkt,
        u32 slot, u8 vector, u16 vector_count)
 {
        int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
@@ -1632,6 +1632,35 @@ static u32 hv_compose_msi_req_v1(
 }
 
 /*
+ * The vCPU selected by hv_compose_multi_msi_req_get_cpu() and
+ * hv_compose_msi_req_get_cpu() is a "dummy" vCPU because the final vCPU to be
+ * interrupted is specified later in hv_irq_unmask() and communicated to Hyper-V
+ * via the HVCALL_RETARGET_INTERRUPT hypercall. But the choice of dummy vCPU is
+ * not irrelevant because Hyper-V chooses the physical CPU to handle the
+ * interrupts based on the vCPU specified in message sent to the vPCI VSP in
+ * hv_compose_msi_msg(). Hyper-V's choice of pCPU is not visible to the guest,
+ * but assigning too many vPCI device interrupts to the same pCPU can cause a
+ * performance bottleneck. So we spread out the dummy vCPUs to influence Hyper-V
+ * to spread out the pCPUs that it selects.
+ *
+ * For the single-MSI and MSI-X cases, it's OK for hv_compose_msi_req_get_cpu()
+ * to always return the same dummy vCPU, because a second call to
+ * hv_compose_msi_msg() contains the "real" vCPU, causing Hyper-V to choose a
+ * new pCPU for the interrupt. But for the multi-MSI case, the second call to
+ * hv_compose_msi_msg() exits without sending a message to the vPCI VSP, so the
+ * original dummy vCPU is used. This dummy vCPU must be round-robin'ed so that
+ * the pCPUs are spread out. All interrupts for a multi-MSI device end up using
+ * the same pCPU, even though the vCPUs will be spread out by later calls
+ * to hv_irq_unmask(), but that is the best we can do now.
+ *
+ * With Hyper-V in Nov 2022, the HVCALL_RETARGET_INTERRUPT hypercall does *not*
+ * cause Hyper-V to reselect the pCPU based on the specified vCPU. Such an
+ * enhancement is planned for a future version. With that enhancement, the
+ * dummy vCPU selection won't matter, and interrupts for the same multi-MSI
+ * device will be spread across multiple pCPUs.
+ */
+
+/*
  * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
  * by subsequent retarget in hv_irq_unmask().
  */
@@ -1640,18 +1669,39 @@ static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
        return cpumask_first_and(affinity, cpu_online_mask);
 }
 
-static u32 hv_compose_msi_req_v2(
-       struct pci_create_interrupt2 *int_pkt, const struct cpumask *affinity,
-       u32 slot, u8 vector, u16 vector_count)
+/*
+ * Make sure the dummy vCPU values for multi-MSI don't all point to vCPU0.
+ */
+static int hv_compose_multi_msi_req_get_cpu(void)
 {
+       static DEFINE_SPINLOCK(multi_msi_cpu_lock);
+
+       /* -1 means starting with CPU 0 */
+       static int cpu_next = -1;
+
+       unsigned long flags;
        int cpu;
 
+       spin_lock_irqsave(&multi_msi_cpu_lock, flags);
+
+       cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
+                                    false);
+       cpu = cpu_next;
+
+       spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
+
+       return cpu;
+}
+
+static u32 hv_compose_msi_req_v2(
+       struct pci_create_interrupt2 *int_pkt, int cpu,
+       u32 slot, u8 vector, u16 vector_count)
+{
        int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
        int_pkt->wslot.slot = slot;
        int_pkt->int_desc.vector = vector;
        int_pkt->int_desc.vector_count = vector_count;
        int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
-       cpu = hv_compose_msi_req_get_cpu(affinity);
        int_pkt->int_desc.processor_array[0] =
                hv_cpu_number_to_vp_number(cpu);
        int_pkt->int_desc.processor_count = 1;
@@ -1660,18 +1710,15 @@ static u32 hv_compose_msi_req_v2(
 }
 
 static u32 hv_compose_msi_req_v3(
-       struct pci_create_interrupt3 *int_pkt, const struct cpumask *affinity,
+       struct pci_create_interrupt3 *int_pkt, int cpu,
        u32 slot, u32 vector, u16 vector_count)
 {
-       int cpu;
-
        int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
        int_pkt->wslot.slot = slot;
        int_pkt->int_desc.vector = vector;
        int_pkt->int_desc.reserved = 0;
        int_pkt->int_desc.vector_count = vector_count;
        int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
-       cpu = hv_compose_msi_req_get_cpu(affinity);
        int_pkt->int_desc.processor_array[0] =
                hv_cpu_number_to_vp_number(cpu);
        int_pkt->int_desc.processor_count = 1;
@@ -1715,12 +1762,18 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
                        struct pci_create_interrupt3 v3;
                } int_pkts;
        } __packed ctxt;
+       bool multi_msi;
        u64 trans_id;
        u32 size;
        int ret;
+       int cpu;
+
+       msi_desc  = irq_data_get_msi_desc(data);
+       multi_msi = !msi_desc->pci.msi_attrib.is_msix &&
+                   msi_desc->nvec_used > 1;
 
        /* Reuse the previous allocation */
-       if (data->chip_data) {
+       if (data->chip_data && multi_msi) {
                int_desc = data->chip_data;
                msg->address_hi = int_desc->address >> 32;
                msg->address_lo = int_desc->address & 0xffffffff;
@@ -1728,7 +1781,6 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
                return;
        }
 
-       msi_desc  = irq_data_get_msi_desc(data);
        pdev = msi_desc_to_pci_dev(msi_desc);
        dest = irq_data_get_effective_affinity_mask(data);
        pbus = pdev->bus;
@@ -1738,11 +1790,18 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
        if (!hpdev)
                goto return_null_message;
 
+       /* Free any previous message that might have already been composed. */
+       if (data->chip_data && !multi_msi) {
+               int_desc = data->chip_data;
+               data->chip_data = NULL;
+               hv_int_desc_free(hpdev, int_desc);
+       }
+
        int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
        if (!int_desc)
                goto drop_reference;
 
-       if (!msi_desc->pci.msi_attrib.is_msix && msi_desc->nvec_used > 1) {
+       if (multi_msi) {
                /*
                 * If this is not the first MSI of Multi MSI, we already have
                 * a mapping.  Can exit early.
@@ -1767,9 +1826,11 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
                 */
                vector = 32;
                vector_count = msi_desc->nvec_used;
+               cpu = hv_compose_multi_msi_req_get_cpu();
        } else {
                vector = hv_msi_get_int_vector(data);
                vector_count = 1;
+               cpu = hv_compose_msi_req_get_cpu(dest);
        }
 
        /*
@@ -1785,7 +1846,6 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
        switch (hbus->protocol_version) {
        case PCI_PROTOCOL_VERSION_1_1:
                size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
-                                       dest,
                                        hpdev->desc.win_slot.slot,
                                        (u8)vector,
                                        vector_count);
@@ -1794,7 +1854,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
        case PCI_PROTOCOL_VERSION_1_2:
        case PCI_PROTOCOL_VERSION_1_3:
                size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
-                                       dest,
+                                       cpu,
                                        hpdev->desc.win_slot.slot,
                                        (u8)vector,
                                        vector_count);
@@ -1802,7 +1862,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 
        case PCI_PROTOCOL_VERSION_1_4:
                size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
-                                       dest,
+                                       cpu,
                                        hpdev->desc.win_slot.slot,
                                        vector,
                                        vector_count);
index ef898ee..6e0a409 100644 (file)
@@ -220,6 +220,8 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
        for (state = 0; ; state++) {
                /* Retrieve the pinctrl-* property */
                propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
+               if (!propname)
+                       return -ENOMEM;
                prop = of_find_property(np, propname, &size);
                kfree(propname);
                if (!prop) {
index f7b54a5..65d3129 100644 (file)
@@ -24,6 +24,7 @@
 #define MTK_EINT_EDGE_SENSITIVE           0
 #define MTK_EINT_LEVEL_SENSITIVE          1
 #define MTK_EINT_DBNC_SET_DBNC_BITS      4
+#define MTK_EINT_DBNC_MAX                16
 #define MTK_EINT_DBNC_RST_BIT            (0x1 << 1)
 #define MTK_EINT_DBNC_SET_EN             (0x1 << 0)
 
@@ -48,6 +49,21 @@ static const struct mtk_eint_regs mtk_generic_eint_regs = {
        .dbnc_clr  = 0x700,
 };
 
+const unsigned int debounce_time_mt2701[] = {
+       500, 1000, 16000, 32000, 64000, 128000, 256000, 0
+};
+EXPORT_SYMBOL_GPL(debounce_time_mt2701);
+
+const unsigned int debounce_time_mt6765[] = {
+       125, 250, 500, 1000, 16000, 32000, 64000, 128000, 256000, 512000, 0
+};
+EXPORT_SYMBOL_GPL(debounce_time_mt6765);
+
+const unsigned int debounce_time_mt6795[] = {
+       500, 1000, 16000, 32000, 64000, 128000, 256000, 512000, 0
+};
+EXPORT_SYMBOL_GPL(debounce_time_mt6795);
+
 static void __iomem *mtk_eint_get_offset(struct mtk_eint *eint,
                                         unsigned int eint_num,
                                         unsigned int offset)
@@ -404,10 +420,11 @@ int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
        int virq, eint_offset;
        unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask,
                     dbnc;
-       static const unsigned int debounce_time[] = {500, 1000, 16000, 32000,
-                                                    64000, 128000, 256000};
        struct irq_data *d;
 
+       if (!eint->hw->db_time)
+               return -EOPNOTSUPP;
+
        virq = irq_find_mapping(eint->domain, eint_num);
        eint_offset = (eint_num % 4) * 8;
        d = irq_get_irq_data(virq);
@@ -418,9 +435,9 @@ int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
        if (!mtk_eint_can_en_debounce(eint, eint_num))
                return -EINVAL;
 
-       dbnc = ARRAY_SIZE(debounce_time);
-       for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
-               if (debounce <= debounce_time[i]) {
+       dbnc = eint->num_db_time;
+       for (i = 0; i < eint->num_db_time; i++) {
+               if (debounce <= eint->hw->db_time[i]) {
                        dbnc = i;
                        break;
                }
@@ -494,6 +511,13 @@ int mtk_eint_do_init(struct mtk_eint *eint)
        if (!eint->domain)
                return -ENOMEM;
 
+       if (eint->hw->db_time) {
+               for (i = 0; i < MTK_EINT_DBNC_MAX; i++)
+                       if (eint->hw->db_time[i] == 0)
+                               break;
+               eint->num_db_time = i;
+       }
+
        mtk_eint_hw_init(eint);
        for (i = 0; i < eint->hw->ap_num; i++) {
                int virq = irq_create_mapping(eint->domain, i);
index 48468d0..6139b16 100644 (file)
@@ -37,8 +37,13 @@ struct mtk_eint_hw {
        u8              ports;
        unsigned int    ap_num;
        unsigned int    db_cnt;
+       const unsigned int *db_time;
 };
 
+extern const unsigned int debounce_time_mt2701[];
+extern const unsigned int debounce_time_mt6765[];
+extern const unsigned int debounce_time_mt6795[];
+
 struct mtk_eint;
 
 struct mtk_eint_xt {
@@ -62,6 +67,7 @@ struct mtk_eint {
        /* Used to fit into various EINT device */
        const struct mtk_eint_hw *hw;
        const struct mtk_eint_regs *regs;
+       u16 num_db_time;
 
        /* Used to fit into various pinctrl device */
        void *pctl;
index d1583b4..b185538 100644 (file)
@@ -518,6 +518,7 @@ static const struct mtk_pinctrl_devdata mt2701_pinctrl_data = {
                .ports     = 6,
                .ap_num    = 169,
                .db_cnt    = 16,
+               .db_time   = debounce_time_mt2701,
        },
 };
 
index b921068..730a496 100644 (file)
@@ -567,6 +567,7 @@ static const struct mtk_pinctrl_devdata mt2712_pinctrl_data = {
                .ports     = 8,
                .ap_num    = 229,
                .db_cnt    = 40,
+               .db_time   = debounce_time_mt2701,
        },
 };
 
index c57b19f..f6ec41e 100644 (file)
@@ -1062,6 +1062,7 @@ static const struct mtk_eint_hw mt6765_eint_hw = {
        .ports     = 6,
        .ap_num    = 160,
        .db_cnt    = 13,
+       .db_time   = debounce_time_mt6765,
 };
 
 static const struct mtk_pin_soc mt6765_data = {
index 4ddf8bd..62d4f5a 100644 (file)
@@ -737,6 +737,7 @@ static const struct mtk_eint_hw mt6779_eint_hw = {
        .ports     = 6,
        .ap_num    = 195,
        .db_cnt    = 13,
+       .db_time   = debounce_time_mt2701,
 };
 
 static const struct mtk_pin_soc mt6779_data = {
index f901522..01e855c 100644 (file)
@@ -475,6 +475,7 @@ static const struct mtk_eint_hw mt6795_eint_hw = {
        .ports     = 7,
        .ap_num    = 224,
        .db_cnt    = 32,
+       .db_time   = debounce_time_mt6795,
 };
 
 static const unsigned int mt6795_pull_type[] = {
index 68eee88..3c1148d 100644 (file)
@@ -846,6 +846,7 @@ static const struct mtk_eint_hw mt7622_eint_hw = {
        .ports     = 7,
        .ap_num    = ARRAY_SIZE(mt7622_pins),
        .db_cnt    = 20,
+       .db_time   = debounce_time_mt6765,
 };
 
 static const struct mtk_pin_soc mt7622_data = {
index b8d9d31..6999770 100644 (file)
@@ -1369,6 +1369,7 @@ static const struct mtk_eint_hw mt7623_eint_hw = {
        .ports     = 6,
        .ap_num    = 169,
        .db_cnt    = 20,
+       .db_time   = debounce_time_mt2701,
 };
 
 static struct mtk_pin_soc mt7623_data = {
index b5f0fa4..2ce411c 100644 (file)
@@ -402,6 +402,7 @@ static const struct mtk_eint_hw mt7629_eint_hw = {
        .ports     = 7,
        .ap_num    = ARRAY_SIZE(mt7629_pins),
        .db_cnt    = 16,
+       .db_time   = debounce_time_mt2701,
 };
 
 static struct mtk_pin_soc mt7629_data = {
index f26869f..50cb736 100644 (file)
@@ -826,6 +826,7 @@ static const struct mtk_eint_hw mt7986a_eint_hw = {
        .ports = 7,
        .ap_num = ARRAY_SIZE(mt7986a_pins),
        .db_cnt = 16,
+       .db_time = debounce_time_mt6765,
 };
 
 static const struct mtk_eint_hw mt7986b_eint_hw = {
@@ -833,6 +834,7 @@ static const struct mtk_eint_hw mt7986b_eint_hw = {
        .ports = 7,
        .ap_num = ARRAY_SIZE(mt7986b_pins),
        .db_cnt = 16,
+       .db_time = debounce_time_mt6765,
 };
 
 static struct mtk_pin_soc mt7986a_data = {
index 91c530e..e8772dc 100644 (file)
@@ -286,6 +286,7 @@ static const struct mtk_pinctrl_devdata mt8127_pinctrl_data = {
                .ports     = 6,
                .ap_num    = 143,
                .db_cnt    = 16,
+               .db_time = debounce_time_mt2701,
        },
 };
 
index 5628467..cdb0252 100644 (file)
@@ -315,6 +315,7 @@ static const struct mtk_pinctrl_devdata mt8135_pinctrl_data = {
                .ports     = 6,
                .ap_num    = 192,
                .db_cnt    = 16,
+               .db_time = debounce_time_mt2701,
        },
 };
 
index 825167f..866da2c 100644 (file)
@@ -319,6 +319,7 @@ static const struct mtk_pinctrl_devdata mt8167_pinctrl_data = {
                .ports     = 6,
                .ap_num    = 169,
                .db_cnt    = 64,
+               .db_time = debounce_time_mt6795,
        },
 };
 
index 1d7d11a..37d8cec 100644 (file)
@@ -327,6 +327,7 @@ static const struct mtk_pinctrl_devdata mt8173_pinctrl_data = {
                .ports     = 6,
                .ap_num    = 224,
                .db_cnt    = 16,
+               .db_time   = debounce_time_mt2701,
        },
 };
 
index fecb1e6..ddc48b7 100644 (file)
@@ -545,6 +545,7 @@ static const struct mtk_eint_hw mt8183_eint_hw = {
        .ports     = 6,
        .ap_num    = 212,
        .db_cnt    = 13,
+       .db_time   = debounce_time_mt6765,
 };
 
 static const struct mtk_pin_soc mt8183_data = {
index a4dd519..a02f7c3 100644 (file)
@@ -1222,6 +1222,7 @@ static const struct mtk_eint_hw mt8186_eint_hw = {
        .ports     = 7,
        .ap_num    = 217,
        .db_cnt    = 32,
+       .db_time   = debounce_time_mt6765,
 };
 
 static const struct mtk_pin_soc mt8186_data = {
index d0e75c1..6a3d012 100644 (file)
@@ -1625,6 +1625,7 @@ static const struct mtk_eint_hw mt8188_eint_hw = {
        .ports     = 7,
        .ap_num    = 225,
        .db_cnt    = 32,
+       .db_time   = debounce_time_mt6765,
 };
 
 static const struct mtk_pin_soc mt8188_data = {
index 78c02b7..9695f4e 100644 (file)
@@ -1371,6 +1371,7 @@ static const struct mtk_eint_hw mt8192_eint_hw = {
        .ports     = 7,
        .ap_num    = 224,
        .db_cnt    = 32,
+       .db_time   = debounce_time_mt6765,
 };
 
 static const struct mtk_pin_reg_calc mt8192_reg_cals[PINCTRL_PIN_REG_MAX] = {
index 563693d..89557c7 100644 (file)
@@ -935,6 +935,7 @@ static const struct mtk_eint_hw mt8195_eint_hw = {
        .ports     = 7,
        .ap_num    = 225,
        .db_cnt    = 32,
+       .db_time   = debounce_time_mt6765,
 };
 
 static const struct mtk_pin_soc mt8195_data = {
index 57f37a2..e31b89b 100644 (file)
@@ -453,6 +453,7 @@ static const struct mtk_pinctrl_devdata mt8365_pinctrl_data = {
                .ports     = 5,
                .ap_num = 160,
                .db_cnt = 160,
+               .db_time   = debounce_time_mt6765,
        },
 };
 
index 939a193..e929339 100644 (file)
@@ -319,6 +319,7 @@ static const struct mtk_pinctrl_devdata mt8516_pinctrl_data = {
                .ports     = 6,
                .ap_num    = 169,
                .db_cnt    = 64,
+               .db_time   = debounce_time_mt6795,
        },
 };
 
index e1ae3be..b7921b5 100644 (file)
@@ -709,6 +709,9 @@ static int mtk_pinconf_bias_set_rsel(struct mtk_pinctrl *hw,
 {
        int err, rsel_val;
 
+       if (!pullup && arg == MTK_DISABLE)
+               return 0;
+
        if (hw->rsel_si_unit) {
                /* find pin rsel_index from pin_rsel array*/
                err = mtk_hw_pin_rsel_lookup(hw, desc, pullup, arg, &rsel_val);
index 53bdfc4..da974ff 100644 (file)
@@ -679,14 +679,54 @@ static void rockchip_get_recalced_mux(struct rockchip_pin_bank *bank, int pin,
 }
 
 static struct rockchip_mux_route_data px30_mux_route_data[] = {
+       RK_MUXROUTE_SAME(2, RK_PB4, 1, 0x184, BIT(16 + 7)), /* cif-d0m0 */
+       RK_MUXROUTE_SAME(3, RK_PA1, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d0m1 */
+       RK_MUXROUTE_SAME(2, RK_PB6, 1, 0x184, BIT(16 + 7)), /* cif-d1m0 */
+       RK_MUXROUTE_SAME(3, RK_PA2, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d1m1 */
        RK_MUXROUTE_SAME(2, RK_PA0, 1, 0x184, BIT(16 + 7)), /* cif-d2m0 */
        RK_MUXROUTE_SAME(3, RK_PA3, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d2m1 */
+       RK_MUXROUTE_SAME(2, RK_PA1, 1, 0x184, BIT(16 + 7)), /* cif-d3m0 */
+       RK_MUXROUTE_SAME(3, RK_PA5, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d3m1 */
+       RK_MUXROUTE_SAME(2, RK_PA2, 1, 0x184, BIT(16 + 7)), /* cif-d4m0 */
+       RK_MUXROUTE_SAME(3, RK_PA7, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d4m1 */
+       RK_MUXROUTE_SAME(2, RK_PA3, 1, 0x184, BIT(16 + 7)), /* cif-d5m0 */
+       RK_MUXROUTE_SAME(3, RK_PB0, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d5m1 */
+       RK_MUXROUTE_SAME(2, RK_PA4, 1, 0x184, BIT(16 + 7)), /* cif-d6m0 */
+       RK_MUXROUTE_SAME(3, RK_PB1, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d6m1 */
+       RK_MUXROUTE_SAME(2, RK_PA5, 1, 0x184, BIT(16 + 7)), /* cif-d7m0 */
+       RK_MUXROUTE_SAME(3, RK_PB4, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d7m1 */
+       RK_MUXROUTE_SAME(2, RK_PA6, 1, 0x184, BIT(16 + 7)), /* cif-d8m0 */
+       RK_MUXROUTE_SAME(3, RK_PB6, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d8m1 */
+       RK_MUXROUTE_SAME(2, RK_PA7, 1, 0x184, BIT(16 + 7)), /* cif-d9m0 */
+       RK_MUXROUTE_SAME(3, RK_PB7, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d9m1 */
+       RK_MUXROUTE_SAME(2, RK_PB7, 1, 0x184, BIT(16 + 7)), /* cif-d10m0 */
+       RK_MUXROUTE_SAME(3, RK_PC6, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d10m1 */
+       RK_MUXROUTE_SAME(2, RK_PC0, 1, 0x184, BIT(16 + 7)), /* cif-d11m0 */
+       RK_MUXROUTE_SAME(3, RK_PC7, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d11m1 */
+       RK_MUXROUTE_SAME(2, RK_PB0, 1, 0x184, BIT(16 + 7)), /* cif-vsyncm0 */
+       RK_MUXROUTE_SAME(3, RK_PD1, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-vsyncm1 */
+       RK_MUXROUTE_SAME(2, RK_PB1, 1, 0x184, BIT(16 + 7)), /* cif-hrefm0 */
+       RK_MUXROUTE_SAME(3, RK_PD2, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-hrefm1 */
+       RK_MUXROUTE_SAME(2, RK_PB2, 1, 0x184, BIT(16 + 7)), /* cif-clkinm0 */
+       RK_MUXROUTE_SAME(3, RK_PD3, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-clkinm1 */
+       RK_MUXROUTE_SAME(2, RK_PB3, 1, 0x184, BIT(16 + 7)), /* cif-clkoutm0 */
+       RK_MUXROUTE_SAME(3, RK_PD0, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-clkoutm1 */
        RK_MUXROUTE_SAME(3, RK_PC6, 2, 0x184, BIT(16 + 8)), /* pdm-m0 */
        RK_MUXROUTE_SAME(2, RK_PC6, 1, 0x184, BIT(16 + 8) | BIT(8)), /* pdm-m1 */
+       RK_MUXROUTE_SAME(3, RK_PD3, 2, 0x184, BIT(16 + 8)), /* pdm-sdi0m0 */
+       RK_MUXROUTE_SAME(2, RK_PC5, 2, 0x184, BIT(16 + 8) | BIT(8)), /* pdm-sdi0m1 */
        RK_MUXROUTE_SAME(1, RK_PD3, 2, 0x184, BIT(16 + 10)), /* uart2-rxm0 */
        RK_MUXROUTE_SAME(2, RK_PB6, 2, 0x184, BIT(16 + 10) | BIT(10)), /* uart2-rxm1 */
+       RK_MUXROUTE_SAME(1, RK_PD2, 2, 0x184, BIT(16 + 10)), /* uart2-txm0 */
+       RK_MUXROUTE_SAME(2, RK_PB4, 2, 0x184, BIT(16 + 10) | BIT(10)), /* uart2-txm1 */
        RK_MUXROUTE_SAME(0, RK_PC1, 2, 0x184, BIT(16 + 9)), /* uart3-rxm0 */
        RK_MUXROUTE_SAME(1, RK_PB7, 2, 0x184, BIT(16 + 9) | BIT(9)), /* uart3-rxm1 */
+       RK_MUXROUTE_SAME(0, RK_PC0, 2, 0x184, BIT(16 + 9)), /* uart3-txm0 */
+       RK_MUXROUTE_SAME(1, RK_PB6, 2, 0x184, BIT(16 + 9) | BIT(9)), /* uart3-txm1 */
+       RK_MUXROUTE_SAME(0, RK_PC2, 2, 0x184, BIT(16 + 9)), /* uart3-ctsm0 */
+       RK_MUXROUTE_SAME(1, RK_PB4, 2, 0x184, BIT(16 + 9) | BIT(9)), /* uart3-ctsm1 */
+       RK_MUXROUTE_SAME(0, RK_PC3, 2, 0x184, BIT(16 + 9)), /* uart3-rtsm0 */
+       RK_MUXROUTE_SAME(1, RK_PB5, 2, 0x184, BIT(16 + 9) | BIT(9)), /* uart3-rtsm1 */
 };
 
 static struct rockchip_mux_route_data rv1126_mux_route_data[] = {
index aa20753..e96c006 100644 (file)
@@ -1873,8 +1873,8 @@ static const struct msm_pingroup sc8280xp_groups[] = {
        [225] = PINGROUP(225, hs3_mi2s, phase_flag, _, _, _, _, egpio),
        [226] = PINGROUP(226, hs3_mi2s, phase_flag, _, _, _, _, egpio),
        [227] = PINGROUP(227, hs3_mi2s, phase_flag, _, _, _, _, egpio),
-       [228] = UFS_RESET(ufs_reset, 0xf1004),
-       [229] = UFS_RESET(ufs1_reset, 0xf3004),
+       [228] = UFS_RESET(ufs_reset, 0xf1000),
+       [229] = UFS_RESET(ufs1_reset, 0xf3000),
        [230] = SDC_QDSD_PINGROUP(sdc2_clk, 0xe8000, 14, 6),
        [231] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xe8000, 11, 3),
        [232] = SDC_QDSD_PINGROUP(sdc2_data, 0xe8000, 9, 0),
index 6748fe4..def8d7a 100644 (file)
@@ -1596,16 +1596,32 @@ static void ssh_ptl_timeout_reap(struct work_struct *work)
                ssh_ptl_tx_wakeup_packet(ptl);
 }
 
-static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, u8 seq)
+static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, const struct ssh_frame *frame)
 {
        int i;
 
        /*
+        * Ignore unsequenced packets. On some devices (notably Surface Pro 9),
+        * unsequenced events will always be sent with SEQ=0x00. Attempting to
+        * detect retransmission would thus just block all events.
+        *
+        * While sequence numbers would also allow detection of retransmitted
+        * packets in unsequenced communication, they have only ever been used
+        * to cover edge-cases in sequenced transmission. In particular, the
+        * only instance of packets being retransmitted (that we are aware of)
+        * is due to an ACK timeout. As this does not happen in unsequenced
+        * communication, skip the retransmission check for those packets
+        * entirely.
+        */
+       if (frame->type == SSH_FRAME_TYPE_DATA_NSQ)
+               return false;
+
+       /*
         * Check if SEQ has been seen recently (i.e. packet was
         * re-transmitted and we should ignore it).
         */
        for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++) {
-               if (likely(ptl->rx.blocked.seqs[i] != seq))
+               if (likely(ptl->rx.blocked.seqs[i] != frame->seq))
                        continue;
 
                ptl_dbg(ptl, "ptl: ignoring repeated data packet\n");
@@ -1613,7 +1629,7 @@ static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, u8 seq)
        }
 
        /* Update list of blocked sequence IDs. */
-       ptl->rx.blocked.seqs[ptl->rx.blocked.offset] = seq;
+       ptl->rx.blocked.seqs[ptl->rx.blocked.offset] = frame->seq;
        ptl->rx.blocked.offset = (ptl->rx.blocked.offset + 1)
                                  % ARRAY_SIZE(ptl->rx.blocked.seqs);
 
@@ -1624,7 +1640,7 @@ static void ssh_ptl_rx_dataframe(struct ssh_ptl *ptl,
                                 const struct ssh_frame *frame,
                                 const struct ssam_span *payload)
 {
-       if (ssh_ptl_rx_retransmit_check(ptl, frame->seq))
+       if (ssh_ptl_rx_retransmit_check(ptl, frame))
                return;
 
        ptl->ops.data_received(ptl, payload);
index 5859110..023f126 100644 (file)
@@ -234,6 +234,19 @@ static const struct software_node *ssam_node_group_sl3[] = {
        NULL,
 };
 
+/* Devices for Surface Laptop 5. */
+static const struct software_node *ssam_node_group_sl5[] = {
+       &ssam_node_root,
+       &ssam_node_bat_ac,
+       &ssam_node_bat_main,
+       &ssam_node_tmp_pprof,
+       &ssam_node_hid_main_keyboard,
+       &ssam_node_hid_main_touchpad,
+       &ssam_node_hid_main_iid5,
+       &ssam_node_hid_sam_ucm_ucsi,
+       NULL,
+};
+
 /* Devices for Surface Laptop Studio. */
 static const struct software_node *ssam_node_group_sls[] = {
        &ssam_node_root,
@@ -268,6 +281,7 @@ static const struct software_node *ssam_node_group_sp7[] = {
        NULL,
 };
 
+/* Devices for Surface Pro 8 */
 static const struct software_node *ssam_node_group_sp8[] = {
        &ssam_node_root,
        &ssam_node_hub_kip,
@@ -284,6 +298,23 @@ static const struct software_node *ssam_node_group_sp8[] = {
        NULL,
 };
 
+/* Devices for Surface Pro 9 */
+static const struct software_node *ssam_node_group_sp9[] = {
+       &ssam_node_root,
+       &ssam_node_hub_kip,
+       &ssam_node_bat_ac,
+       &ssam_node_bat_main,
+       &ssam_node_tmp_pprof,
+       /* TODO: Tablet mode switch (via POS subsystem) */
+       &ssam_node_hid_kip_keyboard,
+       &ssam_node_hid_kip_penstash,
+       &ssam_node_hid_kip_touchpad,
+       &ssam_node_hid_kip_fwupd,
+       &ssam_node_hid_sam_sensors,
+       &ssam_node_hid_sam_ucm_ucsi,
+       NULL,
+};
+
 
 /* -- SSAM platform/meta-hub driver. ---------------------------------------- */
 
@@ -303,6 +334,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
        /* Surface Pro 8 */
        { "MSHW0263", (unsigned long)ssam_node_group_sp8 },
 
+       /* Surface Pro 9 */
+       { "MSHW0343", (unsigned long)ssam_node_group_sp9 },
+
        /* Surface Book 2 */
        { "MSHW0107", (unsigned long)ssam_node_group_gen5 },
 
@@ -324,6 +358,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
        /* Surface Laptop 4 (13", Intel) */
        { "MSHW0250", (unsigned long)ssam_node_group_sl3 },
 
+       /* Surface Laptop 5 */
+       { "MSHW0350", (unsigned long)ssam_node_group_sl5 },
+
        /* Surface Laptop Go 1 */
        { "MSHW0118", (unsigned long)ssam_node_group_slg1 },
 
index 18224f9..ee67efd 100644 (file)
@@ -566,6 +566,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
        },
        {
                .callback = set_force_caps,
+               .ident = "Acer Aspire Switch V 10 SW5-017",
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"),
+               },
+               .driver_data = (void *)ACER_CAP_KBD_DOCK,
+       },
+       {
+               .callback = set_force_caps,
                .ident = "Acer One 10 (S1003)",
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
index 96e790e..ef4ae97 100644 (file)
@@ -276,7 +276,6 @@ static const struct file_operations amd_pmc_stb_debugfs_fops_v2 = {
        .release = amd_pmc_stb_debugfs_release_v2,
 };
 
-#if defined(CONFIG_SUSPEND) || defined(CONFIG_DEBUG_FS)
 static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
 {
        if (dev->cpu_id == AMD_CPU_ID_PCO) {
@@ -351,7 +350,6 @@ static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table
        memcpy_fromio(table, pdev->smu_virt_addr, sizeof(struct smu_metrics));
        return 0;
 }
-#endif /* CONFIG_SUSPEND || CONFIG_DEBUG_FS */
 
 #ifdef CONFIG_SUSPEND
 static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
@@ -964,6 +962,7 @@ static const struct acpi_device_id amd_pmc_acpi_ids[] = {
        {"AMDI0006", 0},
        {"AMDI0007", 0},
        {"AMDI0008", 0},
+       {"AMDI0009", 0},
        {"AMD0004", 0},
        {"AMD0005", 0},
        { }
index 6e8e093..872efc1 100644 (file)
@@ -1738,6 +1738,8 @@ static void asus_wmi_set_xusb2pr(struct asus_wmi *asus)
        pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
                                cpu_to_le32(ports_available));
 
+       pci_dev_put(xhci_pdev);
+
        pr_info("set USB_INTEL_XUSB2PR old: 0x%04x, new: 0x%04x\n",
                        orig_ports_available, ports_available);
 }
index 1244903..0a99058 100644 (file)
@@ -90,6 +90,7 @@ enum hp_wmi_event_ids {
        HPWMI_PEAKSHIFT_PERIOD          = 0x0F,
        HPWMI_BATTERY_CHARGE_PERIOD     = 0x10,
        HPWMI_SANITIZATION_MODE         = 0x17,
+       HPWMI_SMART_EXPERIENCE_APP      = 0x21,
 };
 
 /*
@@ -859,6 +860,8 @@ static void hp_wmi_notify(u32 value, void *context)
                break;
        case HPWMI_SANITIZATION_MODE:
                break;
+       case HPWMI_SMART_EXPERIENCE_APP:
+               break;
        default:
                pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
                break;
index 33b3dfd..3ea8fc6 100644 (file)
@@ -136,6 +136,7 @@ struct ideapad_private {
                bool dytc                 : 1;
                bool fan_mode             : 1;
                bool fn_lock              : 1;
+               bool set_fn_lock_led      : 1;
                bool hw_rfkill_switch     : 1;
                bool kbd_bl               : 1;
                bool touchpad_ctrl_via_ec : 1;
@@ -154,7 +155,21 @@ MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
 
 static bool allow_v4_dytc;
 module_param(allow_v4_dytc, bool, 0444);
-MODULE_PARM_DESC(allow_v4_dytc, "Enable DYTC version 4 platform-profile support.");
+MODULE_PARM_DESC(allow_v4_dytc,
+       "Enable DYTC version 4 platform-profile support. "
+       "If you need this please report this to: platform-driver-x86@vger.kernel.org");
+
+static bool hw_rfkill_switch;
+module_param(hw_rfkill_switch, bool, 0444);
+MODULE_PARM_DESC(hw_rfkill_switch,
+       "Enable rfkill support for laptops with a hw on/off wifi switch/slider. "
+       "If you need this please report this to: platform-driver-x86@vger.kernel.org");
+
+static bool set_fn_lock_led;
+module_param(set_fn_lock_led, bool, 0444);
+MODULE_PARM_DESC(set_fn_lock_led,
+       "Enable driver based updates of the fn-lock LED on fn-lock changes. "
+       "If you need this please report this to: platform-driver-x86@vger.kernel.org");
 
 /*
  * ACPI Helpers
@@ -1501,6 +1516,9 @@ static void ideapad_wmi_notify(u32 value, void *context)
                ideapad_input_report(priv, value);
                break;
        case 208:
+               if (!priv->features.set_fn_lock_led)
+                       break;
+
                if (!eval_hals(priv->adev->handle, &result)) {
                        bool state = test_bit(HALS_FNLOCK_STATE_BIT, &result);
 
@@ -1514,6 +1532,18 @@ static void ideapad_wmi_notify(u32 value, void *context)
 }
 #endif
 
+/* On some models we need to call exec_sals(SALS_FNLOCK_ON/OFF) to set the LED */
+static const struct dmi_system_id set_fn_lock_led_list[] = {
+       {
+               /* https://bugzilla.kernel.org/show_bug.cgi?id=212671 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion R7000P2020H"),
+               }
+       },
+       {}
+};
+
 /*
  * Some ideapads have a hardware rfkill switch, but most do not have one.
  * Reading VPCCMD_R_RF always results in 0 on models without a hardware rfkill,
@@ -1556,7 +1586,10 @@ static void ideapad_check_features(struct ideapad_private *priv)
        acpi_handle handle = priv->adev->handle;
        unsigned long val;
 
-       priv->features.hw_rfkill_switch = dmi_check_system(hw_rfkill_list);
+       priv->features.set_fn_lock_led =
+               set_fn_lock_led || dmi_check_system(set_fn_lock_led_list);
+       priv->features.hw_rfkill_switch =
+               hw_rfkill_switch || dmi_check_system(hw_rfkill_list);
 
        /* Most ideapads with ELAN0634 touchpad don't use EC touchpad switch */
        if (acpi_dev_present("ELAN0634", NULL, -1))
index 15ca8af..ddfba38 100644 (file)
@@ -18,6 +18,8 @@
 #include <asm/cpu_device_id.h>
 #include <asm/intel-family.h>
 
+#include <xen/xen.h>
+
 static void intel_pmc_core_release(struct device *dev)
 {
        kfree(dev);
@@ -53,6 +55,13 @@ static int __init pmc_core_platform_init(void)
        if (acpi_dev_present("INT33A1", NULL, -1))
                return -ENODEV;
 
+       /*
+        * Skip forcefully attaching the device for VMs. Make an exception for
+        * Xen dom0, which does have full hardware access.
+        */
+       if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR) && !xen_initial_domain())
+               return -ENODEV;
+
        if (!x86_match_cpu(intel_pmc_core_platform_ids))
                return -ENODEV;
 
index 20e5c04..8476dfe 100644 (file)
@@ -4497,6 +4497,14 @@ static const struct dmi_system_id fwbug_list[] __initconst = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "21A0"),
                }
        },
+       {
+               .ident = "P14s Gen2 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "21A1"),
+               }
+       },
        {}
 };
 
index 863fabe..307ee6f 100644 (file)
@@ -725,7 +725,14 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
        /* Get thermal zone and ADC */
        di->tz = thermal_zone_get_zone_by_name("battery-thermal");
        if (IS_ERR(di->tz)) {
-               return dev_err_probe(dev, PTR_ERR(di->tz),
+               ret = PTR_ERR(di->tz);
+               /*
+                * This usually just means we are probing before the thermal
+                * zone, so just defer.
+                */
+               if (ret == -ENODEV)
+                       ret = -EPROBE_DEFER;
+               return dev_err_probe(dev, ret,
                                     "failed to get battery thermal zone\n");
        }
        di->bat_ctrl = devm_iio_channel_get(dev, "bat_ctrl");
index 218e8e6..00221e9 100644 (file)
@@ -352,7 +352,7 @@ static int ip5xxx_battery_get_property(struct power_supply *psy,
                ret = ip5xxx_battery_read_adc(ip5xxx, IP5XXX_BATIADC_DAT0,
                                              IP5XXX_BATIADC_DAT1, &raw);
 
-               val->intval = DIV_ROUND_CLOSEST(raw * 745985, 1000);
+               val->intval = DIV_ROUND_CLOSEST(raw * 149197, 200);
                return 0;
 
        case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
index 635f051..f20a6ac 100644 (file)
@@ -121,7 +121,7 @@ struct rk817_charger {
 #define ADC_TO_CHARGE_UAH(adc_value, res_div)  \
        (adc_value / 3600 * 172 / res_div)
 
-static u8 rk817_chg_cur_to_reg(u32 chg_cur_ma)
+static int rk817_chg_cur_to_reg(u32 chg_cur_ma)
 {
        if (chg_cur_ma >= 3500)
                return CHG_3_5A;
@@ -864,8 +864,8 @@ static int rk817_battery_init(struct rk817_charger *charger,
 {
        struct rk808 *rk808 = charger->rk808;
        u32 tmp, max_chg_vol_mv, max_chg_cur_ma;
-       u8 max_chg_vol_reg, chg_term_i_reg, max_chg_cur_reg;
-       int ret, chg_term_ma;
+       u8 max_chg_vol_reg, chg_term_i_reg;
+       int ret, chg_term_ma, max_chg_cur_reg;
        u8 bulk_reg[2];
 
        /* Get initial plug state */
@@ -1116,14 +1116,12 @@ static int rk817_charger_probe(struct platform_device *pdev)
 
        charger->bat_ps = devm_power_supply_register(&pdev->dev,
                                                     &rk817_bat_desc, &pscfg);
-
-       charger->chg_ps = devm_power_supply_register(&pdev->dev,
-                                                    &rk817_chg_desc, &pscfg);
-
-       if (IS_ERR(charger->chg_ps))
+       if (IS_ERR(charger->bat_ps))
                return dev_err_probe(dev, -EINVAL,
                                     "Battery failed to probe\n");
 
+       charger->chg_ps = devm_power_supply_register(&pdev->dev,
+                                                    &rk817_chg_desc, &pscfg);
        if (IS_ERR(charger->chg_ps))
                return dev_err_probe(dev, -EINVAL,
                                     "Charger failed to probe\n");
index bcccad8..e8c00a8 100644 (file)
@@ -5154,6 +5154,7 @@ static void regulator_dev_release(struct device *dev)
 {
        struct regulator_dev *rdev = dev_get_drvdata(dev);
 
+       debugfs_remove_recursive(rdev->debugfs);
        kfree(rdev->constraints);
        of_node_put(rdev->dev.of_node);
        kfree(rdev);
@@ -5644,11 +5645,15 @@ wash:
        mutex_lock(&regulator_list_mutex);
        regulator_ena_gpio_free(rdev);
        mutex_unlock(&regulator_list_mutex);
+       put_device(&rdev->dev);
+       rdev = NULL;
 clean:
        if (dangling_of_gpiod)
                gpiod_put(config->ena_gpiod);
+       if (rdev && rdev->dev.of_node)
+               of_node_put(rdev->dev.of_node);
+       kfree(rdev);
        kfree(config);
-       put_device(&rdev->dev);
 rinse:
        if (dangling_cfg_gpiod)
                gpiod_put(cfg->ena_gpiod);
@@ -5677,7 +5682,6 @@ void regulator_unregister(struct regulator_dev *rdev)
 
        mutex_lock(&regulator_list_mutex);
 
-       debugfs_remove_recursive(rdev->debugfs);
        WARN_ON(rdev->open_count);
        regulator_remove_coupling(rdev);
        unset_regulator_supplies(rdev);
index 6b96899..8488417 100644 (file)
@@ -243,6 +243,7 @@ static int rt5759_regulator_register(struct rt5759_priv *priv)
        if (priv->chip_type == CHIP_TYPE_RT5759A)
                reg_desc->uV_step = RT5759A_STEP_UV;
 
+       memset(&reg_cfg, 0, sizeof(reg_cfg));
        reg_cfg.dev = priv->dev;
        reg_cfg.of_node = np;
        reg_cfg.init_data = of_get_regulator_init_data(priv->dev, np, reg_desc);
index 75a941f..1b2eee9 100644 (file)
@@ -457,6 +457,8 @@ static int slg51000_i2c_probe(struct i2c_client *client)
                chip->cs_gpiod = cs_gpiod;
        }
 
+       usleep_range(10000, 11000);
+
        i2c_set_clientdata(client, chip);
        chip->chip_irq = client->irq;
        chip->dev = dev;
index 430265c..f385675 100644 (file)
@@ -67,6 +67,7 @@ struct twlreg_info {
 #define TWL6030_CFG_STATE_SLEEP        0x03
 #define TWL6030_CFG_STATE_GRP_SHIFT    5
 #define TWL6030_CFG_STATE_APP_SHIFT    2
+#define TWL6030_CFG_STATE_MASK         0x03
 #define TWL6030_CFG_STATE_APP_MASK     (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
 #define TWL6030_CFG_STATE_APP(v)       (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
                                                TWL6030_CFG_STATE_APP_SHIFT)
@@ -128,13 +129,14 @@ static int twl6030reg_is_enabled(struct regulator_dev *rdev)
                if (grp < 0)
                        return grp;
                grp &= P1_GRP_6030;
+               val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+               val = TWL6030_CFG_STATE_APP(val);
        } else {
+               val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+               val &= TWL6030_CFG_STATE_MASK;
                grp = 1;
        }
 
-       val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
-       val = TWL6030_CFG_STATE_APP(val);
-
        return grp && (val == TWL6030_CFG_STATE_ON);
 }
 
@@ -187,7 +189,12 @@ static int twl6030reg_get_status(struct regulator_dev *rdev)
 
        val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
 
-       switch (TWL6030_CFG_STATE_APP(val)) {
+       if (info->features & TWL6032_SUBCLASS)
+               val &= TWL6030_CFG_STATE_MASK;
+       else
+               val = TWL6030_CFG_STATE_APP(val);
+
+       switch (val) {
        case TWL6030_CFG_STATE_ON:
                return REGULATOR_STATUS_NORMAL;
 
@@ -530,6 +537,7 @@ static const struct twlreg_info TWL6030_INFO_##label = { \
 #define TWL6032_ADJUSTABLE_LDO(label, offset) \
 static const struct twlreg_info TWL6032_INFO_##label = { \
        .base = offset, \
+       .features = TWL6032_SUBCLASS, \
        .desc = { \
                .name = #label, \
                .id = TWL6032_REG_##label, \
@@ -562,6 +570,7 @@ static const struct twlreg_info TWLFIXED_INFO_##label = { \
 #define TWL6032_ADJUSTABLE_SMPS(label, offset) \
 static const struct twlreg_info TWLSMPS_INFO_##label = { \
        .base = offset, \
+       .features = TWL6032_SUBCLASS, \
        .desc = { \
                .name = #label, \
                .id = TWL6032_REG_##label, \
index cb83f81..df17f0f 100644 (file)
@@ -1954,7 +1954,7 @@ dasd_copy_pair_show(struct device *dev,
                        break;
                }
        }
-       if (!copy->entry[i].primary)
+       if (i == DASD_CP_ENTRIES)
                goto out;
 
        /* print all secondary */
index 662730f..5d0b999 100644 (file)
@@ -4722,7 +4722,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
        struct dasd_device *basedev;
        struct req_iterator iter;
        struct dasd_ccw_req *cqr;
-       unsigned int first_offs;
        unsigned int trkcount;
        unsigned long *idaws;
        unsigned int size;
@@ -4756,7 +4755,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
        last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
                DASD_RAW_SECTORS_PER_TRACK;
        trkcount = last_trk - first_trk + 1;
-       first_offs = 0;
 
        if (rq_data_dir(req) == READ)
                cmd = DASD_ECKD_CCW_READ_TRACK;
@@ -4800,13 +4798,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
 
        if (use_prefix) {
                prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
-                          startdev, 1, first_offs + 1, trkcount, 0, 0);
+                          startdev, 1, 0, trkcount, 0, 0);
        } else {
                define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
                ccw[-1].flags |= CCW_FLAG_CC;
 
                data += sizeof(struct DE_eckd_data);
-               locate_record_ext(ccw++, data, first_trk, first_offs + 1,
+               locate_record_ext(ccw++, data, first_trk, 0,
                                  trkcount, cmd, basedev, 0, 0);
        }
 
@@ -5500,7 +5498,7 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
  * Dump the range of CCWs into 'page' buffer
  * and return number of printed chars.
  */
-static int
+static void
 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
 {
        int len, count;
@@ -5518,16 +5516,21 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
                else
                        datap = (char *) ((addr_t) from->cda);
 
-               /* dump data (max 32 bytes) */
-               for (count = 0; count < from->count && count < 32; count++) {
-                       if (count % 8 == 0) len += sprintf(page + len, " ");
-                       if (count % 4 == 0) len += sprintf(page + len, " ");
+               /* dump data (max 128 bytes) */
+               for (count = 0; count < from->count && count < 128; count++) {
+                       if (count % 32 == 0)
+                               len += sprintf(page + len, "\n");
+                       if (count % 8 == 0)
+                               len += sprintf(page + len, " ");
+                       if (count % 4 == 0)
+                               len += sprintf(page + len, " ");
                        len += sprintf(page + len, "%02x", datap[count]);
                }
                len += sprintf(page + len, "\n");
                from++;
        }
-       return len;
+       if (len > 0)
+               printk(KERN_ERR "%s", page);
 }
 
 static void
@@ -5619,37 +5622,33 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
        if (req) {
                /* req == NULL for unsolicited interrupts */
                /* dump the Channel Program (max 140 Bytes per line) */
-               /* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
+               /* Count CCW and print first CCWs (maximum 7) */
                first = req->cpaddr;
                for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
                to = min(first + 6, last);
-               len = sprintf(page, PRINTK_HEADER
-                             " Related CP in req: %p\n", req);
-               dasd_eckd_dump_ccw_range(first, to, page + len);
-               printk(KERN_ERR "%s", page);
+               printk(KERN_ERR PRINTK_HEADER " Related CP in req: %p\n", req);
+               dasd_eckd_dump_ccw_range(first, to, page);
 
                /* print failing CCW area (maximum 4) */
                /* scsw->cda is either valid or zero  */
-               len = 0;
                from = ++to;
                fail = (struct ccw1 *)(addr_t)
                                irb->scsw.cmd.cpa; /* failing CCW */
                if (from <  fail - 2) {
                        from = fail - 2;     /* there is a gap - print header */
-                       len += sprintf(page, PRINTK_HEADER "......\n");
+                       printk(KERN_ERR PRINTK_HEADER "......\n");
                }
                to = min(fail + 1, last);
-               len += dasd_eckd_dump_ccw_range(from, to, page + len);
+               dasd_eckd_dump_ccw_range(from, to, page + len);
 
                /* print last CCWs (maximum 2) */
+               len = 0;
                from = max(from, ++to);
                if (from < last - 1) {
                        from = last - 1;     /* there is a gap - print header */
-                       len += sprintf(page + len, PRINTK_HEADER "......\n");
+                       printk(KERN_ERR PRINTK_HEADER "......\n");
                }
-               len += dasd_eckd_dump_ccw_range(from, last, page + len);
-               if (len > 0)
-                       printk(KERN_ERR "%s", page);
+               dasd_eckd_dump_ccw_range(from, last, page + len);
        }
        free_page((unsigned long) page);
 }
index d0ddf2c..9327dcd 100644 (file)
@@ -401,7 +401,7 @@ dasd_ioctl_copy_pair_swap(struct block_device *bdev, void __user *argp)
                return -EFAULT;
        }
        if (memchr_inv(data.reserved, 0, sizeof(data.reserved))) {
-               pr_warn("%s: Ivalid swap data specified.\n",
+               pr_warn("%s: Invalid swap data specified\n",
                        dev_name(&device->cdev->dev));
                dasd_put_device(device);
                return DASD_COPYPAIRSWAP_INVALID;
index 93b80da..b392b9f 100644 (file)
@@ -636,6 +636,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
        dev_info->gd->minors = DCSSBLK_MINORS_PER_DISK;
        dev_info->gd->fops = &dcssblk_devops;
        dev_info->gd->private_data = dev_info;
+       dev_info->gd->flags |= GENHD_FL_NO_PART;
        blk_queue_logical_block_size(dev_info->gd->queue, 4096);
        blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue);
 
index 59ac98f..b02c631 100644 (file)
@@ -233,8 +233,11 @@ static void __init ap_init_qci_info(void)
        if (!ap_qci_info)
                return;
        ap_qci_info_old = kzalloc(sizeof(*ap_qci_info_old), GFP_KERNEL);
-       if (!ap_qci_info_old)
+       if (!ap_qci_info_old) {
+               kfree(ap_qci_info);
+               ap_qci_info = NULL;
                return;
+       }
        if (ap_fetch_qci_info(ap_qci_info) != 0) {
                kfree(ap_qci_info);
                kfree(ap_qci_info_old);
index 19223b0..ab3ea52 100644 (file)
@@ -884,7 +884,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
        const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
        struct zfcp_adapter *adapter = req->adapter;
        struct zfcp_qdio *qdio = adapter->qdio;
-       int req_id = req->req_id;
+       unsigned long req_id = req->req_id;
 
        zfcp_reqlist_add(adapter->req_list, req);
 
index f77ee40..3306de7 100644 (file)
@@ -3265,7 +3265,8 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
        }
 
        if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) &&
-           (scmd->cmnd[0] != ATA_16)) {
+           (scmd->cmnd[0] != ATA_16) &&
+           mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) {
                ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__,
                    scmd->result);
                scsi_print_command(scmd);
index 6298536..bebda91 100644 (file)
@@ -7323,8 +7323,12 @@ static int sdebug_add_host_helper(int per_host_idx)
        dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
 
        error = device_register(&sdbg_host->dev);
-       if (error)
+       if (error) {
+               spin_lock(&sdebug_host_list_lock);
+               list_del(&sdbg_host->host_list);
+               spin_unlock(&sdebug_host_list_lock);
                goto clean;
+       }
 
        ++sdebug_num_hosts;
        return 0;
index cd3db96..f473c00 100644 (file)
@@ -231,7 +231,7 @@ iscsi_create_endpoint(int dd_size)
        dev_set_name(&ep->dev, "ep-%d", id);
        err = device_register(&ep->dev);
         if (err)
-               goto free_id;
+               goto put_dev;
 
        err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
        if (err)
@@ -245,10 +245,12 @@ unregister_dev:
        device_unregister(&ep->dev);
        return NULL;
 
-free_id:
+put_dev:
        mutex_lock(&iscsi_ep_idr_mutex);
        idr_remove(&iscsi_ep_idr, id);
        mutex_unlock(&iscsi_ep_idr_mutex);
+       put_device(&ep->dev);
+       return NULL;
 free_ep:
        kfree(ep);
        return NULL;
@@ -766,7 +768,7 @@ iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *transport,
 
        err = device_register(&iface->dev);
        if (err)
-               goto free_iface;
+               goto put_dev;
 
        err = sysfs_create_group(&iface->dev.kobj, &iscsi_iface_group);
        if (err)
@@ -780,9 +782,8 @@ unreg_iface:
        device_unregister(&iface->dev);
        return NULL;
 
-free_iface:
-       put_device(iface->dev.parent);
-       kfree(iface);
+put_dev:
+       put_device(&iface->dev);
        return NULL;
 }
 EXPORT_SYMBOL_GPL(iscsi_create_iface);
@@ -1251,15 +1252,15 @@ iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index,
 
        err = device_register(&fnode_sess->dev);
        if (err)
-               goto free_fnode_sess;
+               goto put_dev;
 
        if (dd_size)
                fnode_sess->dd_data = &fnode_sess[1];
 
        return fnode_sess;
 
-free_fnode_sess:
-       kfree(fnode_sess);
+put_dev:
+       put_device(&fnode_sess->dev);
        return NULL;
 }
 EXPORT_SYMBOL_GPL(iscsi_create_flashnode_sess);
@@ -1299,15 +1300,15 @@ iscsi_create_flashnode_conn(struct Scsi_Host *shost,
 
        err = device_register(&fnode_conn->dev);
        if (err)
-               goto free_fnode_conn;
+               goto put_dev;
 
        if (dd_size)
                fnode_conn->dd_data = &fnode_conn[1];
 
        return fnode_conn;
 
-free_fnode_conn:
-       kfree(fnode_conn);
+put_dev:
+       put_device(&fnode_conn->dev);
        return NULL;
 }
 EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn);
@@ -4815,7 +4816,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
        dev_set_name(&priv->dev, "%s", tt->name);
        err = device_register(&priv->dev);
        if (err)
-               goto free_priv;
+               goto put_dev;
 
        err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group);
        if (err)
@@ -4850,8 +4851,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
 unregister_dev:
        device_unregister(&priv->dev);
        return NULL;
-free_priv:
-       kfree(priv);
+put_dev:
+       put_device(&priv->dev);
        return NULL;
 }
 EXPORT_SYMBOL_GPL(iscsi_register_transport);
index bc46721..3c5b7e4 100644 (file)
@@ -303,16 +303,21 @@ enum storvsc_request_type {
 };
 
 /*
- * SRB status codes and masks; a subset of the codes used here.
+ * SRB status codes and masks. In the 8-bit field, the two high order bits
+ * are flags, while the remaining 6 bits are an integer status code.  The
+ * definitions here include only the subset of the integer status codes that
+ * are tested for in this driver.
  */
-
 #define SRB_STATUS_AUTOSENSE_VALID     0x80
 #define SRB_STATUS_QUEUE_FROZEN                0x40
-#define SRB_STATUS_INVALID_LUN 0x20
-#define SRB_STATUS_SUCCESS     0x01
-#define SRB_STATUS_ABORTED     0x02
-#define SRB_STATUS_ERROR       0x04
-#define SRB_STATUS_DATA_OVERRUN        0x12
+
+/* SRB status integer codes */
+#define SRB_STATUS_SUCCESS             0x01
+#define SRB_STATUS_ABORTED             0x02
+#define SRB_STATUS_ERROR               0x04
+#define SRB_STATUS_INVALID_REQUEST     0x06
+#define SRB_STATUS_DATA_OVERRUN                0x12
+#define SRB_STATUS_INVALID_LUN         0x20
 
 #define SRB_STATUS(status) \
        (status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN))
@@ -969,38 +974,25 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
        void (*process_err_fn)(struct work_struct *work);
        struct hv_host_device *host_dev = shost_priv(host);
 
-       /*
-        * In some situations, Hyper-V sets multiple bits in the
-        * srb_status, such as ABORTED and ERROR. So process them
-        * individually, with the most specific bits first.
-        */
-
-       if (vm_srb->srb_status & SRB_STATUS_INVALID_LUN) {
-               set_host_byte(scmnd, DID_NO_CONNECT);
-               process_err_fn = storvsc_remove_lun;
-               goto do_work;
-       }
+       switch (SRB_STATUS(vm_srb->srb_status)) {
+       case SRB_STATUS_ERROR:
+       case SRB_STATUS_ABORTED:
+       case SRB_STATUS_INVALID_REQUEST:
+               if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) {
+                       /* Check for capacity change */
+                       if ((asc == 0x2a) && (ascq == 0x9)) {
+                               process_err_fn = storvsc_device_scan;
+                               /* Retry the I/O that triggered this. */
+                               set_host_byte(scmnd, DID_REQUEUE);
+                               goto do_work;
+                       }
 
-       if (vm_srb->srb_status & SRB_STATUS_ABORTED) {
-               if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
-                   /* Capacity data has changed */
-                   (asc == 0x2a) && (ascq == 0x9)) {
-                       process_err_fn = storvsc_device_scan;
                        /*
-                        * Retry the I/O that triggered this.
+                        * Otherwise, let upper layer deal with the
+                        * error when sense message is present
                         */
-                       set_host_byte(scmnd, DID_REQUEUE);
-                       goto do_work;
-               }
-       }
-
-       if (vm_srb->srb_status & SRB_STATUS_ERROR) {
-               /*
-                * Let upper layer deal with error when
-                * sense message is present.
-                */
-               if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)
                        return;
+               }
 
                /*
                 * If there is an error; offline the device since all
@@ -1023,6 +1015,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
                default:
                        set_host_byte(scmnd, DID_ERROR);
                }
+               return;
+
+       case SRB_STATUS_INVALID_LUN:
+               set_host_byte(scmnd, DID_NO_CONNECT);
+               process_err_fn = storvsc_remove_lun;
+               goto do_work;
+
        }
        return;
 
index 7c4f32d..5614085 100644 (file)
@@ -839,6 +839,8 @@ static struct siox_device *siox_device_add(struct siox_master *smaster,
 
 err_device_register:
        /* don't care to make the buffer smaller again */
+       put_device(&sdevice->dev);
+       sdevice = NULL;
 
 err_buf_alloc:
        siox_master_unlock(smaster);
index 2ed821f..a0fdf9d 100644 (file)
@@ -23,7 +23,7 @@ config SLIM_QCOM_CTRL
 config SLIM_QCOM_NGD_CTRL
        tristate "Qualcomm SLIMbus Satellite Non-Generic Device Component"
        depends on HAS_IOMEM && DMA_ENGINE && NET
-       depends on QCOM_RPROC_COMMON || COMPILE_TEST
+       depends on QCOM_RPROC_COMMON || (COMPILE_TEST && !QCOM_RPROC_COMMON)
        depends on ARCH_QCOM || COMPILE_TEST
        select QCOM_QMI_HELPERS
        select QCOM_PDR_HELPERS
index 75f87b3..73a2aa3 100644 (file)
@@ -67,10 +67,10 @@ static const int slim_presence_rate_table[] = {
        384000,
        768000,
        0, /* Reserved */
-       110250,
-       220500,
-       441000,
-       882000,
+       11025,
+       22050,
+       44100,
+       88200,
        176400,
        352800,
        705600,
index cc57a38..28144c6 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/arm-smccc.h>
 #include <linux/of.h>
+#include <linux/clk.h>
 
 #define REV_B1                         0x21
 
@@ -56,6 +57,7 @@ static u32 __init imx8mq_soc_revision(void)
        void __iomem *ocotp_base;
        u32 magic;
        u32 rev;
+       struct clk *clk;
 
        np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp");
        if (!np)
@@ -63,6 +65,13 @@ static u32 __init imx8mq_soc_revision(void)
 
        ocotp_base = of_iomap(np, 0);
        WARN_ON(!ocotp_base);
+       clk = of_clk_get_by_name(np, NULL);
+       if (!clk) {
+               WARN_ON(!clk);
+               return 0;
+       }
+
+       clk_prepare_enable(clk);
 
        /*
         * SOC revision on older imx8mq is not available in fuses so query
@@ -79,6 +88,8 @@ static u32 __init imx8mq_soc_revision(void)
        soc_uid <<= 32;
        soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
 
+       clk_disable_unprepare(clk);
+       clk_put(clk);
        iounmap(ocotp_base);
        of_node_put(np);
 
index 1322b8c..ababb91 100644 (file)
@@ -128,12 +128,15 @@ static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
 
        dw_spi_dma_sg_burst_init(dws);
 
+       pci_dev_put(dma_dev);
+
        return 0;
 
 free_rxchan:
        dma_release_channel(dws->rxchan);
        dws->rxchan = NULL;
 err_exit:
+       pci_dev_put(dma_dev);
        return -EBUSY;
 }
 
index 30d82cc..d209930 100644 (file)
@@ -444,8 +444,7 @@ static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
        unsigned int pre, post;
        unsigned int fin = spi_imx->spi_clk;
 
-       if (unlikely(fspi > fin))
-               return 0;
+       fspi = min(fspi, fin);
 
        post = fls(fin) - fls(fspi);
        if (fin > fspi << post)
@@ -1608,6 +1607,13 @@ static int spi_imx_transfer_one(struct spi_controller *controller,
                return spi_imx_pio_transfer_slave(spi, transfer);
 
        /*
+        * If we decided in spi_imx_can_dma() that we want to do a DMA
+        * transfer, the SPI transfer has already been mapped, so we
+        * have to do the DMA transfer here.
+        */
+       if (spi_imx->usedma)
+               return spi_imx_dma_transfer(spi_imx, transfer);
+       /*
         * Calculate the estimated time in us the transfer runs. Find
         * the number of Hz per byte per polling limit.
         */
@@ -1618,9 +1624,6 @@ static int spi_imx_transfer_one(struct spi_controller *controller,
        if (transfer->len < byte_limit)
                return spi_imx_poll_transfer(spi, transfer);
 
-       if (spi_imx->usedma)
-               return spi_imx_dma_transfer(spi_imx, transfer);
-
        return spi_imx_pio_transfer(spi, transfer);
 }
 
index a33c9a3..d6aff90 100644 (file)
@@ -1273,8 +1273,11 @@ static int mtk_spi_remove(struct platform_device *pdev)
 {
        struct spi_master *master = platform_get_drvdata(pdev);
        struct mtk_spi *mdata = spi_master_get_devdata(master);
+       int ret;
 
-       pm_runtime_disable(&pdev->dev);
+       ret = pm_runtime_resume_and_get(&pdev->dev);
+       if (ret < 0)
+               return ret;
 
        mtk_spi_reset(mdata);
 
@@ -1283,6 +1286,9 @@ static int mtk_spi_remove(struct platform_device *pdev)
                clk_unprepare(mdata->spi_hclk);
        }
 
+       pm_runtime_put_noidle(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+
        return 0;
 }
 
index 10f0c5a..9f35661 100644 (file)
@@ -924,8 +924,9 @@ static int tegra_qspi_start_transfer_one(struct spi_device *spi,
 static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
 {
        struct tegra_qspi_client_data *cdata;
+       struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
 
-       cdata = devm_kzalloc(&spi->dev, sizeof(*cdata), GFP_KERNEL);
+       cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL);
        if (!cdata)
                return NULL;
 
index f9589c5..1e5ad3b 100644 (file)
@@ -439,7 +439,7 @@ int rtllib_wx_set_essid(struct rtllib_device *ieee,
                        union iwreq_data *wrqu, char *extra)
 {
 
-       int ret = 0, len, i;
+       int ret = 0, len;
        short proto_started;
        unsigned long flags;
 
@@ -455,13 +455,6 @@ int rtllib_wx_set_essid(struct rtllib_device *ieee,
                goto out;
        }
 
-       for (i = 0; i < len; i++) {
-               if (extra[i] < 0) {
-                       ret = -1;
-                       goto out;
-               }
-       }
-
        if (proto_started)
                rtllib_stop_protocol(ieee, true);
 
index 4407b56..139031c 100644 (file)
@@ -397,6 +397,7 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host
        ret = device_register(&tl_hba->dev);
        if (ret) {
                pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
+               put_device(&tl_hba->dev);
                return -ENODEV;
        }
 
@@ -1073,7 +1074,7 @@ check_len:
         */
        ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
        if (ret)
-               goto out;
+               return ERR_PTR(ret);
 
        sh = tl_hba->sh;
        tcm_loop_hba_no_cnt++;
index f3947be..64f0e04 100644 (file)
@@ -80,7 +80,7 @@ static int optee_register_device(const uuid_t *device_uuid)
        rc = device_register(&optee_device->dev);
        if (rc) {
                pr_err("device registration failed, err: %d\n", rc);
-               kfree(optee_device);
+               put_device(&optee_device->dev);
        }
 
        return rc;
index 5e516f5..b6e0cc4 100644 (file)
@@ -264,7 +264,7 @@ struct gsm_mux {
        bool constipated;               /* Asked by remote to shut up */
        bool has_devices;               /* Devices were registered */
 
-       struct mutex tx_mutex;
+       spinlock_t tx_lock;
        unsigned int tx_bytes;          /* TX data outstanding */
 #define TX_THRESH_HI           8192
 #define TX_THRESH_LO           2048
@@ -272,7 +272,7 @@ struct gsm_mux {
        struct list_head tx_data_list;  /* Pending data packets */
 
        /* Control messages */
-       struct delayed_work kick_timeout;       /* Kick TX queuing on timeout */
+       struct timer_list kick_timer;   /* Kick TX queuing on timeout */
        struct timer_list t2_timer;     /* Retransmit timer for commands */
        int cretries;                   /* Command retry counter */
        struct gsm_control *pending_cmd;/* Our current pending command */
@@ -700,6 +700,7 @@ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
        struct gsm_msg *msg;
        u8 *dp;
        int ocr;
+       unsigned long flags;
 
        msg = gsm_data_alloc(gsm, addr, 0, control);
        if (!msg)
@@ -721,10 +722,10 @@ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
 
        gsm_print_packet("Q->", addr, cr, control, NULL, 0);
 
-       mutex_lock(&gsm->tx_mutex);
+       spin_lock_irqsave(&gsm->tx_lock, flags);
        list_add_tail(&msg->list, &gsm->tx_ctrl_list);
        gsm->tx_bytes += msg->len;
-       mutex_unlock(&gsm->tx_mutex);
+       spin_unlock_irqrestore(&gsm->tx_lock, flags);
        gsmld_write_trigger(gsm);
 
        return 0;
@@ -749,7 +750,7 @@ static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci)
        spin_unlock_irqrestore(&dlci->lock, flags);
 
        /* Clear data packets in MUX write queue */
-       mutex_lock(&gsm->tx_mutex);
+       spin_lock_irqsave(&gsm->tx_lock, flags);
        list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) {
                if (msg->addr != addr)
                        continue;
@@ -757,7 +758,7 @@ static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci)
                list_del(&msg->list);
                kfree(msg);
        }
-       mutex_unlock(&gsm->tx_mutex);
+       spin_unlock_irqrestore(&gsm->tx_lock, flags);
 }
 
 /**
@@ -1028,7 +1029,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
        gsm->tx_bytes += msg->len;
 
        gsmld_write_trigger(gsm);
-       schedule_delayed_work(&gsm->kick_timeout, 10 * gsm->t1 * HZ / 100);
+       mod_timer(&gsm->kick_timer, jiffies + 10 * gsm->t1 * HZ / 100);
 }
 
 /**
@@ -1043,9 +1044,10 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
 
 static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
 {
-       mutex_lock(&dlci->gsm->tx_mutex);
+       unsigned long flags;
+       spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
        __gsm_data_queue(dlci, msg);
-       mutex_unlock(&dlci->gsm->tx_mutex);
+       spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
 }
 
 /**
@@ -1057,7 +1059,7 @@ static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
  *     is data. Keep to the MRU of the mux. This path handles the usual tty
  *     interface which is a byte stream with optional modem data.
  *
- *     Caller must hold the tx_mutex of the mux.
+ *     Caller must hold the tx_lock of the mux.
  */
 
 static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
@@ -1117,7 +1119,7 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
  *     is data. Keep to the MRU of the mux. This path handles framed data
  *     queued as skbuffs to the DLCI.
  *
- *     Caller must hold the tx_mutex of the mux.
+ *     Caller must hold the tx_lock of the mux.
  */
 
 static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
@@ -1133,7 +1135,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
        if (dlci->adaption == 4)
                overhead = 1;
 
-       /* dlci->skb is locked by tx_mutex */
+       /* dlci->skb is locked by tx_lock */
        if (dlci->skb == NULL) {
                dlci->skb = skb_dequeue_tail(&dlci->skb_list);
                if (dlci->skb == NULL)
@@ -1187,7 +1189,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
  *     Push an empty frame in to the transmit queue to update the modem status
  *     bits and to transmit an optional break.
  *
- *     Caller must hold the tx_mutex of the mux.
+ *     Caller must hold the tx_lock of the mux.
  */
 
 static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci,
@@ -1301,12 +1303,13 @@ static int gsm_dlci_data_sweep(struct gsm_mux *gsm)
 
 static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
 {
+       unsigned long flags;
        int sweep;
 
        if (dlci->constipated)
                return;
 
-       mutex_lock(&dlci->gsm->tx_mutex);
+       spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
        /* If we have nothing running then we need to fire up */
        sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO);
        if (dlci->gsm->tx_bytes == 0) {
@@ -1317,7 +1320,7 @@ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
        }
        if (sweep)
                gsm_dlci_data_sweep(dlci->gsm);
-       mutex_unlock(&dlci->gsm->tx_mutex);
+       spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
 }
 
 /*
@@ -1708,7 +1711,7 @@ static struct gsm_control *gsm_control_send(struct gsm_mux *gsm,
                unsigned int command, u8 *data, int clen)
 {
        struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control),
-                                               GFP_KERNEL);
+                                               GFP_ATOMIC);
        unsigned long flags;
        if (ctrl == NULL)
                return NULL;
@@ -2019,23 +2022,24 @@ static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len)
 }
 
 /**
- *     gsm_kick_timeout        -       transmit if possible
- *     @work: work contained in our gsm object
+ *     gsm_kick_timer  -       transmit if possible
+ *     @t: timer contained in our gsm object
  *
  *     Transmit data from DLCIs if the queue is empty. We can't rely on
  *     a tty wakeup except when we filled the pipe so we need to fire off
  *     new data ourselves in other cases.
  */
-static void gsm_kick_timeout(struct work_struct *work)
+static void gsm_kick_timer(struct timer_list *t)
 {
-       struct gsm_mux *gsm = container_of(work, struct gsm_mux, kick_timeout.work);
+       struct gsm_mux *gsm = from_timer(gsm, t, kick_timer);
+       unsigned long flags;
        int sent = 0;
 
-       mutex_lock(&gsm->tx_mutex);
+       spin_lock_irqsave(&gsm->tx_lock, flags);
        /* If we have nothing running then we need to fire up */
        if (gsm->tx_bytes < TX_THRESH_LO)
                sent = gsm_dlci_data_sweep(gsm);
-       mutex_unlock(&gsm->tx_mutex);
+       spin_unlock_irqrestore(&gsm->tx_lock, flags);
 
        if (sent && debug & DBG_DATA)
                pr_info("%s TX queue stalled\n", __func__);
@@ -2492,7 +2496,7 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
        }
 
        /* Finish outstanding timers, making sure they are done */
-       cancel_delayed_work_sync(&gsm->kick_timeout);
+       del_timer_sync(&gsm->kick_timer);
        del_timer_sync(&gsm->t2_timer);
 
        /* Finish writing to ldisc */
@@ -2565,7 +2569,6 @@ static void gsm_free_mux(struct gsm_mux *gsm)
                        break;
                }
        }
-       mutex_destroy(&gsm->tx_mutex);
        mutex_destroy(&gsm->mutex);
        kfree(gsm->txframe);
        kfree(gsm->buf);
@@ -2637,15 +2640,15 @@ static struct gsm_mux *gsm_alloc_mux(void)
        }
        spin_lock_init(&gsm->lock);
        mutex_init(&gsm->mutex);
-       mutex_init(&gsm->tx_mutex);
        kref_init(&gsm->ref);
        INIT_LIST_HEAD(&gsm->tx_ctrl_list);
        INIT_LIST_HEAD(&gsm->tx_data_list);
-       INIT_DELAYED_WORK(&gsm->kick_timeout, gsm_kick_timeout);
+       timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
        timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
        INIT_WORK(&gsm->tx_work, gsmld_write_task);
        init_waitqueue_head(&gsm->event);
        spin_lock_init(&gsm->control_lock);
+       spin_lock_init(&gsm->tx_lock);
 
        gsm->t1 = T1;
        gsm->t2 = T2;
@@ -2670,7 +2673,6 @@ static struct gsm_mux *gsm_alloc_mux(void)
        }
        spin_unlock(&gsm_mux_lock);
        if (i == MAX_MUX) {
-               mutex_destroy(&gsm->tx_mutex);
                mutex_destroy(&gsm->mutex);
                kfree(gsm->txframe);
                kfree(gsm->buf);
@@ -2826,16 +2828,17 @@ static void gsmld_write_trigger(struct gsm_mux *gsm)
 static void gsmld_write_task(struct work_struct *work)
 {
        struct gsm_mux *gsm = container_of(work, struct gsm_mux, tx_work);
+       unsigned long flags;
        int i, ret;
 
        /* All outstanding control channel and control messages and one data
         * frame is sent.
         */
        ret = -ENODEV;
-       mutex_lock(&gsm->tx_mutex);
+       spin_lock_irqsave(&gsm->tx_lock, flags);
        if (gsm->tty)
                ret = gsm_data_kick(gsm);
-       mutex_unlock(&gsm->tx_mutex);
+       spin_unlock_irqrestore(&gsm->tx_lock, flags);
 
        if (ret >= 0)
                for (i = 0; i < NUM_DLCI; i++)
@@ -3042,6 +3045,7 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
                           const unsigned char *buf, size_t nr)
 {
        struct gsm_mux *gsm = tty->disc_data;
+       unsigned long flags;
        int space;
        int ret;
 
@@ -3049,13 +3053,13 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
                return -ENODEV;
 
        ret = -ENOBUFS;
-       mutex_lock(&gsm->tx_mutex);
+       spin_lock_irqsave(&gsm->tx_lock, flags);
        space = tty_write_room(tty);
        if (space >= nr)
                ret = tty->ops->write(tty, buf, nr);
        else
                set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-       mutex_unlock(&gsm->tx_mutex);
+       spin_unlock_irqrestore(&gsm->tx_lock, flags);
 
        return ret;
 }
@@ -3352,13 +3356,14 @@ static struct tty_ldisc_ops tty_ldisc_packet = {
 static void gsm_modem_upd_via_data(struct gsm_dlci *dlci, u8 brk)
 {
        struct gsm_mux *gsm = dlci->gsm;
+       unsigned long flags;
 
        if (dlci->state != DLCI_OPEN || dlci->adaption != 2)
                return;
 
-       mutex_lock(&gsm->tx_mutex);
+       spin_lock_irqsave(&gsm->tx_lock, flags);
        gsm_dlci_modem_output(gsm, dlci, brk);
-       mutex_unlock(&gsm->tx_mutex);
+       spin_unlock_irqrestore(&gsm->tx_lock, flags);
 }
 
 /**
index 44cc755..0e43bdf 100644 (file)
@@ -174,6 +174,8 @@ static int ehl_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
         */
        up->dma = dma;
 
+       lpss->dma_maxburst = 16;
+
        port->set_termios = dw8250_do_set_termios;
 
        return 0;
@@ -277,8 +279,13 @@ static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port
        struct dw_dma_slave *rx_param, *tx_param;
        struct device *dev = port->port.dev;
 
-       if (!lpss->dma_param.dma_dev)
+       if (!lpss->dma_param.dma_dev) {
+               dma = port->dma;
+               if (dma)
+                       goto out_configuration_only;
+
                return 0;
+       }
 
        rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL);
        if (!rx_param)
@@ -289,16 +296,18 @@ static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port
                return -ENOMEM;
 
        *rx_param = lpss->dma_param;
-       dma->rxconf.src_maxburst = lpss->dma_maxburst;
-
        *tx_param = lpss->dma_param;
-       dma->txconf.dst_maxburst = lpss->dma_maxburst;
 
        dma->fn = lpss8250_dma_filter;
        dma->rx_param = rx_param;
        dma->tx_param = tx_param;
 
        port->dma = dma;
+
+out_configuration_only:
+       dma->rxconf.src_maxburst = lpss->dma_maxburst;
+       dma->txconf.dst_maxburst = lpss->dma_maxburst;
+
        return 0;
 }
 
index 41b8c6b..3f33014 100644 (file)
@@ -157,7 +157,11 @@ static u32 uart_read(struct uart_8250_port *up, u32 reg)
        return readl(up->port.membase + (reg << up->port.regshift));
 }
 
-static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
+/*
+ * Called on runtime PM resume path from omap8250_restore_regs(), and
+ * omap8250_set_mctrl().
+ */
+static void __omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
 {
        struct uart_8250_port *up = up_to_u8250p(port);
        struct omap8250_priv *priv = up->port.private_data;
@@ -181,6 +185,20 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
        }
 }
 
+static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+       int err;
+
+       err = pm_runtime_resume_and_get(port->dev);
+       if (err)
+               return;
+
+       __omap8250_set_mctrl(port, mctrl);
+
+       pm_runtime_mark_last_busy(port->dev);
+       pm_runtime_put_autosuspend(port->dev);
+}
+
 /*
  * Work Around for Errata i202 (2430, 3430, 3630, 4430 and 4460)
  * The access to uart register after MDR1 Access
@@ -193,27 +211,10 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
 static void omap_8250_mdr1_errataset(struct uart_8250_port *up,
                                     struct omap8250_priv *priv)
 {
-       u8 timeout = 255;
-
        serial_out(up, UART_OMAP_MDR1, priv->mdr1);
        udelay(2);
        serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT |
                        UART_FCR_CLEAR_RCVR);
-       /*
-        * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
-        * TX_FIFO_E bit is 1.
-        */
-       while (UART_LSR_THRE != (serial_in(up, UART_LSR) &
-                               (UART_LSR_THRE | UART_LSR_DR))) {
-               timeout--;
-               if (!timeout) {
-                       /* Should *never* happen. we warn and carry on */
-                       dev_crit(up->port.dev, "Errata i202: timedout %x\n",
-                                serial_in(up, UART_LSR));
-                       break;
-               }
-               udelay(1);
-       }
 }
 
 static void omap_8250_get_divisor(struct uart_port *port, unsigned int baud,
@@ -292,6 +293,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
 {
        struct omap8250_priv *priv = up->port.private_data;
        struct uart_8250_dma    *dma = up->dma;
+       u8 mcr = serial8250_in_MCR(up);
 
        if (dma && dma->tx_running) {
                /*
@@ -308,7 +310,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
        serial_out(up, UART_EFR, UART_EFR_ECB);
 
        serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
-       serial8250_out_MCR(up, UART_MCR_TCRTLR);
+       serial8250_out_MCR(up, mcr | UART_MCR_TCRTLR);
        serial_out(up, UART_FCR, up->fcr);
 
        omap8250_update_scr(up, priv);
@@ -324,7 +326,8 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
        serial_out(up, UART_LCR, 0);
 
        /* drop TCR + TLR access, we setup XON/XOFF later */
-       serial8250_out_MCR(up, up->mcr);
+       serial8250_out_MCR(up, mcr);
+
        serial_out(up, UART_IER, up->ier);
 
        serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
@@ -341,7 +344,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
 
        omap8250_update_mdr1(up, priv);
 
-       up->port.ops->set_mctrl(&up->port, up->port.mctrl);
+       __omap8250_set_mctrl(&up->port, up->port.mctrl);
 
        if (up->port.rs485.flags & SER_RS485_ENABLED)
                serial8250_em485_stop_tx(up);
@@ -669,7 +672,6 @@ static int omap_8250_startup(struct uart_port *port)
 
        pm_runtime_get_sync(port->dev);
 
-       up->mcr = 0;
        serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
 
        serial_out(up, UART_LCR, UART_LCR_WLEN8);
@@ -1458,9 +1460,15 @@ err:
 static int omap8250_remove(struct platform_device *pdev)
 {
        struct omap8250_priv *priv = platform_get_drvdata(pdev);
+       int err;
+
+       err = pm_runtime_resume_and_get(&pdev->dev);
+       if (err)
+               return err;
 
        pm_runtime_dont_use_autosuspend(&pdev->dev);
        pm_runtime_put_sync(&pdev->dev);
+       flush_work(&priv->qos_work);
        pm_runtime_disable(&pdev->dev);
        serial8250_unregister_port(priv->line);
        cpu_latency_qos_remove_request(&priv->pm_qos_request);
index fe8662c..3881722 100644 (file)
@@ -1897,10 +1897,13 @@ EXPORT_SYMBOL_GPL(serial8250_modem_status);
 static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
 {
        switch (iir & 0x3f) {
-       case UART_IIR_RX_TIMEOUT:
-               serial8250_rx_dma_flush(up);
+       case UART_IIR_RDI:
+               if (!up->dma->rx_running)
+                       break;
                fallthrough;
        case UART_IIR_RLSI:
+       case UART_IIR_RX_TIMEOUT:
+               serial8250_rx_dma_flush(up);
                return true;
        }
        return up->dma->rx_dma(up);
index 67fa113..888e01f 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/dmaengine.h>
 #include <linux/dmapool.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/of.h>
@@ -404,33 +405,6 @@ static unsigned int lpuart_get_baud_clk_rate(struct lpuart_port *sport)
 #define lpuart_enable_clks(x)  __lpuart_enable_clks(x, true)
 #define lpuart_disable_clks(x) __lpuart_enable_clks(x, false)
 
-static int lpuart_global_reset(struct lpuart_port *sport)
-{
-       struct uart_port *port = &sport->port;
-       void __iomem *global_addr;
-       int ret;
-
-       if (uart_console(port))
-               return 0;
-
-       ret = clk_prepare_enable(sport->ipg_clk);
-       if (ret) {
-               dev_err(sport->port.dev, "failed to enable uart ipg clk: %d\n", ret);
-               return ret;
-       }
-
-       if (is_imx7ulp_lpuart(sport) || is_imx8qxp_lpuart(sport)) {
-               global_addr = port->membase + UART_GLOBAL - IMX_REG_OFF;
-               writel(UART_GLOBAL_RST, global_addr);
-               usleep_range(GLOBAL_RST_MIN_US, GLOBAL_RST_MAX_US);
-               writel(0, global_addr);
-               usleep_range(GLOBAL_RST_MIN_US, GLOBAL_RST_MAX_US);
-       }
-
-       clk_disable_unprepare(sport->ipg_clk);
-       return 0;
-}
-
 static void lpuart_stop_tx(struct uart_port *port)
 {
        unsigned char temp;
@@ -2636,6 +2610,54 @@ static const struct serial_rs485 lpuart_rs485_supported = {
        /* delay_rts_* and RX_DURING_TX are not supported */
 };
 
+static int lpuart_global_reset(struct lpuart_port *sport)
+{
+       struct uart_port *port = &sport->port;
+       void __iomem *global_addr;
+       unsigned long ctrl, bd;
+       unsigned int val = 0;
+       int ret;
+
+       ret = clk_prepare_enable(sport->ipg_clk);
+       if (ret) {
+               dev_err(sport->port.dev, "failed to enable uart ipg clk: %d\n", ret);
+               return ret;
+       }
+
+       if (is_imx7ulp_lpuart(sport) || is_imx8qxp_lpuart(sport)) {
+               /*
+                * If the transmitter is used by earlycon, wait for transmit engine to
+                * complete and then reset.
+                */
+               ctrl = lpuart32_read(port, UARTCTRL);
+               if (ctrl & UARTCTRL_TE) {
+                       bd = lpuart32_read(&sport->port, UARTBAUD);
+                       if (read_poll_timeout(lpuart32_tx_empty, val, val, 1, 100000, false,
+                                             port)) {
+                               dev_warn(sport->port.dev,
+                                        "timeout waiting for transmit engine to complete\n");
+                               clk_disable_unprepare(sport->ipg_clk);
+                               return 0;
+                       }
+               }
+
+               global_addr = port->membase + UART_GLOBAL - IMX_REG_OFF;
+               writel(UART_GLOBAL_RST, global_addr);
+               usleep_range(GLOBAL_RST_MIN_US, GLOBAL_RST_MAX_US);
+               writel(0, global_addr);
+               usleep_range(GLOBAL_RST_MIN_US, GLOBAL_RST_MAX_US);
+
+               /* Recover the transmitter for earlycon. */
+               if (ctrl & UARTCTRL_TE) {
+                       lpuart32_write(port, bd, UARTBAUD);
+                       lpuart32_write(port, ctrl, UARTCTRL);
+               }
+       }
+
+       clk_disable_unprepare(sport->ipg_clk);
+       return 0;
+}
+
 static int lpuart_probe(struct platform_device *pdev)
 {
        const struct lpuart_soc_data *sdata = of_device_get_match_data(&pdev->dev);
index 05b432d..aadda66 100644 (file)
@@ -2594,6 +2594,7 @@ static const struct dev_pm_ops imx_uart_pm_ops = {
        .suspend_noirq = imx_uart_suspend_noirq,
        .resume_noirq = imx_uart_resume_noirq,
        .freeze_noirq = imx_uart_suspend_noirq,
+       .thaw_noirq = imx_uart_resume_noirq,
        .restore_noirq = imx_uart_resume_noirq,
        .suspend = imx_uart_suspend,
        .resume = imx_uart_resume,
index 9643b90..6164fc4 100644 (file)
 #define CFG_RXDET_P3_EN                BIT(15)
 #define LPM_2_STB_SWITCH_EN    BIT(25)
 
-static int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd);
+static void xhci_cdns3_plat_start(struct usb_hcd *hcd)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       u32 value;
+
+       /* set usbcmd.EU3S */
+       value = readl(&xhci->op_regs->command);
+       value |= CMD_PM_INDEX;
+       writel(value, &xhci->op_regs->command);
+
+       if (hcd->regs) {
+               value = readl(hcd->regs + XECP_AUX_CTRL_REG1);
+               value |= CFG_RXDET_P3_EN;
+               writel(value, hcd->regs + XECP_AUX_CTRL_REG1);
+
+               value = readl(hcd->regs + XECP_PORT_CAP_REG);
+               value |= LPM_2_STB_SWITCH_EN;
+               writel(value, hcd->regs + XECP_PORT_CAP_REG);
+       }
+}
+
+static int xhci_cdns3_resume_quirk(struct usb_hcd *hcd)
+{
+       xhci_cdns3_plat_start(hcd);
+       return 0;
+}
 
 static const struct xhci_plat_priv xhci_plat_cdns3_xhci = {
        .quirks = XHCI_SKIP_PHY_INIT | XHCI_AVOID_BEI,
-       .suspend_quirk = xhci_cdns3_suspend_quirk,
+       .plat_start = xhci_cdns3_plat_start,
+       .resume_quirk = xhci_cdns3_resume_quirk,
 };
 
 static int __cdns_host_init(struct cdns *cdns)
@@ -90,32 +116,6 @@ err1:
        return ret;
 }
 
-static int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd)
-{
-       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-       u32 value;
-
-       if (pm_runtime_status_suspended(hcd->self.controller))
-               return 0;
-
-       /* set usbcmd.EU3S */
-       value = readl(&xhci->op_regs->command);
-       value |= CMD_PM_INDEX;
-       writel(value, &xhci->op_regs->command);
-
-       if (hcd->regs) {
-               value = readl(hcd->regs + XECP_AUX_CTRL_REG1);
-               value |= CFG_RXDET_P3_EN;
-               writel(value, hcd->regs + XECP_AUX_CTRL_REG1);
-
-               value = readl(hcd->regs + XECP_PORT_CAP_REG);
-               value |= LPM_2_STB_SWITCH_EN;
-               writel(value, hcd->regs + XECP_PORT_CAP_REG);
-       }
-
-       return 0;
-}
-
 static void cdns_host_exit(struct cdns *cdns)
 {
        kfree(cdns->xhci_plat_data);
index ada78da..c17516c 100644 (file)
@@ -256,8 +256,10 @@ static void ci_otg_del_timer(struct ci_hdrc *ci, enum otg_fsm_timer t)
        ci->enabled_otg_timer_bits &= ~(1 << t);
        if (ci->next_otg_timer == t) {
                if (ci->enabled_otg_timer_bits == 0) {
+                       spin_unlock_irqrestore(&ci->lock, flags);
                        /* No enabled timers after delete it */
                        hrtimer_cancel(&ci->otg_fsm_hrtimer);
+                       spin_lock_irqsave(&ci->lock, flags);
                        ci->next_otg_timer = NUM_OTG_FSM_TIMERS;
                } else {
                        /* Find the next timer */
index 0722d21..079e183 100644 (file)
@@ -362,6 +362,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM },
        { USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
 
+       /* Realforce 87U Keyboard */
+       { USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM },
+
        /* M-Systems Flash Disk Pioneers */
        { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
 
index c0e7c76..1f348bc 100644 (file)
@@ -1711,6 +1711,16 @@ static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
                return extcon_get_extcon_dev(name);
 
        /*
+        * Check explicitly if "usb-role-switch" is used since
+        * extcon_find_edev_by_node() can not be used to check the absence of
+        * an extcon device. In the absence of an device it will always return
+        * EPROBE_DEFER.
+        */
+       if (IS_ENABLED(CONFIG_USB_ROLE_SWITCH) &&
+           device_property_read_bool(dev, "usb-role-switch"))
+               return NULL;
+
+       /*
         * Try to get an extcon device from the USB PHY controller's "port"
         * node. Check if it has the "port" node first, to avoid printing the
         * error message from underlying code, as it's a valid case: extcon
index 5fe2d13..026d402 100644 (file)
@@ -1029,7 +1029,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
                dep->endpoint.desc = NULL;
        }
 
-       dwc3_remove_requests(dwc, dep, -ECONNRESET);
+       dwc3_remove_requests(dwc, dep, -ESHUTDOWN);
 
        dep->stream_capable = false;
        dep->type = 0;
index a7154fe..f6f13e7 100644 (file)
 #include <linux/of.h>
 #include <linux/platform_device.h>
 
-#include "../host/xhci-plat.h"
 #include "core.h"
 
-static const struct xhci_plat_priv dwc3_xhci_plat_priv = {
-       .quirks = XHCI_SKIP_PHY_INIT,
-};
-
 static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc,
                                        int irq, char *name)
 {
@@ -97,11 +92,6 @@ int dwc3_host_init(struct dwc3 *dwc)
                goto err;
        }
 
-       ret = platform_device_add_data(xhci, &dwc3_xhci_plat_priv,
-                                       sizeof(dwc3_xhci_plat_priv));
-       if (ret)
-               goto err;
-
        memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
 
        if (dwc->usb3_lpm_capable)
index 2df52f7..7558cc4 100644 (file)
@@ -285,7 +285,7 @@ static void bcma_hci_platform_power_gpio(struct bcma_device *dev, bool val)
 {
        struct bcma_hcd_device *usb_dev = bcma_get_drvdata(dev);
 
-       if (IS_ERR_OR_NULL(usb_dev->gpio_desc))
+       if (!usb_dev->gpio_desc)
                return;
 
        gpiod_set_value(usb_dev->gpio_desc, val);
@@ -406,9 +406,11 @@ static int bcma_hcd_probe(struct bcma_device *core)
                return -ENOMEM;
        usb_dev->core = core;
 
-       if (core->dev.of_node)
-               usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc",
-                                                   GPIOD_OUT_HIGH);
+       usb_dev->gpio_desc = devm_gpiod_get_optional(&core->dev, "vcc",
+                                                    GPIOD_OUT_HIGH);
+       if (IS_ERR(usb_dev->gpio_desc))
+               return dev_err_probe(&core->dev, PTR_ERR(usb_dev->gpio_desc),
+                                    "error obtaining VCC GPIO");
 
        switch (core->id.id) {
        case BCMA_CORE_USB20_HOST:
index 697683e..c3b7f1d 100644 (file)
@@ -162,6 +162,8 @@ static void option_instat_callback(struct urb *urb);
 #define NOVATELWIRELESS_PRODUCT_G2             0xA010
 #define NOVATELWIRELESS_PRODUCT_MC551          0xB001
 
+#define UBLOX_VENDOR_ID                                0x1546
+
 /* AMOI PRODUCTS */
 #define AMOI_VENDOR_ID                         0x1614
 #define AMOI_PRODUCT_H01                       0x0800
@@ -240,7 +242,6 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_UC15                   0x9090
 /* These u-blox products use Qualcomm's vendor ID */
 #define UBLOX_PRODUCT_R410M                    0x90b2
-#define UBLOX_PRODUCT_R6XX                     0x90fa
 /* These Yuga products use Qualcomm's vendor ID */
 #define YUGA_PRODUCT_CLM920_NC5                        0x9625
 
@@ -581,6 +582,9 @@ static void option_instat_callback(struct urb *urb);
 #define OPPO_VENDOR_ID                         0x22d9
 #define OPPO_PRODUCT_R11                       0x276c
 
+/* Sierra Wireless products */
+#define SIERRA_VENDOR_ID                       0x1199
+#define SIERRA_PRODUCT_EM9191                  0x90d3
 
 /* Device flags */
 
@@ -1124,8 +1128,16 @@ static const struct usb_device_id option_ids[] = {
        /* u-blox products using Qualcomm vendor ID */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
          .driver_info = RSVD(1) | RSVD(3) },
-       { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX),
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x908b),       /* u-blox LARA-R6 00B */
+         .driver_info = RSVD(4) },
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x90fa),
          .driver_info = RSVD(3) },
+       /* u-blox products */
+       { USB_DEVICE(UBLOX_VENDOR_ID, 0x1341) },        /* u-blox LARA-L6 */
+       { USB_DEVICE(UBLOX_VENDOR_ID, 0x1342),          /* u-blox LARA-L6 (RMNET) */
+         .driver_info = RSVD(4) },
+       { USB_DEVICE(UBLOX_VENDOR_ID, 0x1343),          /* u-blox LARA-L6 (ECM) */
+         .driver_info = RSVD(4) },
        /* Quectel products using Quectel vendor ID */
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff),
          .driver_info = NUMEP2 },
@@ -2167,6 +2179,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x010a, 0xff) },                   /* Fibocom MA510 (ECM mode) */
        { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) },    /* Fibocom FG150 Diag */
        { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) },          /* Fibocom FG150 AT */
+       { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) },                   /* Fibocom FM160 (MBIM mode) */
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },                   /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) },                   /* Fibocom FM101-GL (laptop MBIM) */
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff),                     /* Fibocom FM101-GL (laptop MBIM) */
@@ -2176,6 +2189,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) },                   /* GosunCn GM500 MBIM */
        { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) },                   /* GosunCn GM500 ECM/NCM */
        { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
+       { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
+       { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index e1f4df7..fdbf369 100644 (file)
@@ -369,13 +369,24 @@ pmc_usb_mux_usb4(struct pmc_usb_port *port, struct typec_mux_state *state)
        return pmc_usb_command(port, (void *)&req, sizeof(req));
 }
 
-static int pmc_usb_mux_safe_state(struct pmc_usb_port *port)
+static int pmc_usb_mux_safe_state(struct pmc_usb_port *port,
+                                 struct typec_mux_state *state)
 {
        u8 msg;
 
        if (IOM_PORT_ACTIVITY_IS(port->iom_status, SAFE_MODE))
                return 0;
 
+       if ((IOM_PORT_ACTIVITY_IS(port->iom_status, DP) ||
+            IOM_PORT_ACTIVITY_IS(port->iom_status, DP_MFD)) &&
+            state->alt && state->alt->svid == USB_TYPEC_DP_SID)
+               return 0;
+
+       if ((IOM_PORT_ACTIVITY_IS(port->iom_status, TBT) ||
+            IOM_PORT_ACTIVITY_IS(port->iom_status, ALT_MODE_TBT_USB)) &&
+            state->alt && state->alt->svid == USB_TYPEC_TBT_SID)
+               return 0;
+
        msg = PMC_USB_SAFE_MODE;
        msg |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
 
@@ -443,7 +454,7 @@ pmc_usb_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
                return 0;
 
        if (state->mode == TYPEC_STATE_SAFE)
-               return pmc_usb_mux_safe_state(port);
+               return pmc_usb_mux_safe_state(port, state);
        if (state->mode == TYPEC_STATE_USB)
                return pmc_usb_connect(port, port->role);
 
index b637e8b..2a77bab 100644 (file)
@@ -474,7 +474,7 @@ static void tps6598x_handle_plug_event(struct tps6598x *tps, u32 status)
 static irqreturn_t cd321x_interrupt(int irq, void *data)
 {
        struct tps6598x *tps = data;
-       u64 event;
+       u64 event = 0;
        u32 status;
        int ret;
 
@@ -519,8 +519,8 @@ err_unlock:
 static irqreturn_t tps6598x_interrupt(int irq, void *data)
 {
        struct tps6598x *tps = data;
-       u64 event1;
-       u64 event2;
+       u64 event1 = 0;
+       u64 event2 = 0;
        u32 status;
        int ret;
 
index badc9d8..e030c21 100644 (file)
@@ -2488,12 +2488,12 @@ static bool vfio_pci_dev_set_needs_reset(struct vfio_device_set *dev_set)
        struct vfio_pci_core_device *cur;
        bool needs_reset = false;
 
-       list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
-               /* No VFIO device in the set can have an open device FD */
-               if (cur->vdev.open_count)
-                       return false;
+       /* No other VFIO device in the set can be open. */
+       if (vfio_device_set_open_count(dev_set) > 1)
+               return false;
+
+       list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
                needs_reset |= cur->needs_reset;
-       }
        return needs_reset;
 }
 
index 2d16879..6e8804f 100644 (file)
@@ -125,6 +125,19 @@ static void vfio_release_device_set(struct vfio_device *device)
        xa_unlock(&vfio_device_set_xa);
 }
 
+unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set)
+{
+       struct vfio_device *cur;
+       unsigned int open_count = 0;
+
+       lockdep_assert_held(&dev_set->lock);
+
+       list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
+               open_count += cur->open_count;
+       return open_count;
+}
+EXPORT_SYMBOL_GPL(vfio_device_set_open_count);
+
 /*
  * Group objects - create, release, get, put, search
  */
@@ -801,8 +814,9 @@ static struct file *vfio_device_open(struct vfio_device *device)
 err_close_device:
        mutex_lock(&device->dev_set->lock);
        mutex_lock(&device->group->group_lock);
-       if (device->open_count == 1 && device->ops->close_device) {
-               device->ops->close_device(device);
+       if (device->open_count == 1) {
+               if (device->ops->close_device)
+                       device->ops->close_device(device);
 
                vfio_device_container_unregister(device);
        }
@@ -1017,10 +1031,12 @@ static int vfio_device_fops_release(struct inode *inode, struct file *filep)
        mutex_lock(&device->dev_set->lock);
        vfio_assert_device_open(device);
        mutex_lock(&device->group->group_lock);
-       if (device->open_count == 1 && device->ops->close_device)
-               device->ops->close_device(device);
+       if (device->open_count == 1) {
+               if (device->ops->close_device)
+                       device->ops->close_device(device);
 
-       vfio_device_container_unregister(device);
+               vfio_device_container_unregister(device);
+       }
        mutex_unlock(&device->group->group_lock);
        device->open_count--;
        if (device->open_count == 0)
index 098b62f..c0143d3 100644 (file)
@@ -577,7 +577,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
                if (scr_readw(r) != vc->vc_video_erase_char)
                        break;
        if (r != q && new_rows >= rows + logo_lines) {
-               save = kmalloc(array3_size(logo_lines, new_cols, 2),
+               save = kzalloc(array3_size(logo_lines, new_cols, 2),
                               GFP_KERNEL);
                if (save) {
                        int i = min(cols, new_cols);
index f422f9c..1ea6d2e 100644 (file)
@@ -67,8 +67,27 @@ static bool is_vmpck_empty(struct snp_guest_dev *snp_dev)
        return true;
 }
 
+/*
+ * If an error is received from the host or AMD Secure Processor (ASP) there
+ * are two options. Either retry the exact same encrypted request or discontinue
+ * using the VMPCK.
+ *
+ * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
+ * encrypt the requests. The IV for this scheme is the sequence number. GCM
+ * cannot tolerate IV reuse.
+ *
+ * The ASP FW v1.51 only increments the sequence numbers on a successful
+ * guest<->ASP back and forth and only accepts messages at its exact sequence
+ * number.
+ *
+ * So if the sequence number were to be reused the encryption scheme is
+ * vulnerable. If the sequence number were incremented for a fresh IV the ASP
+ * will reject the request.
+ */
 static void snp_disable_vmpck(struct snp_guest_dev *snp_dev)
 {
+       dev_alert(snp_dev->dev, "Disabling vmpck_id %d to prevent IV reuse.\n",
+                 vmpck_id);
        memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN);
        snp_dev->vmpck = NULL;
 }
@@ -321,34 +340,71 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
        if (rc)
                return rc;
 
-       /* Call firmware to process the request */
+       /*
+        * Call firmware to process the request. In this function the encrypted
+        * message enters shared memory with the host. So after this call the
+        * sequence number must be incremented or the VMPCK must be deleted to
+        * prevent reuse of the IV.
+        */
        rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
+
+       /*
+        * If the extended guest request fails due to having too small of a
+        * certificate data buffer, retry the same guest request without the
+        * extended data request in order to increment the sequence number
+        * and thus avoid IV reuse.
+        */
+       if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
+           err == SNP_GUEST_REQ_INVALID_LEN) {
+               const unsigned int certs_npages = snp_dev->input.data_npages;
+
+               exit_code = SVM_VMGEXIT_GUEST_REQUEST;
+
+               /*
+                * If this call to the firmware succeeds, the sequence number can
+                * be incremented allowing for continued use of the VMPCK. If
+                * there is an error reflected in the return value, this value
+                * is checked further down and the result will be the deletion
+                * of the VMPCK and the error code being propagated back to the
+                * user as an ioctl() return code.
+                */
+               rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
+
+               /*
+                * Override the error to inform callers the given extended
+                * request buffer size was too small and give the caller the
+                * required buffer size.
+                */
+               err = SNP_GUEST_REQ_INVALID_LEN;
+               snp_dev->input.data_npages = certs_npages;
+       }
+
        if (fw_err)
                *fw_err = err;
 
-       if (rc)
-               return rc;
+       if (rc) {
+               dev_alert(snp_dev->dev,
+                         "Detected error from ASP request. rc: %d, fw_err: %llu\n",
+                         rc, *fw_err);
+               goto disable_vmpck;
+       }
 
-       /*
-        * The verify_and_dec_payload() will fail only if the hypervisor is
-        * actively modifying the message header or corrupting the encrypted payload.
-        * This hints that hypervisor is acting in a bad faith. Disable the VMPCK so that
-        * the key cannot be used for any communication. The key is disabled to ensure
-        * that AES-GCM does not use the same IV while encrypting the request payload.
-        */
        rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
        if (rc) {
                dev_alert(snp_dev->dev,
-                         "Detected unexpected decode failure, disabling the vmpck_id %d\n",
-                         vmpck_id);
-               snp_disable_vmpck(snp_dev);
-               return rc;
+                         "Detected unexpected decode failure from ASP. rc: %d\n",
+                         rc);
+               goto disable_vmpck;
        }
 
        /* Increment to new message sequence after payload decryption was successful. */
        snp_inc_msg_seqno(snp_dev);
 
        return 0;
+
+disable_vmpck:
+       snp_disable_vmpck(snp_dev);
+       return rc;
 }
 
 static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
index 47aa3a1..fd3a644 100644 (file)
@@ -228,7 +228,7 @@ static int register_pcpu(struct pcpu *pcpu)
 
        err = device_register(dev);
        if (err) {
-               pcpu_release(dev);
+               put_device(dev);
                return err;
        }
 
index 18f0ed8..cd07e3f 100644 (file)
@@ -54,7 +54,8 @@ static uint64_t get_callback_via(struct pci_dev *pdev)
        pin = pdev->pin;
 
        /* We don't know the GSI. Specify the PCI INTx line instead. */
-       return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */
+       return ((uint64_t)HVM_PARAM_CALLBACK_TYPE_PCI_INTX <<
+                         HVM_CALLBACK_VIA_TYPE_SHIFT) |
                ((uint64_t)pci_domain_nr(pdev->bus) << 32) |
                ((uint64_t)pdev->bus->number << 16) |
                ((uint64_t)(pdev->devfn & 0xff) << 8) |
@@ -144,7 +145,7 @@ static int platform_pci_probe(struct pci_dev *pdev,
                if (ret) {
                        dev_warn(&pdev->dev, "Unable to set the evtchn callback "
                                         "err=%d\n", ret);
-                       goto out;
+                       goto irq_out;
                }
        }
 
@@ -152,13 +153,16 @@ static int platform_pci_probe(struct pci_dev *pdev,
        grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
        ret = gnttab_setup_auto_xlat_frames(grant_frames);
        if (ret)
-               goto out;
+               goto irq_out;
        ret = gnttab_init();
        if (ret)
                goto grant_out;
        return 0;
 grant_out:
        gnttab_free_auto_xlat_frames();
+irq_out:
+       if (!xen_have_vector_callback)
+               free_irq(pdev->irq, pdev);
 out:
        pci_release_region(pdev, 0);
 mem_out:
index 5e53b48..097316a 100644 (file)
@@ -190,13 +190,16 @@ static const struct config_field caplist_pm[] = {
 };
 
 static struct msi_msix_field_config {
-       u16          enable_bit; /* bit for enabling MSI/MSI-X */
-       unsigned int int_type;   /* interrupt type for exclusiveness check */
+       u16          enable_bit;   /* bit for enabling MSI/MSI-X */
+       u16          allowed_bits; /* bits allowed to be changed */
+       unsigned int int_type;     /* interrupt type for exclusiveness check */
 } msi_field_config = {
        .enable_bit     = PCI_MSI_FLAGS_ENABLE,
+       .allowed_bits   = PCI_MSI_FLAGS_ENABLE,
        .int_type       = INTERRUPT_TYPE_MSI,
 }, msix_field_config = {
        .enable_bit     = PCI_MSIX_FLAGS_ENABLE,
+       .allowed_bits   = PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL,
        .int_type       = INTERRUPT_TYPE_MSIX,
 };
 
@@ -229,7 +232,7 @@ static int msi_msix_flags_write(struct pci_dev *dev, int offset, u16 new_value,
                return 0;
 
        if (!dev_data->allow_interrupt_control ||
-           (new_value ^ old_value) & ~field_config->enable_bit)
+           (new_value ^ old_value) & ~field_config->allowed_bits)
                return PCIBIOS_SET_FAILED;
 
        if (new_value & field_config->enable_bit) {
index a9543f0..dcb510f 100644 (file)
@@ -4663,7 +4663,12 @@ int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
        int ret;
        int i;
 
-       ASSERT(!path->nowait);
+       /*
+        * The nowait semantics are used only for write paths, where we don't
+        * use the tree mod log and sequence numbers.
+        */
+       if (time_seq)
+               ASSERT(!path->nowait);
 
        nritems = btrfs_header_nritems(path->nodes[0]);
        if (nritems == 0)
@@ -4683,7 +4688,14 @@ again:
                if (path->need_commit_sem) {
                        path->need_commit_sem = 0;
                        need_commit_sem = true;
-                       down_read(&fs_info->commit_root_sem);
+                       if (path->nowait) {
+                               if (!down_read_trylock(&fs_info->commit_root_sem)) {
+                                       ret = -EAGAIN;
+                                       goto done;
+                               }
+                       } else {
+                               down_read(&fs_info->commit_root_sem);
+                       }
                }
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        }
@@ -4759,7 +4771,7 @@ again:
                next = c;
                ret = read_block_for_search(root, path, &next, level,
                                            slot, &key);
-               if (ret == -EAGAIN)
+               if (ret == -EAGAIN && !path->nowait)
                        goto again;
 
                if (ret < 0) {
@@ -4769,6 +4781,10 @@ again:
 
                if (!path->skip_locking) {
                        ret = btrfs_try_tree_read_lock(next);
+                       if (!ret && path->nowait) {
+                               ret = -EAGAIN;
+                               goto done;
+                       }
                        if (!ret && time_seq) {
                                /*
                                 * If we don't get the lock, we may be racing
@@ -4799,7 +4815,7 @@ again:
 
                ret = read_block_for_search(root, path, &next, level,
                                            0, &key);
-               if (ret == -EAGAIN)
+               if (ret == -EAGAIN && !path->nowait)
                        goto again;
 
                if (ret < 0) {
@@ -4807,8 +4823,16 @@ again:
                        goto done;
                }
 
-               if (!path->skip_locking)
-                       btrfs_tree_read_lock(next);
+               if (!path->skip_locking) {
+                       if (path->nowait) {
+                               if (!btrfs_try_tree_read_lock(next)) {
+                                       ret = -EAGAIN;
+                                       goto done;
+                               }
+                       } else {
+                               btrfs_tree_read_lock(next);
+                       }
+               }
        }
        ret = 0;
 done:
index d5dd8be..5ba2e81 100644 (file)
@@ -3105,6 +3105,8 @@ static int btrfs_ioctl_get_subvol_info(struct inode *inode, void __user *argp)
                }
        }
 
+       btrfs_free_path(path);
+       path = NULL;
        if (copy_to_user(argp, subvol_info, sizeof(*subvol_info)))
                ret = -EFAULT;
 
@@ -3194,6 +3196,8 @@ static int btrfs_ioctl_get_subvol_rootref(struct btrfs_root *root,
        }
 
 out:
+       btrfs_free_path(path);
+
        if (!ret || ret == -EOVERFLOW) {
                rootrefs->num_items = found;
                /* update min_treeid for next search */
@@ -3205,7 +3209,6 @@ out:
        }
 
        kfree(rootrefs);
-       btrfs_free_path(path);
 
        return ret;
 }
@@ -4231,6 +4234,8 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
                ipath->fspath->val[i] = rel_ptr;
        }
 
+       btrfs_free_path(path);
+       path = NULL;
        ret = copy_to_user((void __user *)(unsigned long)ipa->fspath,
                           ipath->fspath, size);
        if (ret) {
@@ -4281,21 +4286,20 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
                size = min_t(u32, loi->size, SZ_16M);
        }
 
-       path = btrfs_alloc_path();
-       if (!path) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
        inodes = init_data_container(size);
        if (IS_ERR(inodes)) {
                ret = PTR_ERR(inodes);
-               inodes = NULL;
-               goto out;
+               goto out_loi;
        }
 
+       path = btrfs_alloc_path();
+       if (!path) {
+               ret = -ENOMEM;
+               goto out;
+       }
        ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
                                          inodes, ignore_offset);
+       btrfs_free_path(path);
        if (ret == -EINVAL)
                ret = -ENOENT;
        if (ret < 0)
@@ -4307,7 +4311,6 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
                ret = -EFAULT;
 
 out:
-       btrfs_free_path(path);
        kvfree(inodes);
 out_loi:
        kfree(loi);
index 9334c31..b74105a 100644 (file)
@@ -2951,14 +2951,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
                dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
                dstgroup->rsv_excl = inherit->lim.rsv_excl;
 
-               ret = update_qgroup_limit_item(trans, dstgroup);
-               if (ret) {
-                       qgroup_mark_inconsistent(fs_info);
-                       btrfs_info(fs_info,
-                                  "unable to update quota limit for %llu",
-                                  dstgroup->qgroupid);
-                       goto unlock;
-               }
+               qgroup_dirty(fs_info, dstgroup);
        }
 
        if (srcid) {
index 145c84b..1c4b693 100644 (file)
@@ -5702,6 +5702,7 @@ static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path,
                u64 ext_len;
                u64 clone_len;
                u64 clone_data_offset;
+               bool crossed_src_i_size = false;
 
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(clone_root->root, path);
@@ -5759,8 +5760,10 @@ static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path,
                if (key.offset >= clone_src_i_size)
                        break;
 
-               if (key.offset + ext_len > clone_src_i_size)
+               if (key.offset + ext_len > clone_src_i_size) {
                        ext_len = clone_src_i_size - key.offset;
+                       crossed_src_i_size = true;
+               }
 
                clone_data_offset = btrfs_file_extent_offset(leaf, ei);
                if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
@@ -5821,6 +5824,25 @@ static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path,
                                ret = send_clone(sctx, offset, clone_len,
                                                 clone_root);
                        }
+               } else if (crossed_src_i_size && clone_len < len) {
+                       /*
+                        * If we are at i_size of the clone source inode and we
+                        * can not clone from it, terminate the loop. This is
+                        * to avoid sending two write operations, one with a
+                        * length matching clone_len and the final one after
+                        * this loop with a length of len - clone_len.
+                        *
+                        * When using encoded writes (BTRFS_SEND_FLAG_COMPRESSED
+                        * was passed to the send ioctl), this helps avoid
+                        * sending an encoded write for an offset that is not
+                        * sector size aligned, in case the i_size of the source
+                        * inode is not sector size aligned. That will make the
+                        * receiver fallback to decompression of the data and
+                        * writing it using regular buffered IO, therefore while
+                        * not incorrect, it's not optimal due decompression and
+                        * possible re-compression at the receiver.
+                        */
+                       break;
                } else {
                        ret = send_extent_data(sctx, dst_path, offset,
                                               clone_len);
index 699b54b..74fef1f 100644 (file)
@@ -2321,8 +2321,11 @@ int __init btrfs_init_sysfs(void)
 
 #ifdef CONFIG_BTRFS_DEBUG
        ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_debug_feature_attr_group);
-       if (ret)
-               goto out2;
+       if (ret) {
+               sysfs_unmerge_group(&btrfs_kset->kobj,
+                                   &btrfs_static_feature_attr_group);
+               goto out_remove_group;
+       }
 #endif
 
        return 0;
index 813986e..c3cf3da 100644 (file)
@@ -3694,15 +3694,29 @@ static int process_dir_items_leaf(struct btrfs_trans_handle *trans,
                                  u64 *last_old_dentry_offset)
 {
        struct btrfs_root *log = inode->root->log_root;
-       struct extent_buffer *src = path->nodes[0];
-       const int nritems = btrfs_header_nritems(src);
+       struct extent_buffer *src;
+       const int nritems = btrfs_header_nritems(path->nodes[0]);
        const u64 ino = btrfs_ino(inode);
        bool last_found = false;
        int batch_start = 0;
        int batch_size = 0;
        int i;
 
-       for (i = path->slots[0]; i < nritems; i++) {
+       /*
+        * We need to clone the leaf, release the read lock on it, and use the
+        * clone before modifying the log tree. See the comment at copy_items()
+        * about why we need to do this.
+        */
+       src = btrfs_clone_extent_buffer(path->nodes[0]);
+       if (!src)
+               return -ENOMEM;
+
+       i = path->slots[0];
+       btrfs_release_path(path);
+       path->nodes[0] = src;
+       path->slots[0] = i;
+
+       for (; i < nritems; i++) {
                struct btrfs_dir_item *di;
                struct btrfs_key key;
                int ret;
@@ -4303,7 +4317,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
 {
        struct btrfs_root *log = inode->root->log_root;
        struct btrfs_file_extent_item *extent;
-       struct extent_buffer *src = src_path->nodes[0];
+       struct extent_buffer *src;
        int ret = 0;
        struct btrfs_key *ins_keys;
        u32 *ins_sizes;
@@ -4314,6 +4328,43 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
        const bool skip_csum = (inode->flags & BTRFS_INODE_NODATASUM);
        const u64 i_size = i_size_read(&inode->vfs_inode);
 
+       /*
+        * To keep lockdep happy and avoid deadlocks, clone the source leaf and
+        * use the clone. This is because otherwise we would be changing the log
+        * tree, to insert items from the subvolume tree or insert csum items,
+        * while holding a read lock on a leaf from the subvolume tree, which
+        * creates a nasty lock dependency when COWing log tree nodes/leaves:
+        *
+        * 1) Modifying the log tree triggers an extent buffer allocation while
+        *    holding a write lock on a parent extent buffer from the log tree.
+        *    Allocating the pages for an extent buffer, or the extent buffer
+        *    struct, can trigger inode eviction and finally the inode eviction
+        *    will trigger a release/remove of a delayed node, which requires
+        *    taking the delayed node's mutex;
+        *
+        * 2) Allocating a metadata extent for a log tree can trigger the async
+        *    reclaim thread and make us wait for it to release enough space and
+        *    unblock our reservation ticket. The reclaim thread can start
+        *    flushing delayed items, and that in turn results in the need to
+        *    lock delayed node mutexes and in the need to write lock extent
+        *    buffers of a subvolume tree - all this while holding a write lock
+        *    on the parent extent buffer in the log tree.
+        *
+        * So one task in scenario 1) running in parallel with another task in
+        * scenario 2) could lead to a deadlock, one wanting to lock a delayed
+        * node mutex while having a read lock on a leaf from the subvolume,
+        * while the other is holding the delayed node's mutex and wants to
+        * write lock the same subvolume leaf for flushing delayed items.
+        */
+       src = btrfs_clone_extent_buffer(src_path->nodes[0]);
+       if (!src)
+               return -ENOMEM;
+
+       i = src_path->slots[0];
+       btrfs_release_path(src_path);
+       src_path->nodes[0] = src;
+       src_path->slots[0] = i;
+
        ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
                           nr * sizeof(u32), GFP_NOFS);
        if (!ins_data)
index 1912abf..c9e2b0c 100644 (file)
@@ -134,7 +134,8 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
                        super[i] = page_address(page[i]);
                }
 
-               if (super[0]->generation > super[1]->generation)
+               if (btrfs_super_generation(super[0]) >
+                   btrfs_super_generation(super[1]))
                        sector = zones[1].start;
                else
                        sector = zones[0].start;
@@ -466,7 +467,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
                goto out;
        }
 
-       zones = kcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
+       zones = kvcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
        if (!zones) {
                ret = -ENOMEM;
                goto out;
@@ -585,7 +586,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
        }
 
 
-       kfree(zones);
+       kvfree(zones);
 
        switch (bdev_zoned_model(bdev)) {
        case BLK_ZONED_HM:
@@ -617,7 +618,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
        return 0;
 
 out:
-       kfree(zones);
+       kvfree(zones);
 out_free_zone_info:
        btrfs_destroy_dev_zone_info(device);
 
index fb023f9..e54814d 100644 (file)
@@ -2248,7 +2248,6 @@ static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode)
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_request *req1 = NULL, *req2 = NULL;
-       unsigned int max_sessions;
        int ret, err = 0;
 
        spin_lock(&ci->i_unsafe_lock);
@@ -2267,27 +2266,23 @@ static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode)
        spin_unlock(&ci->i_unsafe_lock);
 
        /*
-        * The mdsc->max_sessions is unlikely to be changed
-        * mostly, here we will retry it by reallocating the
-        * sessions array memory to get rid of the mdsc->mutex
-        * lock.
-        */
-retry:
-       max_sessions = mdsc->max_sessions;
-
-       /*
         * Trigger to flush the journal logs in all the relevant MDSes
         * manually, or in the worst case we must wait at most 5 seconds
         * to wait the journal logs to be flushed by the MDSes periodically.
         */
-       if ((req1 || req2) && likely(max_sessions)) {
-               struct ceph_mds_session **sessions = NULL;
-               struct ceph_mds_session *s;
+       if (req1 || req2) {
                struct ceph_mds_request *req;
+               struct ceph_mds_session **sessions;
+               struct ceph_mds_session *s;
+               unsigned int max_sessions;
                int i;
 
+               mutex_lock(&mdsc->mutex);
+               max_sessions = mdsc->max_sessions;
+
                sessions = kcalloc(max_sessions, sizeof(s), GFP_KERNEL);
                if (!sessions) {
+                       mutex_unlock(&mdsc->mutex);
                        err = -ENOMEM;
                        goto out;
                }
@@ -2299,16 +2294,6 @@ retry:
                                s = req->r_session;
                                if (!s)
                                        continue;
-                               if (unlikely(s->s_mds >= max_sessions)) {
-                                       spin_unlock(&ci->i_unsafe_lock);
-                                       for (i = 0; i < max_sessions; i++) {
-                                               s = sessions[i];
-                                               if (s)
-                                                       ceph_put_mds_session(s);
-                                       }
-                                       kfree(sessions);
-                                       goto retry;
-                               }
                                if (!sessions[s->s_mds]) {
                                        s = ceph_get_mds_session(s);
                                        sessions[s->s_mds] = s;
@@ -2321,16 +2306,6 @@ retry:
                                s = req->r_session;
                                if (!s)
                                        continue;
-                               if (unlikely(s->s_mds >= max_sessions)) {
-                                       spin_unlock(&ci->i_unsafe_lock);
-                                       for (i = 0; i < max_sessions; i++) {
-                                               s = sessions[i];
-                                               if (s)
-                                                       ceph_put_mds_session(s);
-                                       }
-                                       kfree(sessions);
-                                       goto retry;
-                               }
                                if (!sessions[s->s_mds]) {
                                        s = ceph_get_mds_session(s);
                                        sessions[s->s_mds] = s;
@@ -2342,11 +2317,12 @@ retry:
                /* the auth MDS */
                spin_lock(&ci->i_ceph_lock);
                if (ci->i_auth_cap) {
-                     s = ci->i_auth_cap->session;
-                     if (!sessions[s->s_mds])
-                             sessions[s->s_mds] = ceph_get_mds_session(s);
+                       s = ci->i_auth_cap->session;
+                       if (!sessions[s->s_mds])
+                               sessions[s->s_mds] = ceph_get_mds_session(s);
                }
                spin_unlock(&ci->i_ceph_lock);
+               mutex_unlock(&mdsc->mutex);
 
                /* send flush mdlog request to MDSes */
                for (i = 0; i < max_sessions; i++) {
index 4af5e55..bad9eeb 100644 (file)
@@ -2492,7 +2492,7 @@ int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path,
                        struct inode *parent;
 
                        parent = ceph_lookup_inode(sb, ceph_ino(inode));
-                       if (!parent)
+                       if (IS_ERR(parent))
                                return PTR_ERR(parent);
 
                        pci = ceph_inode(parent);
index 864cdaa..e415185 100644 (file)
@@ -763,7 +763,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
        struct ceph_mds_snap_realm *ri;    /* encoded */
        __le64 *snaps;                     /* encoded */
        __le64 *prior_parent_snaps;        /* encoded */
-       struct ceph_snap_realm *realm = NULL;
+       struct ceph_snap_realm *realm;
        struct ceph_snap_realm *first_realm = NULL;
        struct ceph_snap_realm *realm_to_rebuild = NULL;
        int rebuild_snapcs;
@@ -774,6 +774,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
 
        dout("%s deletion=%d\n", __func__, deletion);
 more:
+       realm = NULL;
        rebuild_snapcs = 0;
        ceph_decode_need(&p, e, sizeof(*ri), bad);
        ri = p;
index fe22068..712a431 100644 (file)
@@ -1281,7 +1281,7 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
        rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
                                          off + len - 1);
        if (rc)
-               goto out;
+               goto unlock;
 
        /* should we flush first and last page first */
        truncate_inode_pages(&target_inode->i_data, 0);
@@ -1297,6 +1297,8 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
         * that target is updated on the server
         */
        CIFS_I(target_inode)->time = 0;
+
+unlock:
        /* although unlocking in the reverse order from locking is not
         * strictly necessary here it is a little cleaner to be consistent
         */
index 1cc47dd..9db9527 100644 (file)
@@ -3855,9 +3855,13 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
        uuid_copy(&cifs_sb->dfs_mount_id, &mnt_ctx.mount_id);
 
 out:
-       free_xid(mnt_ctx.xid);
        cifs_try_adding_channels(cifs_sb, mnt_ctx.ses);
-       return mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
+       rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
+       if (rc)
+               goto error;
+
+       free_xid(mnt_ctx.xid);
+       return rc;
 
 error:
        dfs_cache_put_refsrv_sessions(&mnt_ctx.mount_id);
@@ -3884,8 +3888,12 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
                        goto error;
        }
 
+       rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
+       if (rc)
+               goto error;
+
        free_xid(mnt_ctx.xid);
-       return mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
+       return rc;
 
 error:
        mount_put_conns(&mnt_ctx);
index 89d5fa8..6419ec4 100644 (file)
@@ -343,7 +343,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                                        rc = put_user(ExtAttrBits &
                                                FS_FL_USER_VISIBLE,
                                                (int __user *)arg);
-                               if (rc != EOPNOTSUPP)
+                               if (rc != -EOPNOTSUPP)
                                        break;
                        }
 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
@@ -373,7 +373,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                         *                     pSMBFile->fid.netfid,
                         *                     extAttrBits,
                         *                     &ExtAttrMask);
-                        * if (rc != EOPNOTSUPP)
+                        * if (rc != -EOPNOTSUPP)
                         *      break;
                         */
 
index 92e4278..9e7d9f0 100644 (file)
@@ -302,14 +302,14 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
 
        /* now drop the ref to the current iface */
        if (old_iface && iface) {
-               kref_put(&old_iface->refcount, release_iface);
                cifs_dbg(FYI, "replacing iface: %pIS with %pIS\n",
                         &old_iface->sockaddr,
                         &iface->sockaddr);
-       } else if (old_iface) {
                kref_put(&old_iface->refcount, release_iface);
+       } else if (old_iface) {
                cifs_dbg(FYI, "releasing ref to iface: %pIS\n",
                         &old_iface->sockaddr);
+               kref_put(&old_iface->refcount, release_iface);
        } else {
                WARN_ON(!iface);
                cifs_dbg(FYI, "adding new iface: %pIS\n", &iface->sockaddr);
index 880cd49..bfaafd0 100644 (file)
@@ -1116,6 +1116,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
                                COMPOUND_FID, current->tgid,
                                FILE_FULL_EA_INFORMATION,
                                SMB2_O_INFO_FILE, 0, data, size);
+       if (rc)
+               goto sea_exit;
        smb2_set_next_command(tcon, &rqst[1]);
        smb2_set_related(&rqst[1]);
 
@@ -1126,6 +1128,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
        rqst[2].rq_nvec = 1;
        rc = SMB2_close_init(tcon, server,
                             &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
+       if (rc)
+               goto sea_exit;
        smb2_set_related(&rqst[2]);
 
        rc = compound_send_recv(xid, ses, server,
index fe05bc5..af5ed6b 100644 (file)
@@ -75,11 +75,15 @@ static void erofs_fscache_rreq_unlock_folios(struct netfs_io_request *rreq)
 
        rcu_read_lock();
        xas_for_each(&xas, folio, last_page) {
-               unsigned int pgpos =
-                       (folio_index(folio) - start_page) * PAGE_SIZE;
-               unsigned int pgend = pgpos + folio_size(folio);
+               unsigned int pgpos, pgend;
                bool pg_failed = false;
 
+               if (xas_retry(&xas, folio))
+                       continue;
+
+               pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
+               pgend = pgpos + folio_size(folio);
+
                for (;;) {
                        if (!subreq) {
                                pg_failed = true;
@@ -287,22 +291,25 @@ static int erofs_fscache_data_read(struct address_space *mapping,
                        return PTR_ERR(src);
 
                iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, PAGE_SIZE);
-               if (copy_to_iter(src + offset, size, &iter) != size)
+               if (copy_to_iter(src + offset, size, &iter) != size) {
+                       erofs_put_metabuf(&buf);
                        return -EFAULT;
+               }
                iov_iter_zero(PAGE_SIZE - size, &iter);
                erofs_put_metabuf(&buf);
                return PAGE_SIZE;
        }
 
-       count = min_t(size_t, map.m_llen - (pos - map.m_la), len);
-       DBG_BUGON(!count || count % PAGE_SIZE);
-
        if (!(map.m_flags & EROFS_MAP_MAPPED)) {
+               count = len;
                iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, count);
                iov_iter_zero(count, &iter);
                return count;
        }
 
+       count = min_t(size_t, map.m_llen - (pos - map.m_la), len);
+       DBG_BUGON(!count || count % PAGE_SIZE);
+
        mdev = (struct erofs_map_dev) {
                .m_deviceid = map.m_deviceid,
                .m_pa = map.m_pa,
@@ -403,13 +410,13 @@ static void erofs_fscache_domain_put(struct erofs_domain *domain)
 static int erofs_fscache_register_volume(struct super_block *sb)
 {
        struct erofs_sb_info *sbi = EROFS_SB(sb);
-       char *domain_id = sbi->opt.domain_id;
+       char *domain_id = sbi->domain_id;
        struct fscache_volume *volume;
        char *name;
        int ret = 0;
 
        name = kasprintf(GFP_KERNEL, "erofs,%s",
-                        domain_id ? domain_id : sbi->opt.fsid);
+                        domain_id ? domain_id : sbi->fsid);
        if (!name)
                return -ENOMEM;
 
@@ -435,7 +442,7 @@ static int erofs_fscache_init_domain(struct super_block *sb)
        if (!domain)
                return -ENOMEM;
 
-       domain->domain_id = kstrdup(sbi->opt.domain_id, GFP_KERNEL);
+       domain->domain_id = kstrdup(sbi->domain_id, GFP_KERNEL);
        if (!domain->domain_id) {
                kfree(domain);
                return -ENOMEM;
@@ -472,7 +479,7 @@ static int erofs_fscache_register_domain(struct super_block *sb)
 
        mutex_lock(&erofs_domain_list_lock);
        list_for_each_entry(domain, &erofs_domain_list, list) {
-               if (!strcmp(domain->domain_id, sbi->opt.domain_id)) {
+               if (!strcmp(domain->domain_id, sbi->domain_id)) {
                        sbi->domain = domain;
                        sbi->volume = domain->volume;
                        refcount_inc(&domain->ref);
@@ -609,7 +616,7 @@ struct erofs_fscache *erofs_domain_register_cookie(struct super_block *sb,
 struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
                                                    char *name, bool need_inode)
 {
-       if (EROFS_SB(sb)->opt.domain_id)
+       if (EROFS_SB(sb)->domain_id)
                return erofs_domain_register_cookie(sb, name, need_inode);
        return erofs_fscache_acquire_cookie(sb, name, need_inode);
 }
@@ -641,7 +648,7 @@ int erofs_fscache_register_fs(struct super_block *sb)
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        struct erofs_fscache *fscache;
 
-       if (sbi->opt.domain_id)
+       if (sbi->domain_id)
                ret = erofs_fscache_register_domain(sb);
        else
                ret = erofs_fscache_register_volume(sb);
@@ -649,7 +656,7 @@ int erofs_fscache_register_fs(struct super_block *sb)
                return ret;
 
        /* acquired domain/volume will be relinquished in kill_sb() on error */
-       fscache = erofs_fscache_register_cookie(sb, sbi->opt.fsid, true);
+       fscache = erofs_fscache_register_cookie(sb, sbi->fsid, true);
        if (IS_ERR(fscache))
                return PTR_ERR(fscache);
 
index 1701df4..05dc686 100644 (file)
@@ -75,8 +75,6 @@ struct erofs_mount_opts {
        unsigned int max_sync_decompress_pages;
 #endif
        unsigned int mount_opt;
-       char *fsid;
-       char *domain_id;
 };
 
 struct erofs_dev_context {
@@ -89,6 +87,8 @@ struct erofs_dev_context {
 struct erofs_fs_context {
        struct erofs_mount_opts opt;
        struct erofs_dev_context *devs;
+       char *fsid;
+       char *domain_id;
 };
 
 /* all filesystem-wide lz4 configurations */
@@ -170,6 +170,8 @@ struct erofs_sb_info {
        struct fscache_volume *volume;
        struct erofs_fscache *s_fscache;
        struct erofs_domain *domain;
+       char *fsid;
+       char *domain_id;
 };
 
 #define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
index 2cf96ce..1c7dcca 100644 (file)
@@ -579,9 +579,9 @@ static int erofs_fc_parse_param(struct fs_context *fc,
                break;
        case Opt_fsid:
 #ifdef CONFIG_EROFS_FS_ONDEMAND
-               kfree(ctx->opt.fsid);
-               ctx->opt.fsid = kstrdup(param->string, GFP_KERNEL);
-               if (!ctx->opt.fsid)
+               kfree(ctx->fsid);
+               ctx->fsid = kstrdup(param->string, GFP_KERNEL);
+               if (!ctx->fsid)
                        return -ENOMEM;
 #else
                errorfc(fc, "fsid option not supported");
@@ -589,9 +589,9 @@ static int erofs_fc_parse_param(struct fs_context *fc,
                break;
        case Opt_domain_id:
 #ifdef CONFIG_EROFS_FS_ONDEMAND
-               kfree(ctx->opt.domain_id);
-               ctx->opt.domain_id = kstrdup(param->string, GFP_KERNEL);
-               if (!ctx->opt.domain_id)
+               kfree(ctx->domain_id);
+               ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
+               if (!ctx->domain_id)
                        return -ENOMEM;
 #else
                errorfc(fc, "domain_id option not supported");
@@ -728,10 +728,12 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
 
        sb->s_fs_info = sbi;
        sbi->opt = ctx->opt;
-       ctx->opt.fsid = NULL;
-       ctx->opt.domain_id = NULL;
        sbi->devs = ctx->devs;
        ctx->devs = NULL;
+       sbi->fsid = ctx->fsid;
+       ctx->fsid = NULL;
+       sbi->domain_id = ctx->domain_id;
+       ctx->domain_id = NULL;
 
        if (erofs_is_fscache_mode(sb)) {
                sb->s_blocksize = EROFS_BLKSIZ;
@@ -820,7 +822,7 @@ static int erofs_fc_get_tree(struct fs_context *fc)
 {
        struct erofs_fs_context *ctx = fc->fs_private;
 
-       if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->opt.fsid)
+       if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid)
                return get_tree_nodev(fc, erofs_fc_fill_super);
 
        return get_tree_bdev(fc, erofs_fc_fill_super);
@@ -834,6 +836,9 @@ static int erofs_fc_reconfigure(struct fs_context *fc)
 
        DBG_BUGON(!sb_rdonly(sb));
 
+       if (ctx->fsid || ctx->domain_id)
+               erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");
+
        if (test_opt(&ctx->opt, POSIX_ACL))
                fc->sb_flags |= SB_POSIXACL;
        else
@@ -873,8 +878,8 @@ static void erofs_fc_free(struct fs_context *fc)
        struct erofs_fs_context *ctx = fc->fs_private;
 
        erofs_free_dev_context(ctx->devs);
-       kfree(ctx->opt.fsid);
-       kfree(ctx->opt.domain_id);
+       kfree(ctx->fsid);
+       kfree(ctx->domain_id);
        kfree(ctx);
 }
 
@@ -944,8 +949,8 @@ static void erofs_kill_sb(struct super_block *sb)
        erofs_free_dev_context(sbi->devs);
        fs_put_dax(sbi->dax_dev, NULL);
        erofs_fscache_unregister_fs(sb);
-       kfree(sbi->opt.fsid);
-       kfree(sbi->opt.domain_id);
+       kfree(sbi->fsid);
+       kfree(sbi->domain_id);
        kfree(sbi);
        sb->s_fs_info = NULL;
 }
@@ -1098,10 +1103,10 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
        if (test_opt(opt, DAX_NEVER))
                seq_puts(seq, ",dax=never");
 #ifdef CONFIG_EROFS_FS_ONDEMAND
-       if (opt->fsid)
-               seq_printf(seq, ",fsid=%s", opt->fsid);
-       if (opt->domain_id)
-               seq_printf(seq, ",domain_id=%s", opt->domain_id);
+       if (sbi->fsid)
+               seq_printf(seq, ",fsid=%s", sbi->fsid);
+       if (sbi->domain_id)
+               seq_printf(seq, ",domain_id=%s", sbi->domain_id);
 #endif
        return 0;
 }
index 783bb7b..fd47696 100644 (file)
@@ -210,14 +210,14 @@ int erofs_register_sysfs(struct super_block *sb)
        int err;
 
        if (erofs_is_fscache_mode(sb)) {
-               if (sbi->opt.domain_id) {
-                       str = kasprintf(GFP_KERNEL, "%s,%s", sbi->opt.domain_id,
-                                       sbi->opt.fsid);
+               if (sbi->domain_id) {
+                       str = kasprintf(GFP_KERNEL, "%s,%s", sbi->domain_id,
+                                       sbi->fsid);
                        if (!str)
                                return -ENOMEM;
                        name = str;
                } else {
-                       name = sbi->opt.fsid;
+                       name = sbi->fsid;
                }
        } else {
                name = sb->s_id;
index 064a166..b792d42 100644 (file)
@@ -660,6 +660,9 @@ static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
        u8 *src, *dst;
        unsigned int i, cnt;
 
+       if (!packed_inode)
+               return -EFSCORRUPTED;
+
        pos += EROFS_I(inode)->z_fragmentoff;
        for (i = 0; i < len; i += cnt) {
                cnt = min_t(unsigned int, len - i,
index f195628..6c399a8 100644 (file)
@@ -5184,6 +5184,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
         * and it is decreased till we reach start.
         */
 again:
+       ret = 0;
        if (SHIFT == SHIFT_LEFT)
                iterator = &start;
        else
@@ -5227,14 +5228,21 @@ again:
                                        ext4_ext_get_actual_len(extent);
                } else {
                        extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
-                       if (le32_to_cpu(extent->ee_block) > 0)
+                       if (le32_to_cpu(extent->ee_block) > start)
                                *iterator = le32_to_cpu(extent->ee_block) - 1;
-                       else
-                               /* Beginning is reached, end of the loop */
+                       else if (le32_to_cpu(extent->ee_block) == start)
                                iterator = NULL;
-                       /* Update path extent in case we need to stop */
-                       while (le32_to_cpu(extent->ee_block) < start)
+                       else {
+                               extent = EXT_LAST_EXTENT(path[depth].p_hdr);
+                               while (le32_to_cpu(extent->ee_block) >= start)
+                                       extent--;
+
+                               if (extent == EXT_LAST_EXTENT(path[depth].p_hdr))
+                                       break;
+
                                extent++;
+                               iterator = NULL;
+                       }
                        path[depth].p_ext = extent;
                }
                ret = ext4_ext_shift_path_extents(path, shift, inode,
index 5f9c802..c942c89 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -1003,7 +1003,16 @@ static unsigned long __fget_light(unsigned int fd, fmode_t mask)
        struct files_struct *files = current->files;
        struct file *file;
 
-       if (atomic_read(&files->count) == 1) {
+       /*
+        * If another thread is concurrently calling close_fd() followed
+        * by put_files_struct(), we must not observe the old table
+        * entry combined with the new refcount - otherwise we could
+        * return a file that is concurrently being freed.
+        *
+        * atomic_read_acquire() pairs with atomic_dec_and_test() in
+        * put_files_struct().
+        */
+       if (atomic_read_acquire(&files->count) == 1) {
                file = files_lookup_fd_raw(files, fd);
                if (!file || unlikely(file->f_mode & mask))
                        return 0;
index 443f833..9958d40 100644 (file)
@@ -1712,18 +1712,26 @@ static int writeback_single_inode(struct inode *inode,
        wb = inode_to_wb_and_lock_list(inode);
        spin_lock(&inode->i_lock);
        /*
-        * If the inode is now fully clean, then it can be safely removed from
-        * its writeback list (if any).  Otherwise the flusher threads are
-        * responsible for the writeback lists.
+        * If the inode is freeing, its i_io_list shoudn't be updated
+        * as it can be finally deleted at this moment.
         */
-       if (!(inode->i_state & I_DIRTY_ALL))
-               inode_cgwb_move_to_attached(inode, wb);
-       else if (!(inode->i_state & I_SYNC_QUEUED)) {
-               if ((inode->i_state & I_DIRTY))
-                       redirty_tail_locked(inode, wb);
-               else if (inode->i_state & I_DIRTY_TIME) {
-                       inode->dirtied_when = jiffies;
-                       inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
+       if (!(inode->i_state & I_FREEING)) {
+               /*
+                * If the inode is now fully clean, then it can be safely
+                * removed from its writeback list (if any). Otherwise the
+                * flusher threads are responsible for the writeback lists.
+                */
+               if (!(inode->i_state & I_DIRTY_ALL))
+                       inode_cgwb_move_to_attached(inode, wb);
+               else if (!(inode->i_state & I_SYNC_QUEUED)) {
+                       if ((inode->i_state & I_DIRTY))
+                               redirty_tail_locked(inode, wb);
+                       else if (inode->i_state & I_DIRTY_TIME) {
+                               inode->dirtied_when = jiffies;
+                               inode_io_list_move_locked(inode,
+                                                         wb,
+                                                         &wb->b_dirty_time);
+                       }
                }
        }
 
index a058e01..ab8cedd 100644 (file)
@@ -203,7 +203,11 @@ static struct fscache_volume *fscache_alloc_volume(const char *volume_key,
        struct fscache_volume *volume;
        struct fscache_cache *cache;
        size_t klen, hlen;
-       char *key;
+       u8 *key;
+
+       klen = strlen(volume_key);
+       if (klen > NAME_MAX)
+               return NULL;
 
        if (!coherency_data)
                coherency_len = 0;
@@ -229,7 +233,6 @@ static struct fscache_volume *fscache_alloc_volume(const char *volume_key,
        /* Stick the length on the front of the key and pad it out to make
         * hashing easier.
         */
-       klen = strlen(volume_key);
        hlen = round_up(1 + klen + 1, sizeof(__le32));
        key = kzalloc(hlen, GFP_KERNEL);
        if (!key)
index 3990f3e..f33b3ba 100644 (file)
@@ -31,10 +31,15 @@ static DEFINE_SPINLOCK(kernfs_idr_lock);    /* root->ino_idr */
 
 #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
 
+static bool __kernfs_active(struct kernfs_node *kn)
+{
+       return atomic_read(&kn->active) >= 0;
+}
+
 static bool kernfs_active(struct kernfs_node *kn)
 {
        lockdep_assert_held(&kernfs_root(kn)->kernfs_rwsem);
-       return atomic_read(&kn->active) >= 0;
+       return __kernfs_active(kn);
 }
 
 static bool kernfs_lockdep(struct kernfs_node *kn)
@@ -705,7 +710,12 @@ struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
                        goto err_unlock;
        }
 
-       if (unlikely(!kernfs_active(kn) || !atomic_inc_not_zero(&kn->count)))
+       /*
+        * We should fail if @kn has never been activated and guarantee success
+        * if the caller knows that @kn is active. Both can be achieved by
+        * __kernfs_active() which tests @kn->active without kernfs_rwsem.
+        */
+       if (unlikely(!__kernfs_active(kn) || !atomic_inc_not_zero(&kn->count)))
                goto err_unlock;
 
        spin_unlock(&kernfs_idr_lock);
index 578c211..9155ecb 100644 (file)
@@ -3591,6 +3591,7 @@ static int vfs_tmpfile(struct user_namespace *mnt_userns,
        struct inode *dir = d_inode(parentpath->dentry);
        struct inode *inode;
        int error;
+       int open_flag = file->f_flags;
 
        /* we want directory to be writable */
        error = inode_permission(mnt_userns, dir, MAY_WRITE | MAY_EXEC);
@@ -3613,7 +3614,7 @@ static int vfs_tmpfile(struct user_namespace *mnt_userns,
        if (error)
                return error;
        inode = file_inode(file);
-       if (!(file->f_flags & O_EXCL)) {
+       if (!(open_flag & O_EXCL)) {
                spin_lock(&inode->i_lock);
                inode->i_state |= I_LINKABLE;
                spin_unlock(&inode->i_lock);
index 0ce5358..7679a68 100644 (file)
@@ -17,9 +17,9 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 {
        struct netfs_io_subrequest *subreq;
        struct folio *folio;
-       unsigned int iopos, account = 0;
        pgoff_t start_page = rreq->start / PAGE_SIZE;
        pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
+       size_t account = 0;
        bool subreq_failed = false;
 
        XA_STATE(xas, &rreq->mapping->i_pages, start_page);
@@ -39,18 +39,23 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
         */
        subreq = list_first_entry(&rreq->subrequests,
                                  struct netfs_io_subrequest, rreq_link);
-       iopos = 0;
        subreq_failed = (subreq->error < 0);
 
        trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);
 
        rcu_read_lock();
        xas_for_each(&xas, folio, last_page) {
-               unsigned int pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
-               unsigned int pgend = pgpos + folio_size(folio);
+               loff_t pg_end;
                bool pg_failed = false;
 
+               if (xas_retry(&xas, folio))
+                       continue;
+
+               pg_end = folio_pos(folio) + folio_size(folio) - 1;
+
                for (;;) {
+                       loff_t sreq_end;
+
                        if (!subreq) {
                                pg_failed = true;
                                break;
@@ -58,11 +63,11 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
                        if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
                                folio_start_fscache(folio);
                        pg_failed |= subreq_failed;
-                       if (pgend < iopos + subreq->len)
+                       sreq_end = subreq->start + subreq->len - 1;
+                       if (pg_end < sreq_end)
                                break;
 
                        account += subreq->transferred;
-                       iopos += subreq->len;
                        if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
                                subreq = list_next_entry(subreq, rreq_link);
                                subreq_failed = (subreq->error < 0);
@@ -70,7 +75,8 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
                                subreq = NULL;
                                subreq_failed = false;
                        }
-                       if (pgend == iopos)
+
+                       if (pg_end == sreq_end)
                                break;
                }
 
index 4289258..e374767 100644 (file)
@@ -121,6 +121,9 @@ static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
                XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
 
                xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
+                       if (xas_retry(&xas, folio))
+                               continue;
+
                        /* We might have multiple writes from the same huge
                         * folio, but we mustn't unlock a folio more than once.
                         */
index 06a96e9..d4b6839 100644 (file)
@@ -254,7 +254,10 @@ TRACE_EVENT_CONDITION(nfsd_fh_verify_err,
                                  rqstp->rq_xprt->xpt_remotelen);
                __entry->xid = be32_to_cpu(rqstp->rq_xid);
                __entry->fh_hash = knfsd_fh_hash(&fhp->fh_handle);
-               __entry->inode = d_inode(fhp->fh_dentry);
+               if (fhp->fh_dentry)
+                       __entry->inode = d_inode(fhp->fh_dentry);
+               else
+                       __entry->inode = NULL;
                __entry->type = type;
                __entry->access = access;
                __entry->error = be32_to_cpu(error);
index f650afe..ac3c384 100644 (file)
@@ -871,10 +871,11 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
        struct svc_rqst *rqstp = sd->u.data;
        struct page *page = buf->page;  // may be a compound one
        unsigned offset = buf->offset;
+       struct page *last_page;
 
-       page += offset / PAGE_SIZE;
-       for (int i = sd->len; i > 0; i -= PAGE_SIZE)
-               svc_rqst_replace_page(rqstp, page++);
+       last_page = page + (offset + sd->len - 1) / PAGE_SIZE;
+       for (page += offset / PAGE_SIZE; page <= last_page; page++)
+               svc_rqst_replace_page(rqstp, page);
        if (rqstp->rq_res.page_len == 0)        // first call
                rqstp->rq_res.page_base = offset % PAGE_SIZE;
        rqstp->rq_res.page_len += sd->len;
index 77ff8e9..dc359b5 100644 (file)
@@ -495,14 +495,22 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
 int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
 {
        struct buffer_head *bh;
+       void *kaddr;
+       struct nilfs_segment_usage *su;
        int ret;
 
+       down_write(&NILFS_MDT(sufile)->mi_sem);
        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
        if (!ret) {
                mark_buffer_dirty(bh);
                nilfs_mdt_mark_dirty(sufile);
+               kaddr = kmap_atomic(bh->b_page);
+               su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
+               nilfs_segment_usage_set_dirty(su);
+               kunmap_atomic(kaddr);
                brelse(bh);
        }
+       up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
 }
 
index 5101131..4409601 100644 (file)
@@ -115,7 +115,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 #endif
        show_val_kb(m, "PageTables:     ",
                    global_node_page_state(NR_PAGETABLE));
-       show_val_kb(m, "SecPageTables:  ",
+       show_val_kb(m, "SecPageTables:  ",
                    global_node_page_state(NR_SECONDARY_PAGETABLE));
 
        show_val_kb(m, "NFS_Unstable:   ", 0);
index 860f0b1..2c53fbb 100644 (file)
@@ -41,6 +41,13 @@ static void zonefs_account_active(struct inode *inode)
                return;
 
        /*
+        * For zones that transitioned to the offline or readonly condition,
+        * we only need to clear the active state.
+        */
+       if (zi->i_flags & (ZONEFS_ZONE_OFFLINE | ZONEFS_ZONE_READONLY))
+               goto out;
+
+       /*
         * If the zone is active, that is, if it is explicitly open or
         * partially written, check if it was already accounted as active.
         */
@@ -53,6 +60,7 @@ static void zonefs_account_active(struct inode *inode)
                return;
        }
 
+out:
        /* The zone is not active. If it was, update the active count */
        if (zi->i_flags & ZONEFS_ZONE_ACTIVE) {
                zi->i_flags &= ~ZONEFS_ZONE_ACTIVE;
@@ -324,6 +332,7 @@ static loff_t zonefs_check_zone_condition(struct inode *inode,
                inode->i_flags |= S_IMMUTABLE;
                inode->i_mode &= ~0777;
                zone->wp = zone->start;
+               zi->i_flags |= ZONEFS_ZONE_OFFLINE;
                return 0;
        case BLK_ZONE_COND_READONLY:
                /*
@@ -342,8 +351,10 @@ static loff_t zonefs_check_zone_condition(struct inode *inode,
                        zone->cond = BLK_ZONE_COND_OFFLINE;
                        inode->i_mode &= ~0777;
                        zone->wp = zone->start;
+                       zi->i_flags |= ZONEFS_ZONE_OFFLINE;
                        return 0;
                }
+               zi->i_flags |= ZONEFS_ZONE_READONLY;
                inode->i_mode &= ~0222;
                return i_size_read(inode);
        case BLK_ZONE_COND_FULL:
@@ -478,8 +489,7 @@ static void __zonefs_io_error(struct inode *inode, bool write)
        struct super_block *sb = inode->i_sb;
        struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
        unsigned int noio_flag;
-       unsigned int nr_zones =
-               zi->i_zone_size >> (sbi->s_zone_sectors_shift + SECTOR_SHIFT);
+       unsigned int nr_zones = 1;
        struct zonefs_ioerr_data err = {
                .inode = inode,
                .write = write,
@@ -487,6 +497,15 @@ static void __zonefs_io_error(struct inode *inode, bool write)
        int ret;
 
        /*
+        * The only files that have more than one zone are conventional zone
+        * files with aggregated conventional zones, for which the inode zone
+        * size is always larger than the device zone size.
+        */
+       if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev))
+               nr_zones = zi->i_zone_size >>
+                       (sbi->s_zone_sectors_shift + SECTOR_SHIFT);
+
+       /*
         * Memory allocations in blkdev_report_zones() can trigger a memory
         * reclaim which may in turn cause a recursion into zonefs as well as
         * struct request allocations for the same device. The former case may
@@ -1407,6 +1426,14 @@ static int zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
        zi->i_ztype = type;
        zi->i_zsector = zone->start;
        zi->i_zone_size = zone->len << SECTOR_SHIFT;
+       if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT &&
+           !(sbi->s_features & ZONEFS_F_AGGRCNV)) {
+               zonefs_err(sb,
+                          "zone size %llu doesn't match device's zone sectors %llu\n",
+                          zi->i_zone_size,
+                          bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT);
+               return -EINVAL;
+       }
 
        zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE,
                               zone->capacity << SECTOR_SHIFT);
@@ -1456,11 +1483,11 @@ static struct dentry *zonefs_create_inode(struct dentry *parent,
        struct inode *dir = d_inode(parent);
        struct dentry *dentry;
        struct inode *inode;
-       int ret;
+       int ret = -ENOMEM;
 
        dentry = d_alloc_name(parent, name);
        if (!dentry)
-               return NULL;
+               return ERR_PTR(ret);
 
        inode = new_inode(parent->d_sb);
        if (!inode)
@@ -1485,7 +1512,7 @@ static struct dentry *zonefs_create_inode(struct dentry *parent,
 dput:
        dput(dentry);
 
-       return NULL;
+       return ERR_PTR(ret);
 }
 
 struct zonefs_zone_data {
@@ -1505,7 +1532,7 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
        struct blk_zone *zone, *next, *end;
        const char *zgroup_name;
        char *file_name;
-       struct dentry *dir;
+       struct dentry *dir, *dent;
        unsigned int n = 0;
        int ret;
 
@@ -1523,8 +1550,8 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
                zgroup_name = "seq";
 
        dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type);
-       if (!dir) {
-               ret = -ENOMEM;
+       if (IS_ERR(dir)) {
+               ret = PTR_ERR(dir);
                goto free;
        }
 
@@ -1570,8 +1597,9 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
                 * Use the file number within its group as file name.
                 */
                snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n);
-               if (!zonefs_create_inode(dir, file_name, zone, type)) {
-                       ret = -ENOMEM;
+               dent = zonefs_create_inode(dir, file_name, zone, type);
+               if (IS_ERR(dent)) {
+                       ret = PTR_ERR(dent);
                        goto free;
                }
 
@@ -1905,18 +1933,18 @@ static int __init zonefs_init(void)
        if (ret)
                return ret;
 
-       ret = register_filesystem(&zonefs_type);
+       ret = zonefs_sysfs_init();
        if (ret)
                goto destroy_inodecache;
 
-       ret = zonefs_sysfs_init();
+       ret = register_filesystem(&zonefs_type);
        if (ret)
-               goto unregister_fs;
+               goto sysfs_exit;
 
        return 0;
 
-unregister_fs:
-       unregister_filesystem(&zonefs_type);
+sysfs_exit:
+       zonefs_sysfs_exit();
 destroy_inodecache:
        zonefs_destroy_inodecache();
 
@@ -1925,9 +1953,9 @@ destroy_inodecache:
 
 static void __exit zonefs_exit(void)
 {
+       unregister_filesystem(&zonefs_type);
        zonefs_sysfs_exit();
        zonefs_destroy_inodecache();
-       unregister_filesystem(&zonefs_type);
 }
 
 MODULE_AUTHOR("Damien Le Moal");
index 9cb6755..9920689 100644 (file)
@@ -15,11 +15,6 @@ struct zonefs_sysfs_attr {
        ssize_t (*show)(struct zonefs_sb_info *sbi, char *buf);
 };
 
-static inline struct zonefs_sysfs_attr *to_attr(struct attribute *attr)
-{
-       return container_of(attr, struct zonefs_sysfs_attr, attr);
-}
-
 #define ZONEFS_SYSFS_ATTR_RO(name) \
 static struct zonefs_sysfs_attr zonefs_sysfs_attr_##name = __ATTR_RO(name)
 
index 4b3de66..1dbe781 100644 (file)
@@ -39,8 +39,10 @@ static inline enum zonefs_ztype zonefs_zone_type(struct blk_zone *zone)
        return ZONEFS_ZTYPE_SEQ;
 }
 
-#define ZONEFS_ZONE_OPEN       (1 << 0)
-#define ZONEFS_ZONE_ACTIVE     (1 << 1)
+#define ZONEFS_ZONE_OPEN       (1U << 0)
+#define ZONEFS_ZONE_ACTIVE     (1U << 1)
+#define ZONEFS_ZONE_OFFLINE    (1U << 2)
+#define ZONEFS_ZONE_READONLY   (1U << 3)
 
 /*
  * In-memory inode data.
index 50e358a..891f8cb 100644 (file)
@@ -311,6 +311,13 @@ struct queue_limits {
        unsigned char           discard_misaligned;
        unsigned char           raid_partial_stripes_expensive;
        enum blk_zoned_model    zoned;
+
+       /*
+        * Drivers that set dma_alignment to less than 511 must be prepared to
+        * handle individual bvec's that are not a multiple of a SECTOR_SIZE
+        * due to possible offsets.
+        */
+       unsigned int            dma_alignment;
 };
 
 typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
@@ -456,12 +463,6 @@ struct request_queue {
        unsigned long           nr_requests;    /* Max # of requests */
 
        unsigned int            dma_pad_mask;
-       /*
-        * Drivers that set dma_alignment to less than 511 must be prepared to
-        * handle individual bvec's that are not a multiple of a SECTOR_SIZE
-        * due to possible offsets.
-        */
-       unsigned int            dma_alignment;
 
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
        struct blk_crypto_profile *crypto_profile;
@@ -944,7 +945,6 @@ extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
-extern void blk_set_default_limits(struct queue_limits *lim);
 extern void blk_set_stacking_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                            sector_t offset);
@@ -1324,7 +1324,7 @@ static inline sector_t bdev_zone_sectors(struct block_device *bdev)
 
 static inline int queue_dma_alignment(const struct request_queue *q)
 {
-       return q ? q->dma_alignment : 511;
+       return q ? q->limits.dma_alignment : 511;
 }
 
 static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
index 0566705..c1bd1bd 100644 (file)
@@ -27,7 +27,7 @@
 #include <linux/bpfptr.h>
 #include <linux/btf.h>
 #include <linux/rcupdate_trace.h>
-#include <linux/init.h>
+#include <linux/static_call.h>
 
 struct bpf_verifier_env;
 struct bpf_verifier_log;
@@ -315,7 +315,7 @@ static inline void __copy_map_value(struct bpf_map *map, void *dst, void *src, b
                u32 next_off = map->off_arr->field_off[i];
 
                memcpy(dst + curr_off, src + curr_off, next_off - curr_off);
-               curr_off += map->off_arr->field_sz[i];
+               curr_off = next_off + map->off_arr->field_sz[i];
        }
        memcpy(dst + curr_off, src + curr_off, map->value_size - curr_off);
 }
@@ -344,7 +344,7 @@ static inline void zero_map_value(struct bpf_map *map, void *dst)
                u32 next_off = map->off_arr->field_off[i];
 
                memset(dst + curr_off, 0, next_off - curr_off);
-               curr_off += map->off_arr->field_sz[i];
+               curr_off = next_off + map->off_arr->field_sz[i];
        }
        memset(dst + curr_off, 0, map->value_size - curr_off);
 }
@@ -954,6 +954,10 @@ struct bpf_dispatcher {
        void *rw_image;
        u32 image_off;
        struct bpf_ksym ksym;
+#ifdef CONFIG_HAVE_STATIC_CALL
+       struct static_call_key *sc_key;
+       void *sc_tramp;
+#endif
 };
 
 static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
@@ -971,7 +975,33 @@ struct bpf_trampoline *bpf_trampoline_get(u64 key,
                                          struct bpf_attach_target_info *tgt_info);
 void bpf_trampoline_put(struct bpf_trampoline *tr);
 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
-int __init bpf_arch_init_dispatcher_early(void *ip);
+
+/*
+ * When the architecture supports STATIC_CALL replace the bpf_dispatcher_fn
+ * indirection with a direct call to the bpf program. If the architecture does
+ * not have STATIC_CALL, avoid a double-indirection.
+ */
+#ifdef CONFIG_HAVE_STATIC_CALL
+
+#define __BPF_DISPATCHER_SC_INIT(_name)                                \
+       .sc_key = &STATIC_CALL_KEY(_name),                      \
+       .sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),
+
+#define __BPF_DISPATCHER_SC(name)                              \
+       DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)
+
+#define __BPF_DISPATCHER_CALL(name)                            \
+       static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)
+
+#define __BPF_DISPATCHER_UPDATE(_d, _new)                      \
+       __static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))
+
+#else
+#define __BPF_DISPATCHER_SC_INIT(name)
+#define __BPF_DISPATCHER_SC(name)
+#define __BPF_DISPATCHER_CALL(name)            bpf_func(ctx, insnsi)
+#define __BPF_DISPATCHER_UPDATE(_d, _new)
+#endif
 
 #define BPF_DISPATCHER_INIT(_name) {                           \
        .mutex = __MUTEX_INITIALIZER(_name.mutex),              \
@@ -984,34 +1014,21 @@ int __init bpf_arch_init_dispatcher_early(void *ip);
                .name  = #_name,                                \
                .lnode = LIST_HEAD_INIT(_name.ksym.lnode),      \
        },                                                      \
+       __BPF_DISPATCHER_SC_INIT(_name##_call)                  \
 }
 
-#define BPF_DISPATCHER_INIT_CALL(_name)                                        \
-       static int __init _name##_init(void)                            \
-       {                                                               \
-               return bpf_arch_init_dispatcher_early(_name##_func);    \
-       }                                                               \
-       early_initcall(_name##_init)
-
-#ifdef CONFIG_X86_64
-#define BPF_DISPATCHER_ATTRIBUTES __attribute__((patchable_function_entry(5)))
-#else
-#define BPF_DISPATCHER_ATTRIBUTES
-#endif
-
 #define DEFINE_BPF_DISPATCHER(name)                                    \
-       notrace BPF_DISPATCHER_ATTRIBUTES                               \
+       __BPF_DISPATCHER_SC(name);                                      \
        noinline __nocfi unsigned int bpf_dispatcher_##name##_func(     \
                const void *ctx,                                        \
                const struct bpf_insn *insnsi,                          \
                bpf_func_t bpf_func)                                    \
        {                                                               \
-               return bpf_func(ctx, insnsi);                           \
+               return __BPF_DISPATCHER_CALL(name);                     \
        }                                                               \
        EXPORT_SYMBOL(bpf_dispatcher_##name##_func);                    \
        struct bpf_dispatcher bpf_dispatcher_##name =                   \
-               BPF_DISPATCHER_INIT(bpf_dispatcher_##name);             \
-       BPF_DISPATCHER_INIT_CALL(bpf_dispatcher_##name);
+               BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
 
 #define DECLARE_BPF_DISPATCHER(name)                                   \
        unsigned int bpf_dispatcher_##name##_func(                      \
@@ -1019,6 +1036,7 @@ int __init bpf_arch_init_dispatcher_early(void *ip);
                const struct bpf_insn *insnsi,                          \
                bpf_func_t bpf_func);                                   \
        extern struct bpf_dispatcher bpf_dispatcher_##name;
+
 #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
 #define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
 void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
index 9f6e254..444236d 100644 (file)
@@ -20,7 +20,6 @@ struct fault_attr {
        atomic_t space;
        unsigned long verbose;
        bool task_filter;
-       bool no_warn;
        unsigned long stacktrace_depth;
        unsigned long require_start;
        unsigned long require_end;
@@ -32,6 +31,10 @@ struct fault_attr {
        struct dentry *dname;
 };
 
+enum fault_flags {
+       FAULT_NOWARN =  1 << 0,
+};
+
 #define FAULT_ATTR_INITIALIZER {                                       \
                .interval = 1,                                          \
                .times = ATOMIC_INIT(1),                                \
@@ -40,11 +43,11 @@ struct fault_attr {
                .ratelimit_state = RATELIMIT_STATE_INIT_DISABLED,       \
                .verbose = 2,                                           \
                .dname = NULL,                                          \
-               .no_warn = false,                                       \
        }
 
 #define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
 int setup_fault_attr(struct fault_attr *attr, char *str);
+bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags);
 bool should_fail(struct fault_attr *attr, ssize_t size);
 
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
index 36e5dd8..8e312c8 100644 (file)
@@ -75,7 +75,7 @@ struct fscache_volume {
        atomic_t                        n_accesses;     /* Number of cache accesses in progress */
        unsigned int                    debug_id;
        unsigned int                    key_hash;       /* Hash of key string */
-       char                            *key;           /* Volume ID, eg. "afs@example.com@1234" */
+       u8                              *key;           /* Volume ID, eg. "afs@example.com@1234" */
        struct list_head                proc_link;      /* Link in /proc/fs/fscache/volumes */
        struct hlist_bl_node            hash_link;      /* Link in hash table */
        struct work_struct              work;
index 43bc8a2..0ded9e2 100644 (file)
@@ -16,6 +16,9 @@ enum io_uring_cmd_flags {
        IO_URING_F_SQE128               = 4,
        IO_URING_F_CQE32                = 8,
        IO_URING_F_IOPOLL               = 16,
+
+       /* the request is executed from poll, it should not be freed */
+       IO_URING_F_MULTISHOT            = 32,
 };
 
 struct io_uring_cmd {
index 18592bd..637a606 100644 (file)
@@ -776,6 +776,7 @@ struct kvm {
        struct srcu_struct srcu;
        struct srcu_struct irq_srcu;
        pid_t userspace_pid;
+       bool override_halt_poll_ns;
        unsigned int max_halt_poll_ns;
        u32 dirty_ring_size;
        bool vm_bugged;
index 7cce390..ad937f5 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef __LICENSE_H
 #define __LICENSE_H
 
+#include <linux/string.h>
+
 static inline int license_is_gpl_compatible(const char *license)
 {
        return (strcmp(license, "GPL") == 0
index af2ceb4..06cbad1 100644 (file)
@@ -981,6 +981,7 @@ struct mlx5_async_work {
        struct mlx5_async_ctx *ctx;
        mlx5_async_cbk_t user_callback;
        u16 opcode; /* cmd opcode */
+       u16 op_mod; /* cmd op_mod */
        void *out; /* pointer to the cmd output buffer */
 };
 
index 2504df9..3c7d295 100644 (file)
@@ -100,7 +100,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
 
 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
-                         struct file *filp, poll_table *poll_table);
+                         struct file *filp, poll_table *poll_table, int full);
 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
 
 #define RING_BUFFER_ALL_CPUS -1
index b5e16e4..80ffda8 100644 (file)
@@ -26,13 +26,13 @@ struct trace_export {
        int flags;
 };
 
+struct trace_array;
+
 #ifdef CONFIG_TRACING
 
 int register_ftrace_export(struct trace_export *export);
 int unregister_ftrace_export(struct trace_export *export);
 
-struct trace_array;
-
 void trace_printk_init_buffers(void);
 __printf(3, 4)
 int trace_array_printk(struct trace_array *tr, unsigned long ip,
index e7cebeb..fdd393f 100644 (file)
@@ -189,6 +189,7 @@ int vfio_register_emulated_iommu_dev(struct vfio_device *device);
 void vfio_unregister_group_dev(struct vfio_device *device);
 
 int vfio_assign_device_set(struct vfio_device *device, void *set_id);
+unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set);
 
 int vfio_mig_get_next_state(struct vfio_device *device,
                            enum vfio_device_mig_state cur_fsm,
index 3af1e92..6917409 100644 (file)
@@ -281,7 +281,8 @@ inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, in
  * sk_v6_rcv_saddr (ipv6) changes after it has been binded. The socket's
  * rcv_saddr field should already have been updated when this is called.
  */
-int inet_bhash2_update_saddr(struct inet_bind_hashbucket *prev_saddr, struct sock *sk);
+int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family);
+void inet_bhash2_reset_saddr(struct sock *sk);
 
 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
                    struct inet_bind2_bucket *tb2, unsigned short port);
index 038097c..144bdfb 100644 (file)
@@ -563,7 +563,7 @@ static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
        BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
                     offsetof(typeof(flow->addrs), v4addrs.src) +
                              sizeof(flow->addrs.v4addrs.src));
-       memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
+       memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs));
        flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 }
 
index 37943ba..d383c89 100644 (file)
@@ -897,7 +897,7 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
        BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) !=
                     offsetof(typeof(flow->addrs), v6addrs.src) +
                     sizeof(flow->addrs.v6addrs.src));
-       memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs));
+       memcpy(&flow->addrs.v6addrs, &iph->addrs, sizeof(flow->addrs.v6addrs));
        flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
 }
 
index 20745cf..2f2a602 100644 (file)
@@ -83,7 +83,7 @@ struct neigh_parms {
        struct rcu_head rcu_head;
 
        int     reachable_time;
-       int     qlen;
+       u32     qlen;
        int     data[NEIGH_VAR_DATA_MAX];
        DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
 };
index 5db0254..e0517ec 100644 (file)
@@ -323,7 +323,7 @@ struct sk_filter;
   *    @sk_tskey: counter to disambiguate concurrent tstamp requests
   *    @sk_zckey: counter to order MSG_ZEROCOPY notifications
   *    @sk_socket: Identd and reporting IO signals
-  *    @sk_user_data: RPC layer private data
+  *    @sk_user_data: RPC layer private data. Write-protected by @sk_callback_lock.
   *    @sk_frag: cached page frag
   *    @sk_peek_off: current peek_offset value
   *    @sk_send_head: front of stuff to transmit
index 6ce3bd2..5ad7ac2 100644 (file)
 #define        DDR3PHY_PGSR                            (0x0C)          /* DDR3PHY PHY General Status Register */
 #define                DDR3PHY_PGSR_IDONE              (1 << 0)        /* Initialization Done */
 
-#define DDR3PHY_ACIOCR                         (0x24)          /*  DDR3PHY AC I/O Configuration Register */
+#define        DDR3PHY_ACDLLCR                         (0x14)          /* DDR3PHY AC DLL Control Register */
+#define                DDR3PHY_ACDLLCR_DLLSRST         (1 << 30)       /* DLL Soft Reset */
+
+#define DDR3PHY_ACIOCR                         (0x24)          /* DDR3PHY AC I/O Configuration Register */
 #define                DDR3PHY_ACIOCR_CSPDD_CS0        (1 << 18)       /* CS#[0] Power Down Driver */
 #define                DDR3PHY_ACIOCR_CKPDD_CK0        (1 << 8)        /* CK[0] Power Down Driver */
 #define                DDR3PHY_ACIORC_ACPDD            (1 << 3)        /* AC Power Down Driver */
index 83fd81c..9fbd383 100644 (file)
@@ -84,8 +84,8 @@ enum sof_ipc_dai_type {
        SOF_DAI_AMD_BT,                 /**< AMD ACP BT*/
        SOF_DAI_AMD_SP,                 /**< AMD ACP SP */
        SOF_DAI_AMD_DMIC,               /**< AMD ACP DMIC */
-       SOF_DAI_AMD_HS,                 /**< Amd HS */
        SOF_DAI_MEDIATEK_AFE,           /**< Mediatek AFE */
+       SOF_DAI_AMD_HS,                 /**< Amd HS */
 };
 
 /* general purpose DAI configuration */
index 65e86e4..7519385 100644 (file)
@@ -36,6 +36,10 @@ enum sof_ipc_ext_data {
        SOF_IPC_EXT_USER_ABI_INFO       = 4,
 };
 
+/* Build u32 number in format MMmmmppp */
+#define SOF_FW_VER(MAJOR, MINOR, PATCH) ((uint32_t)( \
+       ((MAJOR) << 24) | ((MINOR) << 12) | (PATCH)))
+
 /* FW version - SOF_IPC_GLB_VERSION */
 struct sof_ipc_fw_version {
        struct sof_ipc_hdr hdr;
index 935af49..760455d 100644 (file)
@@ -171,15 +171,15 @@ TRACE_EVENT(mm_collapse_huge_page_swapin,
 
 TRACE_EVENT(mm_khugepaged_scan_file,
 
-       TP_PROTO(struct mm_struct *mm, struct page *page, const char *filename,
+       TP_PROTO(struct mm_struct *mm, struct page *page, struct file *file,
                 int present, int swap, int result),
 
-       TP_ARGS(mm, page, filename, present, swap, result),
+       TP_ARGS(mm, page, file, present, swap, result),
 
        TP_STRUCT__entry(
                __field(struct mm_struct *, mm)
                __field(unsigned long, pfn)
-               __string(filename, filename)
+               __string(filename, file->f_path.dentry->d_iname)
                __field(int, present)
                __field(int, swap)
                __field(int, result)
@@ -188,7 +188,7 @@ TRACE_EVENT(mm_khugepaged_scan_file,
        TP_fast_assign(
                __entry->mm = mm;
                __entry->pfn = page ? page_to_pfn(page) : -1;
-               __assign_str(filename, filename);
+               __assign_str(filename, file->f_path.dentry->d_iname);
                __entry->present = present;
                __entry->swap = swap;
                __entry->result = result;
index 961ec16..874a923 100644 (file)
@@ -100,8 +100,10 @@ struct iphdr {
        __u8    ttl;
        __u8    protocol;
        __sum16 check;
-       __be32  saddr;
-       __be32  daddr;
+       __struct_group(/* no tag */, addrs, /* no attrs */,
+               __be32  saddr;
+               __be32  daddr;
+       );
        /*The options start here. */
 };
 
index 03cdbe7..81f4243 100644 (file)
@@ -130,8 +130,10 @@ struct ipv6hdr {
        __u8                    nexthdr;
        __u8                    hop_limit;
 
-       struct  in6_addr        saddr;
-       struct  in6_addr        daddr;
+       __struct_group(/* no tag */, addrs, /* no attrs */,
+               struct  in6_addr        saddr;
+               struct  in6_addr        daddr;
+       );
 };
 
 
index abf6509..94125d3 100644 (file)
@@ -87,7 +87,7 @@ config CC_HAS_ASM_GOTO_OUTPUT
 config CC_HAS_ASM_GOTO_TIED_OUTPUT
        depends on CC_HAS_ASM_GOTO_OUTPUT
        # Detect buggy gcc and clang, fixed in gcc-11 clang-14.
-       def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .\n": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $CC -x c - -c -o /dev/null)
+       def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $CC -x c - -c -o /dev/null)
 
 config TOOLS_SUPPORT_RELR
        def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh)
index 7b47325..68dfc69 100644 (file)
@@ -101,8 +101,6 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
 err:
        if (needs_switch)
                io_rsrc_node_switch(ctx, ctx->file_data);
-       if (ret)
-               fput(file);
        return ret;
 }
 
index 4a1e482..8840cf3 100644 (file)
@@ -1768,7 +1768,7 @@ int io_poll_issue(struct io_kiocb *req, bool *locked)
        io_tw_lock(req->ctx, locked);
        if (unlikely(req->task->flags & PF_EXITING))
                return -EFAULT;
-       return io_issue_sqe(req, IO_URING_F_NONBLOCK);
+       return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT);
 }
 
 struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
index e99a79f..50bc3af 100644 (file)
@@ -17,8 +17,8 @@ enum {
        IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,
 
        /*
-        * Intended only when both REQ_F_POLLED and REQ_F_APOLL_MULTISHOT
-        * are set to indicate to the poll runner that multishot should be
+        * Intended only when both IO_URING_F_MULTISHOT is passed
+        * to indicate to the poll runner that multishot should be
         * removed and the result is set on req->cqe.res.
         */
        IOU_STOP_MULTISHOT      = -ECANCELED,
@@ -238,9 +238,14 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
 
 static inline int io_run_task_work(void)
 {
+       /*
+        * Always check-and-clear the task_work notification signal. With how
+        * signaling works for task_work, we can find it set with nothing to
+        * run. We need to clear it for that case, like get_signal() does.
+        */
+       if (test_thread_flag(TIF_NOTIFY_SIGNAL))
+               clear_notify_signal();
        if (task_work_pending(current)) {
-               if (test_thread_flag(TIF_NOTIFY_SIGNAL))
-                       clear_notify_signal();
                __set_current_state(TASK_RUNNING);
                task_work_run();
                return 1;
index 15dea91..ab83da7 100644 (file)
@@ -67,8 +67,6 @@ struct io_sr_msg {
        struct io_kiocb                 *notif;
 };
 
-#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)
-
 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
@@ -591,7 +589,8 @@ static inline void io_recv_prep_retry(struct io_kiocb *req)
  * again (for multishot).
  */
 static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
-                                 unsigned int cflags, bool mshot_finished)
+                                 unsigned int cflags, bool mshot_finished,
+                                 unsigned issue_flags)
 {
        if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                io_req_set_res(req, *ret, cflags);
@@ -614,7 +613,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 
        io_req_set_res(req, *ret, cflags);
 
-       if (req->flags & REQ_F_POLLED)
+       if (issue_flags & IO_URING_F_MULTISHOT)
                *ret = IOU_STOP_MULTISHOT;
        else
                *ret = IOU_OK;
@@ -773,8 +772,7 @@ retry_multishot:
        if (ret < min_ret) {
                if (ret == -EAGAIN && force_nonblock) {
                        ret = io_setup_async_msg(req, kmsg, issue_flags);
-                       if (ret == -EAGAIN && (req->flags & IO_APOLL_MULTI_POLLED) ==
-                                              IO_APOLL_MULTI_POLLED) {
+                       if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
                                io_kbuf_recycle(req, issue_flags);
                                return IOU_ISSUE_SKIP_COMPLETE;
                        }
@@ -803,7 +801,7 @@ retry_multishot:
        if (kmsg->msg.msg_inq)
                cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 
-       if (!io_recv_finish(req, &ret, cflags, mshot_finished))
+       if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
                goto retry_multishot;
 
        if (mshot_finished) {
@@ -869,7 +867,7 @@ retry_multishot:
        ret = sock_recvmsg(sock, &msg, flags);
        if (ret < min_ret) {
                if (ret == -EAGAIN && force_nonblock) {
-                       if ((req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED) {
+                       if (issue_flags & IO_URING_F_MULTISHOT) {
                                io_kbuf_recycle(req, issue_flags);
                                return IOU_ISSUE_SKIP_COMPLETE;
                        }
@@ -902,7 +900,7 @@ out_free:
        if (msg.msg_inq)
                cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 
-       if (!io_recv_finish(req, &ret, cflags, ret <= 0))
+       if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
                goto retry_multishot;
 
        return ret;
@@ -1289,8 +1287,7 @@ retry:
                         * return EAGAIN to arm the poll infra since it
                         * has already been done
                         */
-                       if ((req->flags & IO_APOLL_MULTI_POLLED) ==
-                           IO_APOLL_MULTI_POLLED)
+                       if (issue_flags & IO_URING_F_MULTISHOT)
                                ret = IOU_ISSUE_SKIP_COMPLETE;
                        return ret;
                }
@@ -1315,9 +1312,7 @@ retry:
                goto retry;
 
        io_req_set_res(req, ret, 0);
-       if (req->flags & REQ_F_POLLED)
-               return IOU_STOP_MULTISHOT;
-       return IOU_OK;
+       return (issue_flags & IO_URING_F_MULTISHOT) ? IOU_STOP_MULTISHOT : IOU_OK;
 }
 
 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
index f500506..d9bf176 100644 (file)
@@ -40,7 +40,14 @@ struct io_poll_table {
 };
 
 #define IO_POLL_CANCEL_FLAG    BIT(31)
-#define IO_POLL_REF_MASK       GENMASK(30, 0)
+#define IO_POLL_RETRY_FLAG     BIT(30)
+#define IO_POLL_REF_MASK       GENMASK(29, 0)
+
+/*
+ * We usually have 1-2 refs taken, 128 is more than enough and we want to
+ * maximise the margin between this amount and the moment when it overflows.
+ */
+#define IO_POLL_REF_BIAS       128
 
 #define IO_WQE_F_DOUBLE                1
 
@@ -58,6 +65,21 @@ static inline bool wqe_is_double(struct wait_queue_entry *wqe)
        return priv & IO_WQE_F_DOUBLE;
 }
 
+static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
+{
+       int v;
+
+       /*
+        * poll_refs are already elevated and we don't have much hope for
+        * grabbing the ownership. Instead of incrementing set a retry flag
+        * to notify the loop that there might have been some change.
+        */
+       v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
+       if (v & IO_POLL_REF_MASK)
+               return false;
+       return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
+}
+
 /*
  * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
  * bump it and acquire ownership. It's disallowed to modify requests while not
@@ -66,6 +88,8 @@ static inline bool wqe_is_double(struct wait_queue_entry *wqe)
  */
 static inline bool io_poll_get_ownership(struct io_kiocb *req)
 {
+       if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
+               return io_poll_get_ownership_slowpath(req);
        return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
 }
 
@@ -228,6 +252,23 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
                        return IOU_POLL_DONE;
                if (v & IO_POLL_CANCEL_FLAG)
                        return -ECANCELED;
+               /*
+                * cqe.res contains only events of the first wake up
+                * and all others are be lost. Redo vfs_poll() to get
+                * up to date state.
+                */
+               if ((v & IO_POLL_REF_MASK) != 1)
+                       req->cqe.res = 0;
+               if (v & IO_POLL_RETRY_FLAG) {
+                       req->cqe.res = 0;
+                       /*
+                        * We won't find new events that came in between
+                        * vfs_poll and the ref put unless we clear the flag
+                        * in advance.
+                        */
+                       atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
+                       v &= ~IO_POLL_RETRY_FLAG;
+               }
 
                /* the mask was stashed in __io_poll_execute */
                if (!req->cqe.res) {
@@ -239,6 +280,8 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
                        continue;
                if (req->apoll_events & EPOLLONESHOT)
                        return IOU_POLL_DONE;
+               if (io_is_uring_fops(req->file))
+                       return IOU_POLL_DONE;
 
                /* multishot, just fill a CQE and proceed */
                if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
@@ -258,11 +301,15 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
                                return ret;
                }
 
+               /* force the next iteration to vfs_poll() */
+               req->cqe.res = 0;
+
                /*
                 * Release all references, retry if someone tried to restart
                 * task_work while we were executing it.
                 */
-       } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));
+       } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
+                                       IO_POLL_REF_MASK);
 
        return IOU_POLL_NO_ACTION;
 }
@@ -506,7 +553,6 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
                                 unsigned issue_flags)
 {
        struct io_ring_ctx *ctx = req->ctx;
-       int v;
 
        INIT_HLIST_NODE(&req->hash_node);
        req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
@@ -574,11 +620,10 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 
        if (ipt->owning) {
                /*
-                * Release ownership. If someone tried to queue a tw while it was
-                * locked, kick it off for them.
+                * Try to release ownership. If we see a change of state, e.g.
+                * poll was waken up, queue up a tw, it'll deal with it.
                 */
-               v = atomic_dec_return(&req->poll_refs);
-               if (unlikely(v & IO_POLL_REF_MASK))
+               if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
                        __io_poll_execute(req, 0);
        }
        return 0;
index 7d86f05..bd2fcc4 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -275,10 +275,8 @@ static inline void shm_rmid(struct shmid_kernel *s)
 }
 
 
-static int __shm_open(struct vm_area_struct *vma)
+static int __shm_open(struct shm_file_data *sfd)
 {
-       struct file *file = vma->vm_file;
-       struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;
 
        shp = shm_lock(sfd->ns, sfd->id);
@@ -302,7 +300,15 @@ static int __shm_open(struct vm_area_struct *vma)
 /* This is called by fork, once for every shm attach. */
 static void shm_open(struct vm_area_struct *vma)
 {
-       int err = __shm_open(vma);
+       struct file *file = vma->vm_file;
+       struct shm_file_data *sfd = shm_file_data(file);
+       int err;
+
+       /* Always call underlying open if present */
+       if (sfd->vm_ops->open)
+               sfd->vm_ops->open(vma);
+
+       err = __shm_open(sfd);
        /*
         * We raced in the idr lookup or with shm_destroy().
         * Either way, the ID is busted.
@@ -359,10 +365,8 @@ static bool shm_may_destroy(struct shmid_kernel *shp)
  * The descriptor has already been removed from the current->mm->mmap list
  * and will later be kfree()d.
  */
-static void shm_close(struct vm_area_struct *vma)
+static void __shm_close(struct shm_file_data *sfd)
 {
-       struct file *file = vma->vm_file;
-       struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;
        struct ipc_namespace *ns = sfd->ns;
 
@@ -388,6 +392,18 @@ done:
        up_write(&shm_ids(ns).rwsem);
 }
 
+static void shm_close(struct vm_area_struct *vma)
+{
+       struct file *file = vma->vm_file;
+       struct shm_file_data *sfd = shm_file_data(file);
+
+       /* Always call underlying close if present */
+       if (sfd->vm_ops->close)
+               sfd->vm_ops->close(vma);
+
+       __shm_close(sfd);
+}
+
 /* Called with ns->shm_ids(ns).rwsem locked */
 static int shm_try_destroy_orphaned(int id, void *p, void *data)
 {
@@ -583,13 +599,13 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
         * IPC ID that was removed, and possibly even reused by another shm
         * segment already.  Propagate this case as an error to caller.
         */
-       ret = __shm_open(vma);
+       ret = __shm_open(sfd);
        if (ret)
                return ret;
 
        ret = call_mmap(sfd->file, vma);
        if (ret) {
-               shm_close(vma);
+               __shm_close(sfd);
                return ret;
        }
        sfd->vm_ops = vma->vm_ops;
index 04f0a04..c19719f 100644 (file)
@@ -4,7 +4,7 @@
 #include <linux/hash.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
-#include <linux/init.h>
+#include <linux/static_call.h>
 
 /* The BPF dispatcher is a multiway branch code generator. The
  * dispatcher is a mechanism to avoid the performance penalty of an
@@ -91,11 +91,6 @@ int __weak arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int n
        return -ENOTSUPP;
 }
 
-int __weak __init bpf_arch_init_dispatcher_early(void *ip)
-{
-       return -ENOTSUPP;
-}
-
 static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image, void *buf)
 {
        s64 ips[BPF_DISPATCHER_MAX] = {}, *ipsp = &ips[0];
@@ -110,17 +105,11 @@ static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image, void *b
 
 static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
 {
-       void *old, *new, *tmp;
-       u32 noff;
-       int err;
-
-       if (!prev_num_progs) {
-               old = NULL;
-               noff = 0;
-       } else {
-               old = d->image + d->image_off;
+       void *new, *tmp;
+       u32 noff = 0;
+
+       if (prev_num_progs)
                noff = d->image_off ^ (PAGE_SIZE / 2);
-       }
 
        new = d->num_progs ? d->image + noff : NULL;
        tmp = d->num_progs ? d->rw_image + noff : NULL;
@@ -134,11 +123,10 @@ static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
                        return;
        }
 
-       err = bpf_arch_text_poke(d->func, BPF_MOD_JUMP, old, new);
-       if (err || !new)
-               return;
+       __BPF_DISPATCHER_UPDATE(d, new ?: (void *)&bpf_dispatcher_nop_func);
 
-       d->image_off = noff;
+       if (new)
+               d->image_off = noff;
 }
 
 void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
index b6e7f5c..034cf87 100644 (file)
@@ -100,22 +100,21 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
                            u32 nr_elems)
 {
        struct pcpu_freelist_head *head;
-       int i, cpu, pcpu_entries;
+       unsigned int cpu, cpu_idx, i, j, n, m;
 
-       pcpu_entries = nr_elems / num_possible_cpus() + 1;
-       i = 0;
+       n = nr_elems / num_possible_cpus();
+       m = nr_elems % num_possible_cpus();
 
+       cpu_idx = 0;
        for_each_possible_cpu(cpu) {
-again:
                head = per_cpu_ptr(s->freelist, cpu);
-               /* No locking required as this is not visible yet. */
-               pcpu_freelist_push_node(head, buf);
-               i++;
-               buf += elem_size;
-               if (i == nr_elems)
-                       break;
-               if (i % pcpu_entries)
-                       goto again;
+               j = n + (cpu_idx < m ? 1 : 0);
+               for (i = 0; i < j; i++) {
+                       /* No locking required as this is not visible yet. */
+                       pcpu_freelist_push_node(head, buf);
+                       buf += elem_size;
+               }
+               cpu_idx++;
        }
 }
 
index 2256663..264b3dc 100644 (file)
@@ -6745,11 +6745,11 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
        /* Transfer references to the callee */
        err = copy_reference_state(callee, caller);
        if (err)
-               return err;
+               goto err_out;
 
        err = set_callee_state_cb(env, caller, callee, *insn_idx);
        if (err)
-               return err;
+               goto err_out;
 
        clear_caller_saved_regs(env, caller->regs);
 
@@ -6766,6 +6766,11 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
                print_verifier_state(env, callee, true);
        }
        return 0;
+
+err_out:
+       free_func_state(callee);
+       state->frame[state->curframe + 1] = NULL;
+       return err;
 }
 
 int map_set_for_each_callback_args(struct bpf_verifier_env *env,
@@ -6979,8 +6984,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
                return -EINVAL;
        }
 
-       state->curframe--;
-       caller = state->frame[state->curframe];
+       caller = state->frame[state->curframe - 1];
        if (callee->in_callback_fn) {
                /* enforce R0 return value range [0, 1]. */
                struct tnum range = callee->callback_ret_range;
@@ -7019,7 +7023,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
        }
        /* clear everything in the callee */
        free_func_state(callee);
-       state->frame[state->curframe + 1] = NULL;
+       state->frame[state->curframe--] = NULL;
        return 0;
 }
 
index 4ec3717..9d15d2d 100644 (file)
@@ -9273,6 +9273,19 @@ int perf_event_account_interrupt(struct perf_event *event)
        return __perf_event_account_interrupt(event, 1);
 }
 
+static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
+{
+       /*
+        * Due to interrupt latency (AKA "skid"), we may enter the
+        * kernel before taking an overflow, even if the PMU is only
+        * counting user events.
+        */
+       if (event->attr.exclude_kernel && !user_mode(regs))
+               return false;
+
+       return true;
+}
+
 /*
  * Generic event overflow handling, sampling.
  */
@@ -9307,15 +9320,38 @@ static int __perf_event_overflow(struct perf_event *event,
 
        if (event->attr.sigtrap) {
                /*
-                * Should not be able to return to user space without processing
-                * pending_sigtrap (kernel events can overflow multiple times).
+                * The desired behaviour of sigtrap vs invalid samples is a bit
+                * tricky; on the one hand, one should not lose the SIGTRAP if
+                * it is the first event, on the other hand, we should also not
+                * trigger the WARN or override the data address.
                 */
-               WARN_ON_ONCE(event->pending_sigtrap && event->attr.exclude_kernel);
+               bool valid_sample = sample_is_allowed(event, regs);
+               unsigned int pending_id = 1;
+
+               if (regs)
+                       pending_id = hash32_ptr((void *)instruction_pointer(regs)) ?: 1;
                if (!event->pending_sigtrap) {
-                       event->pending_sigtrap = 1;
+                       event->pending_sigtrap = pending_id;
                        local_inc(&event->ctx->nr_pending);
+               } else if (event->attr.exclude_kernel && valid_sample) {
+                       /*
+                        * Should not be able to return to user space without
+                        * consuming pending_sigtrap; with exceptions:
+                        *
+                        *  1. Where !exclude_kernel, events can overflow again
+                        *     in the kernel without returning to user space.
+                        *
+                        *  2. Events that can overflow again before the IRQ-
+                        *     work without user space progress (e.g. hrtimer).
+                        *     To approximate progress (with false negatives),
+                        *     check 32-bit hash of the current IP.
+                        */
+                       WARN_ON_ONCE(event->pending_sigtrap != pending_id);
                }
-               event->pending_addr = data->addr;
+
+               event->pending_addr = 0;
+               if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
+                       event->pending_addr = data->addr;
                irq_work_queue(&event->pending_irq);
        }
 
index cbb0bed..7670a81 100644 (file)
@@ -280,6 +280,8 @@ void gcov_info_add(struct gcov_info *dst, struct gcov_info *src)
 
                for (i = 0; i < sfn_ptr->num_counters; i++)
                        dfn_ptr->counters[i] += sfn_ptr->counters[i];
+
+               sfn_ptr = list_next_entry(sfn_ptr, head);
        }
 }
 
index cd9f5a6..3050631 100644 (file)
@@ -1766,7 +1766,13 @@ static int __unregister_kprobe_top(struct kprobe *p)
                                if ((list_p != p) && (list_p->post_handler))
                                        goto noclean;
                        }
-                       ap->post_handler = NULL;
+                       /*
+                        * For the kprobe-on-ftrace case, we keep the
+                        * post_handler setting to identify this aggrprobe
+                        * armed with kprobe_ipmodify_ops.
+                        */
+                       if (!kprobe_ftrace(ap))
+                               ap->post_handler = NULL;
                }
 noclean:
                /*
index bda8175..d38ab94 100644 (file)
@@ -171,12 +171,27 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
        return 0;
 }
 
+static bool rseq_warn_flags(const char *str, u32 flags)
+{
+       u32 test_flags;
+
+       if (!flags)
+               return false;
+       test_flags = flags & RSEQ_CS_NO_RESTART_FLAGS;
+       if (test_flags)
+               pr_warn_once("Deprecated flags (%u) in %s ABI structure", test_flags, str);
+       test_flags = flags & ~RSEQ_CS_NO_RESTART_FLAGS;
+       if (test_flags)
+               pr_warn_once("Unknown flags (%u) in %s ABI structure", test_flags, str);
+       return true;
+}
+
 static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
 {
        u32 flags, event_mask;
        int ret;
 
-       if (WARN_ON_ONCE(cs_flags & RSEQ_CS_NO_RESTART_FLAGS) || cs_flags)
+       if (rseq_warn_flags("rseq_cs", cs_flags))
                return -EINVAL;
 
        /* Get thread flags. */
@@ -184,7 +199,7 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
        if (ret)
                return ret;
 
-       if (WARN_ON_ONCE(flags & RSEQ_CS_NO_RESTART_FLAGS) || flags)
+       if (rseq_warn_flags("rseq", flags))
                return -EINVAL;
 
        /*
index cb2aa2b..daff72f 100644 (file)
@@ -4200,6 +4200,40 @@ out:
        return success;
 }
 
+static bool __task_needs_rq_lock(struct task_struct *p)
+{
+       unsigned int state = READ_ONCE(p->__state);
+
+       /*
+        * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
+        * the task is blocked. Make sure to check @state since ttwu() can drop
+        * locks at the end, see ttwu_queue_wakelist().
+        */
+       if (state == TASK_RUNNING || state == TASK_WAKING)
+               return true;
+
+       /*
+        * Ensure we load p->on_rq after p->__state, otherwise it would be
+        * possible to, falsely, observe p->on_rq == 0.
+        *
+        * See try_to_wake_up() for a longer comment.
+        */
+       smp_rmb();
+       if (p->on_rq)
+               return true;
+
+#ifdef CONFIG_SMP
+       /*
+        * Ensure the task has finished __schedule() and will not be referenced
+        * anymore. Again, see try_to_wake_up() for a longer comment.
+        */
+       smp_rmb();
+       smp_cond_load_acquire(&p->on_cpu, !VAL);
+#endif
+
+       return false;
+}
+
 /**
  * task_call_func - Invoke a function on task in fixed state
  * @p: Process for which the function is to be invoked, can be @current.
@@ -4217,28 +4251,12 @@ out:
 int task_call_func(struct task_struct *p, task_call_f func, void *arg)
 {
        struct rq *rq = NULL;
-       unsigned int state;
        struct rq_flags rf;
        int ret;
 
        raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
 
-       state = READ_ONCE(p->__state);
-
-       /*
-        * Ensure we load p->on_rq after p->__state, otherwise it would be
-        * possible to, falsely, observe p->on_rq == 0.
-        *
-        * See try_to_wake_up() for a longer comment.
-        */
-       smp_rmb();
-
-       /*
-        * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
-        * the task is blocked. Make sure to check @state since ttwu() can drop
-        * locks at the end, see ttwu_queue_wakelist().
-        */
-       if (state == TASK_RUNNING || state == TASK_WAKING || p->on_rq)
+       if (__task_needs_rq_lock(p))
                rq = __task_rq_lock(p, &rf);
 
        /*
index 9161d11..1207c78 100644 (file)
@@ -25,9 +25,6 @@ struct sugov_policy {
        unsigned int            next_freq;
        unsigned int            cached_raw_freq;
 
-       /* max CPU capacity, which is equal for all CPUs in freq. domain */
-       unsigned long           max;
-
        /* The next fields are only needed if fast switch cannot be used: */
        struct                  irq_work irq_work;
        struct                  kthread_work work;
@@ -51,6 +48,7 @@ struct sugov_cpu {
 
        unsigned long           util;
        unsigned long           bw_dl;
+       unsigned long           max;
 
        /* The field below is for single-CPU policies only: */
 #ifdef CONFIG_NO_HZ_COMMON
@@ -160,6 +158,7 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
 {
        struct rq *rq = cpu_rq(sg_cpu->cpu);
 
+       sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
        sg_cpu->bw_dl = cpu_bw_dl(rq);
        sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu),
                                          FREQUENCY_UTIL, NULL);
@@ -254,7 +253,6 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
  */
 static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
 {
-       struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned long boost;
 
        /* No boost currently required */
@@ -282,8 +280,7 @@ static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
         * sg_cpu->util is already in capacity scale; convert iowait_boost
         * into the same scale so we can compare.
         */
-       boost = sg_cpu->iowait_boost * sg_policy->max;
-       boost >>= SCHED_CAPACITY_SHIFT;
+       boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT;
        boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);
        if (sg_cpu->util < boost)
                sg_cpu->util = boost;
@@ -340,7 +337,7 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
        if (!sugov_update_single_common(sg_cpu, time, flags))
                return;
 
-       next_f = get_next_freq(sg_policy, sg_cpu->util, sg_policy->max);
+       next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max);
        /*
         * Do not reduce the frequency if the CPU has not been idle
         * recently, as the reduction is likely to be premature then.
@@ -376,7 +373,6 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
                                     unsigned int flags)
 {
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
-       struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned long prev_util = sg_cpu->util;
 
        /*
@@ -403,8 +399,7 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
                sg_cpu->util = prev_util;
 
        cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
-                                  map_util_perf(sg_cpu->util),
-                                  sg_policy->max);
+                                  map_util_perf(sg_cpu->util), sg_cpu->max);
 
        sg_cpu->sg_policy->last_freq_update_time = time;
 }
@@ -413,19 +408,25 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 {
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
-       unsigned long util = 0;
+       unsigned long util = 0, max = 1;
        unsigned int j;
 
        for_each_cpu(j, policy->cpus) {
                struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
+               unsigned long j_util, j_max;
 
                sugov_get_util(j_sg_cpu);
                sugov_iowait_apply(j_sg_cpu, time);
+               j_util = j_sg_cpu->util;
+               j_max = j_sg_cpu->max;
 
-               util = max(j_sg_cpu->util, util);
+               if (j_util * max > j_max * util) {
+                       util = j_util;
+                       max = j_max;
+               }
        }
 
-       return get_next_freq(sg_policy, util, sg_policy->max);
+       return get_next_freq(sg_policy, util, max);
 }
 
 static void
@@ -751,7 +752,7 @@ static int sugov_start(struct cpufreq_policy *policy)
 {
        struct sugov_policy *sg_policy = policy->governor_data;
        void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
-       unsigned int cpu = cpumask_first(policy->cpus);
+       unsigned int cpu;
 
        sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
        sg_policy->last_freq_update_time        = 0;
@@ -759,7 +760,6 @@ static int sugov_start(struct cpufreq_policy *policy)
        sg_policy->work_in_progress             = false;
        sg_policy->limits_changed               = false;
        sg_policy->cached_raw_freq              = 0;
-       sg_policy->max                          = arch_scale_cpu_capacity(cpu);
 
        sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
 
index 7dc0236..3323624 100644 (file)
@@ -1289,6 +1289,7 @@ static int ftrace_add_mod(struct trace_array *tr,
        if (!ftrace_mod)
                return -ENOMEM;
 
+       INIT_LIST_HEAD(&ftrace_mod->list);
        ftrace_mod->func = kstrdup(func, GFP_KERNEL);
        ftrace_mod->module = kstrdup(module, GFP_KERNEL);
        ftrace_mod->enable = enable;
@@ -3190,7 +3191,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
                /* if we can't allocate this size, try something smaller */
                if (!order)
                        return -ENOMEM;
-               order >>= 1;
+               order--;
                goto again;
        }
 
@@ -7391,7 +7392,7 @@ void __init ftrace_init(void)
        }
 
        pr_info("ftrace: allocating %ld entries in %ld pages\n",
-               count, count / ENTRIES_PER_PAGE + 1);
+               count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
 
        ret = ftrace_process_locs(NULL,
                                  __start_mcount_loc,
index d81f7c5..c736487 100644 (file)
@@ -73,6 +73,10 @@ static struct trace_event_file *gen_kretprobe_test;
 #define KPROBE_GEN_TEST_ARG3   NULL
 #endif
 
+static bool trace_event_file_is_valid(struct trace_event_file *input)
+{
+       return input && !IS_ERR(input);
+}
 
 /*
  * Test to make sure we can create a kprobe event, then add more
@@ -139,6 +143,8 @@ static int __init test_gen_kprobe_cmd(void)
        kfree(buf);
        return ret;
  delete:
+       if (trace_event_file_is_valid(gen_kprobe_test))
+               gen_kprobe_test = NULL;
        /* We got an error after creating the event, delete it */
        ret = kprobe_event_delete("gen_kprobe_test");
        goto out;
@@ -202,6 +208,8 @@ static int __init test_gen_kretprobe_cmd(void)
        kfree(buf);
        return ret;
  delete:
+       if (trace_event_file_is_valid(gen_kretprobe_test))
+               gen_kretprobe_test = NULL;
        /* We got an error after creating the event, delete it */
        ret = kprobe_event_delete("gen_kretprobe_test");
        goto out;
@@ -217,10 +225,12 @@ static int __init kprobe_event_gen_test_init(void)
 
        ret = test_gen_kretprobe_cmd();
        if (ret) {
-               WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
-                                                 "kprobes",
-                                                 "gen_kretprobe_test", false));
-               trace_put_event_file(gen_kretprobe_test);
+               if (trace_event_file_is_valid(gen_kretprobe_test)) {
+                       WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
+                                                         "kprobes",
+                                                         "gen_kretprobe_test", false));
+                       trace_put_event_file(gen_kretprobe_test);
+               }
                WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
        }
 
@@ -229,24 +239,30 @@ static int __init kprobe_event_gen_test_init(void)
 
 static void __exit kprobe_event_gen_test_exit(void)
 {
-       /* Disable the event or you can't remove it */
-       WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
-                                         "kprobes",
-                                         "gen_kprobe_test", false));
+       if (trace_event_file_is_valid(gen_kprobe_test)) {
+               /* Disable the event or you can't remove it */
+               WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
+                                                 "kprobes",
+                                                 "gen_kprobe_test", false));
+
+               /* Now give the file and instance back */
+               trace_put_event_file(gen_kprobe_test);
+       }
 
-       /* Now give the file and instance back */
-       trace_put_event_file(gen_kprobe_test);
 
        /* Now unregister and free the event */
        WARN_ON(kprobe_event_delete("gen_kprobe_test"));
 
-       /* Disable the event or you can't remove it */
-       WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
-                                         "kprobes",
-                                         "gen_kretprobe_test", false));
+       if (trace_event_file_is_valid(gen_kretprobe_test)) {
+               /* Disable the event or you can't remove it */
+               WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
+                                                 "kprobes",
+                                                 "gen_kretprobe_test", false));
+
+               /* Now give the file and instance back */
+               trace_put_event_file(gen_kretprobe_test);
+       }
 
-       /* Now give the file and instance back */
-       trace_put_event_file(gen_kretprobe_test);
 
        /* Now unregister and free the event */
        WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
index c69d822..32c3dfd 100644 (file)
@@ -83,8 +83,10 @@ struct rethook *rethook_alloc(void *data, rethook_handler_t handler)
 {
        struct rethook *rh = kzalloc(sizeof(struct rethook), GFP_KERNEL);
 
-       if (!rh || !handler)
+       if (!rh || !handler) {
+               kfree(rh);
                return NULL;
+       }
 
        rh->data = data;
        rh->handler = handler;
index 9712083..b21bf14 100644 (file)
@@ -519,6 +519,7 @@ struct ring_buffer_per_cpu {
        local_t                         committing;
        local_t                         commits;
        local_t                         pages_touched;
+       local_t                         pages_lost;
        local_t                         pages_read;
        long                            last_pages_touch;
        size_t                          shortest_full;
@@ -894,10 +895,18 @@ size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
 {
        size_t read;
+       size_t lost;
        size_t cnt;
 
        read = local_read(&buffer->buffers[cpu]->pages_read);
+       lost = local_read(&buffer->buffers[cpu]->pages_lost);
        cnt = local_read(&buffer->buffers[cpu]->pages_touched);
+
+       if (WARN_ON_ONCE(cnt < lost))
+               return 0;
+
+       cnt -= lost;
+
        /* The reader can read an empty page, but not more than that */
        if (cnt < read) {
                WARN_ON_ONCE(read > cnt + 1);
@@ -907,6 +916,21 @@ size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
        return cnt - read;
 }
 
+static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
+{
+       struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+       size_t nr_pages;
+       size_t dirty;
+
+       nr_pages = cpu_buffer->nr_pages;
+       if (!nr_pages || !full)
+               return true;
+
+       dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
+
+       return (dirty * 100) > (full * nr_pages);
+}
+
 /*
  * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
  *
@@ -1046,22 +1070,20 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
                    !ring_buffer_empty_cpu(buffer, cpu)) {
                        unsigned long flags;
                        bool pagebusy;
-                       size_t nr_pages;
-                       size_t dirty;
+                       bool done;
 
                        if (!full)
                                break;
 
                        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
                        pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
-                       nr_pages = cpu_buffer->nr_pages;
-                       dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
+                       done = !pagebusy && full_hit(buffer, cpu, full);
+
                        if (!cpu_buffer->shortest_full ||
                            cpu_buffer->shortest_full > full)
                                cpu_buffer->shortest_full = full;
                        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-                       if (!pagebusy &&
-                           (!nr_pages || (dirty * 100) > full * nr_pages))
+                       if (done)
                                break;
                }
 
@@ -1087,6 +1109,7 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
  * @cpu: the cpu buffer to wait on
  * @filp: the file descriptor
  * @poll_table: The poll descriptor
+ * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
  *
  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
  * as data is added to any of the @buffer's cpu buffers. Otherwise
@@ -1096,14 +1119,15 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
  * zero otherwise.
  */
 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
-                         struct file *filp, poll_table *poll_table)
+                         struct file *filp, poll_table *poll_table, int full)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        struct rb_irq_work *work;
 
-       if (cpu == RING_BUFFER_ALL_CPUS)
+       if (cpu == RING_BUFFER_ALL_CPUS) {
                work = &buffer->irq_work;
-       else {
+               full = 0;
+       } else {
                if (!cpumask_test_cpu(cpu, buffer->cpumask))
                        return -EINVAL;
 
@@ -1111,8 +1135,14 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
                work = &cpu_buffer->irq_work;
        }
 
-       poll_wait(filp, &work->waiters, poll_table);
-       work->waiters_pending = true;
+       if (full) {
+               poll_wait(filp, &work->full_waiters, poll_table);
+               work->full_waiters_pending = true;
+       } else {
+               poll_wait(filp, &work->waiters, poll_table);
+               work->waiters_pending = true;
+       }
+
        /*
         * There's a tight race between setting the waiters_pending and
         * checking if the ring buffer is empty.  Once the waiters_pending bit
@@ -1128,6 +1158,9 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
         */
        smp_mb();
 
+       if (full)
+               return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;
+
        if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
            (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
                return EPOLLIN | EPOLLRDNORM;
@@ -1769,9 +1802,9 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 
        free_buffer_page(cpu_buffer->reader_page);
 
-       rb_head_page_deactivate(cpu_buffer);
-
        if (head) {
+               rb_head_page_deactivate(cpu_buffer);
+
                list_for_each_entry_safe(bpage, tmp, head, list) {
                        list_del_init(&bpage->list);
                        free_buffer_page(bpage);
@@ -2007,6 +2040,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
                         */
                        local_add(page_entries, &cpu_buffer->overrun);
                        local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+                       local_inc(&cpu_buffer->pages_lost);
                }
 
                /*
@@ -2491,6 +2525,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
                 */
                local_add(entries, &cpu_buffer->overrun);
                local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+               local_inc(&cpu_buffer->pages_lost);
 
                /*
                 * The entries will be zeroed out when we move the
@@ -3155,10 +3190,6 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 static __always_inline void
 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 {
-       size_t nr_pages;
-       size_t dirty;
-       size_t full;
-
        if (buffer->irq_work.waiters_pending) {
                buffer->irq_work.waiters_pending = false;
                /* irq_work_queue() supplies it's own memory barriers */
@@ -3182,10 +3213,7 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 
        cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
 
-       full = cpu_buffer->shortest_full;
-       nr_pages = cpu_buffer->nr_pages;
-       dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
-       if (full && nr_pages && (dirty * 100) <= full * nr_pages)
+       if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
                return;
 
        cpu_buffer->irq_work.wakeup_full = true;
@@ -5248,6 +5276,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
        local_set(&cpu_buffer->committing, 0);
        local_set(&cpu_buffer->commits, 0);
        local_set(&cpu_buffer->pages_touched, 0);
+       local_set(&cpu_buffer->pages_lost, 0);
        local_set(&cpu_buffer->pages_read, 0);
        cpu_buffer->last_pages_touch = 0;
        cpu_buffer->shortest_full = 0;
index 0b15e97..8d77526 100644 (file)
@@ -120,15 +120,13 @@ static int __init test_gen_synth_cmd(void)
 
        /* Now generate a gen_synth_test event */
        ret = synth_event_trace_array(gen_synth_test, vals, ARRAY_SIZE(vals));
- out:
+ free:
+       kfree(buf);
        return ret;
  delete:
        /* We got an error after creating the event, delete it */
        synth_event_delete("gen_synth_test");
- free:
-       kfree(buf);
-
-       goto out;
+       goto free;
 }
 
 /*
@@ -227,15 +225,13 @@ static int __init test_empty_synth_event(void)
 
        /* Now trace an empty_synth_test event */
        ret = synth_event_trace_array(empty_synth_test, vals, ARRAY_SIZE(vals));
- out:
+ free:
+       kfree(buf);
        return ret;
  delete:
        /* We got an error after creating the event, delete it */
        synth_event_delete("empty_synth_test");
- free:
-       kfree(buf);
-
-       goto out;
+       goto free;
 }
 
 static struct synth_field_desc create_synth_test_fields[] = {
index 47a44b0..a7fe0e1 100644 (file)
@@ -6657,6 +6657,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
        mutex_unlock(&trace_types_lock);
 
        free_cpumask_var(iter->started);
+       kfree(iter->fmt);
        mutex_destroy(&iter->mutex);
        kfree(iter);
 
@@ -6681,7 +6682,7 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl
                return EPOLLIN | EPOLLRDNORM;
        else
                return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
-                                            filp, poll_table);
+                                            filp, poll_table, iter->tr->buffer_percent);
 }
 
 static __poll_t
@@ -7802,6 +7803,7 @@ static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
                                                   int len)
 {
        struct tracing_log_err *err;
+       char *cmd;
 
        if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
                err = alloc_tracing_log_err(len);
@@ -7810,12 +7812,12 @@ static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
 
                return err;
        }
-
+       cmd = kzalloc(len, GFP_KERNEL);
+       if (!cmd)
+               return ERR_PTR(-ENOMEM);
        err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
        kfree(err->cmd);
-       err->cmd = kzalloc(len, GFP_KERNEL);
-       if (!err->cmd)
-               return ERR_PTR(-ENOMEM);
+       err->cmd = cmd;
        list_del(&err->list);
 
        return err;
index 5dd0617..352b65e 100644 (file)
@@ -52,6 +52,7 @@ static void trace_event_probe_cleanup(struct trace_eprobe *ep)
        kfree(ep->event_system);
        if (ep->event)
                trace_event_put_ref(ep->event);
+       kfree(ep->filter_str);
        kfree(ep);
 }
 
@@ -563,6 +564,9 @@ static void eprobe_trigger_func(struct event_trigger_data *data,
 {
        struct eprobe_data *edata = data->private_data;
 
+       if (unlikely(!rec))
+               return;
+
        __eprobe_trace_func(edata, rec);
 }
 
@@ -642,7 +646,7 @@ new_eprobe_trigger(struct trace_eprobe *ep, struct trace_event_file *file)
        INIT_LIST_HEAD(&trigger->list);
 
        if (ep->filter_str) {
-               ret = create_event_filter(file->tr, file->event_call,
+               ret = create_event_filter(file->tr, ep->event,
                                        ep->filter_str, false, &filter);
                if (ret)
                        goto error;
@@ -900,7 +904,7 @@ static int trace_eprobe_tp_update_arg(struct trace_eprobe *ep, const char *argv[
 
 static int trace_eprobe_parse_filter(struct trace_eprobe *ep, int argc, const char *argv[])
 {
-       struct event_filter *dummy;
+       struct event_filter *dummy = NULL;
        int i, ret, len = 0;
        char *p;
 
index e310052..29fbfb2 100644 (file)
@@ -828,10 +828,9 @@ static int register_synth_event(struct synth_event *event)
        }
 
        ret = set_synth_event_print_fmt(call);
-       if (ret < 0) {
+       /* unregister_trace_event() will be called inside */
+       if (ret < 0)
                trace_remove_event_call(call);
-               goto err;
-       }
  out:
        return ret;
  err:
index b69e207..942ddbd 100644 (file)
@@ -201,8 +201,6 @@ print_syscall_exit(struct trace_iterator *iter, int flags,
        return trace_handle_return(s);
 }
 
-extern char *__bad_type_size(void);
-
 #define SYSCALL_FIELD(_type, _name) {                                  \
        .type = #_type, .name = #_name,                                 \
        .size = sizeof(_type), .align = __alignof__(_type),             \
index c3c0b07..a100541 100644 (file)
@@ -2107,6 +2107,7 @@ config KPROBES_SANITY_TEST
        depends on DEBUG_KERNEL
        depends on KPROBES
        depends on KUNIT
+       select STACKTRACE if ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
        default KUNIT_ALL_TESTS
        help
          This option provides for testing basic kprobes functionality on
index 96e092d..adb2f93 100644 (file)
@@ -41,9 +41,6 @@ EXPORT_SYMBOL_GPL(setup_fault_attr);
 
 static void fail_dump(struct fault_attr *attr)
 {
-       if (attr->no_warn)
-               return;
-
        if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
                printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
                       "name %pd, interval %lu, probability %lu, "
@@ -103,7 +100,7 @@ static inline bool fail_stacktrace(struct fault_attr *attr)
  * http://www.nongnu.org/failmalloc/
  */
 
-bool should_fail(struct fault_attr *attr, ssize_t size)
+bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags)
 {
        if (in_task()) {
                unsigned int fail_nth = READ_ONCE(current->fail_nth);
@@ -146,13 +143,19 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
                return false;
 
 fail:
-       fail_dump(attr);
+       if (!(flags & FAULT_NOWARN))
+               fail_dump(attr);
 
        if (atomic_read(&attr->times) != -1)
                atomic_dec_not_zero(&attr->times);
 
        return true;
 }
+
+bool should_fail(struct fault_attr *attr, ssize_t size)
+{
+       return should_fail_ex(attr, size, 0);
+}
 EXPORT_SYMBOL_GPL(should_fail);
 
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
index 9f1219a..5ce4033 100644 (file)
@@ -2339,6 +2339,10 @@ static int damon_sysfs_upd_schemes_stats(struct damon_sysfs_kdamond *kdamond)
        damon_for_each_scheme(scheme, ctx) {
                struct damon_sysfs_stats *sysfs_stats;
 
+               /* user could have removed the scheme sysfs dir */
+               if (schemes_idx >= sysfs_schemes->nr)
+                       break;
+
                sysfs_stats = sysfs_schemes->schemes_arr[schemes_idx++]->stats;
                sysfs_stats->nr_tried = scheme->stat.nr_tried;
                sysfs_stats->sz_tried = scheme->stat.sz_tried;
index 58df978..ffc420c 100644 (file)
@@ -16,6 +16,8 @@ static struct {
 
 bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
 {
+       int flags = 0;
+
        /* No fault-injection for bootstrap cache */
        if (unlikely(s == kmem_cache))
                return false;
@@ -30,10 +32,16 @@ bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
        if (failslab.cache_filter && !(s->flags & SLAB_FAILSLAB))
                return false;
 
+       /*
+        * In some cases, it expects to specify __GFP_NOWARN
+        * to avoid printing any information(not just a warning),
+        * thus avoiding deadlocks. See commit 6b9dbedbe349 for
+        * details.
+        */
        if (gfpflags & __GFP_NOWARN)
-               failslab.attr.no_warn = true;
+               flags |= FAULT_NOWARN;
 
-       return should_fail(&failslab.attr, s->object_size);
+       return should_fail_ex(&failslab.attr, s->object_size, flags);
 }
 
 static int __init setup_failslab(char *str)
index e48f8ef..f1385c3 100644 (file)
@@ -1800,6 +1800,7 @@ static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
 
        /* we rely on prep_new_huge_page to set the destructor */
        set_compound_order(page, order);
+       __ClearPageReserved(page);
        __SetPageHead(page);
        for (i = 0; i < nr_pages; i++) {
                p = nth_page(page, i);
@@ -1816,7 +1817,8 @@ static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
                 * on the head page when they need know if put_page() is needed
                 * after get_user_pages().
                 */
-               __ClearPageReserved(p);
+               if (i != 0)     /* head page cleared above */
+                       __ClearPageReserved(p);
                /*
                 * Subtle and very unlikely
                 *
index 7e49685..46ecea1 100644 (file)
@@ -75,18 +75,23 @@ static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries
 
                if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfence_") ||
                    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kfence_") ||
+                   str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmem_cache_free") ||
                    !strncmp(buf, ARCH_FUNC_PREFIX "__slab_free", len)) {
                        /*
-                        * In case of tail calls from any of the below
-                        * to any of the above.
+                        * In case of tail calls from any of the below to any of
+                        * the above, optimized by the compiler such that the
+                        * stack trace would omit the initial entry point below.
                         */
                        fallback = skipnr + 1;
                }
 
-               /* Also the *_bulk() variants by only checking prefixes. */
+               /*
+                * The below list should only include the initial entry points
+                * into the slab allocators. Includes the *_bulk() variants by
+                * checking prefixes.
+                */
                if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfree") ||
                    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_free") ||
-                   str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmem_cache_free") ||
                    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmalloc") ||
                    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_alloc"))
                        goto found;
index 4734315..a8d5ef2 100644 (file)
@@ -97,8 +97,8 @@ struct collapse_control {
        /* Num pages scanned per node */
        u32 node_load[MAX_NUMNODES];
 
-       /* Last target selected in hpage_collapse_find_target_node() */
-       int last_target_node;
+       /* nodemask for allocation fallback */
+       nodemask_t alloc_nmask;
 };
 
 /**
@@ -734,7 +734,6 @@ static void khugepaged_alloc_sleep(void)
 
 struct collapse_control khugepaged_collapse_control = {
        .is_khugepaged = true,
-       .last_target_node = NUMA_NO_NODE,
 };
 
 static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
@@ -783,16 +782,11 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
                        target_node = nid;
                }
 
-       /* do some balance if several nodes have the same hit record */
-       if (target_node <= cc->last_target_node)
-               for (nid = cc->last_target_node + 1; nid < MAX_NUMNODES;
-                    nid++)
-                       if (max_value == cc->node_load[nid]) {
-                               target_node = nid;
-                               break;
-                       }
+       for_each_online_node(nid) {
+               if (max_value == cc->node_load[nid])
+                       node_set(nid, cc->alloc_nmask);
+       }
 
-       cc->last_target_node = target_node;
        return target_node;
 }
 #else
@@ -802,9 +796,10 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
 }
 #endif
 
-static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node)
+static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
+                                     nodemask_t *nmask)
 {
-       *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
+       *hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
        if (unlikely(!*hpage)) {
                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                return false;
@@ -955,12 +950,11 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
 static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
                              struct collapse_control *cc)
 {
-       /* Only allocate from the target node */
        gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
-                    GFP_TRANSHUGE) | __GFP_THISNODE;
+                    GFP_TRANSHUGE);
        int node = hpage_collapse_find_target_node(cc);
 
-       if (!hpage_collapse_alloc_page(hpage, gfp, node))
+       if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
                return SCAN_ALLOC_HUGE_PAGE_FAIL;
        if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))
                return SCAN_CGROUP_CHARGE_FAIL;
@@ -1144,6 +1138,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
                goto out;
 
        memset(cc->node_load, 0, sizeof(cc->node_load));
+       nodes_clear(cc->alloc_nmask);
        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
        for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
             _pte++, _address += PAGE_SIZE) {
@@ -2077,6 +2072,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
        present = 0;
        swap = 0;
        memset(cc->node_load, 0, sizeof(cc->node_load));
+       nodes_clear(cc->alloc_nmask);
        rcu_read_lock();
        xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
                if (xas_retry(&xas, page))
@@ -2157,8 +2153,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
                }
        }
 
-       trace_mm_khugepaged_scan_file(mm, page, file->f_path.dentry->d_iname,
-                                     present, swap, result);
+       trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
        return result;
 }
 #else
@@ -2576,7 +2571,6 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
        if (!cc)
                return -ENOMEM;
        cc->is_khugepaged = false;
-       cc->last_target_node = NUMA_NO_NODE;
 
        mmgrab(mm);
        lru_add_drain_all();
@@ -2602,6 +2596,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
                }
                mmap_assert_locked(mm);
                memset(cc->node_load, 0, sizeof(cc->node_load));
+               nodes_clear(cc->alloc_nmask);
                if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
                        struct file *file = get_file(vma->vm_file);
                        pgoff_t pgoff = linear_page_index(vma, addr);
index 5f4d240..074f6b0 100644 (file)
@@ -97,7 +97,7 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
        return src - unsafe_addr;
 Efault:
        pagefault_enable();
-       dst[-1] = '\0';
+       dst[0] = '\0';
        return -EFAULT;
 }
 
index 2d8549a..a1a35c1 100644 (file)
@@ -3026,7 +3026,7 @@ struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
 {
        struct obj_cgroup *objcg;
 
-       if (!memcg_kmem_enabled() || memcg_kmem_bypass())
+       if (!memcg_kmem_enabled())
                return NULL;
 
        if (PageMemcgKmem(page)) {
index f88c351..8a6d5c8 100644 (file)
@@ -3763,7 +3763,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                         */
                        get_page(vmf->page);
                        pte_unmap_unlock(vmf->pte, vmf->ptl);
-                       vmf->page->pgmap->ops->migrate_to_ram(vmf);
+                       ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
                        put_page(vmf->page);
                } else if (is_hwpoison_entry(entry)) {
                        ret = VM_FAULT_HWPOISON;
index 6fa682e..721b236 100644 (file)
@@ -357,7 +357,8 @@ static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
 }
 
 /*
- * Unmaps pages for migration. Returns number of unmapped pages.
+ * Unmaps pages for migration. Returns number of source pfns marked as
+ * migrating.
  */
 static unsigned long migrate_device_unmap(unsigned long *src_pfns,
                                          unsigned long npages,
@@ -373,8 +374,11 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
                struct page *page = migrate_pfn_to_page(src_pfns[i]);
                struct folio *folio;
 
-               if (!page)
+               if (!page) {
+                       if (src_pfns[i] & MIGRATE_PFN_MIGRATE)
+                               unmapped++;
                        continue;
+               }
 
                /* ZONE_DEVICE pages are not on LRU */
                if (!is_zone_device_page(page)) {
index c3c5c1d..74a84eb 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -456,7 +456,7 @@ void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
  * vma_mas_szero() - Set a given range to zero.  Used when modifying a
  * vm_area_struct start or end.
  *
- * @mm: The struct_mm
+ * @mas: The maple tree ma_state
  * @start: The start address to zero
  * @end: The end address to zero.
  */
index 218b28e..6e60657 100644 (file)
@@ -3887,6 +3887,8 @@ __setup("fail_page_alloc=", setup_fail_page_alloc);
 
 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 {
+       int flags = 0;
+
        if (order < fail_page_alloc.min_order)
                return false;
        if (gfp_mask & __GFP_NOFAIL)
@@ -3897,10 +3899,11 @@ static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
                        (gfp_mask & __GFP_DIRECT_RECLAIM))
                return false;
 
+       /* See comment in __should_failslab() */
        if (gfp_mask & __GFP_NOWARN)
-               fail_page_alloc.attr.no_warn = true;
+               flags |= FAULT_NOWARN;
 
-       return should_fail(&fail_page_alloc.attr, 1 << order);
+       return should_fail_ex(&fail_page_alloc.attr, 1 << order, flags);
 }
 
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
index affe802..ddf1968 100644 (file)
@@ -166,7 +166,7 @@ struct page_ext *page_ext_get(struct page *page)
 
 /**
  * page_ext_put() - Working with page extended information is done.
- * @page_ext - Page extended information received from page_ext_get().
+ * @page_ext: Page extended information received from page_ext_get().
  *
  * The page extended information of the page may not be valid after this
  * function is called.
index 5fc1237..72e481a 100644 (file)
@@ -973,23 +973,23 @@ done:
 scan:
        spin_unlock(&si->lock);
        while (++offset <= READ_ONCE(si->highest_bit)) {
-               if (swap_offset_available_and_locked(si, offset))
-                       goto checks;
                if (unlikely(--latency_ration < 0)) {
                        cond_resched();
                        latency_ration = LATENCY_LIMIT;
                        scanned_many = true;
                }
+               if (swap_offset_available_and_locked(si, offset))
+                       goto checks;
        }
        offset = si->lowest_bit;
        while (offset < scan_base) {
-               if (swap_offset_available_and_locked(si, offset))
-                       goto checks;
                if (unlikely(--latency_ration < 0)) {
                        cond_resched();
                        latency_ration = LATENCY_LIMIT;
                        scanned_many = true;
                }
+               if (swap_offset_available_and_locked(si, offset))
+                       goto checks;
                offset++;
        }
        spin_lock(&si->lock);
index 04d8b88..026199c 100644 (file)
@@ -2514,8 +2514,20 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
         * the flushers simply cannot keep up with the allocation
         * rate. Nudge the flusher threads in case they are asleep.
         */
-       if (stat.nr_unqueued_dirty == nr_taken)
+       if (stat.nr_unqueued_dirty == nr_taken) {
                wakeup_flusher_threads(WB_REASON_VMSCAN);
+               /*
+                * For cgroupv1 dirty throttling is achieved by waking up
+                * the kernel flusher here and later waiting on folios
+                * which are in writeback to finish (see shrink_folio_list()).
+                *
+                * Flusher may not be able to issue writeback quickly
+                * enough for cgroupv1 writeback throttling to work
+                * on a large system.
+                */
+               if (!writeback_throttling_sane(sc))
+                       reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
+       }
 
        sc->nr.dirty += stat.nr_dirty;
        sc->nr.congested += stat.nr_congested;
@@ -4971,10 +4983,13 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
        int scanned;
        int reclaimed;
        LIST_HEAD(list);
+       LIST_HEAD(clean);
        struct folio *folio;
+       struct folio *next;
        enum vm_event_item item;
        struct reclaim_stat stat;
        struct lru_gen_mm_walk *walk;
+       bool skip_retry = false;
        struct mem_cgroup *memcg = lruvec_memcg(lruvec);
        struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 
@@ -4991,20 +5006,37 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
 
        if (list_empty(&list))
                return scanned;
-
+retry:
        reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false);
+       sc->nr_reclaimed += reclaimed;
 
-       list_for_each_entry(folio, &list, lru) {
-               /* restore LRU_REFS_FLAGS cleared by isolate_folio() */
-               if (folio_test_workingset(folio))
-                       folio_set_referenced(folio);
+       list_for_each_entry_safe_reverse(folio, next, &list, lru) {
+               if (!folio_evictable(folio)) {
+                       list_del(&folio->lru);
+                       folio_putback_lru(folio);
+                       continue;
+               }
 
-               /* don't add rejected pages to the oldest generation */
                if (folio_test_reclaim(folio) &&
-                   (folio_test_dirty(folio) || folio_test_writeback(folio)))
-                       folio_clear_active(folio);
-               else
-                       folio_set_active(folio);
+                   (folio_test_dirty(folio) || folio_test_writeback(folio))) {
+                       /* restore LRU_REFS_FLAGS cleared by isolate_folio() */
+                       if (folio_test_workingset(folio))
+                               folio_set_referenced(folio);
+                       continue;
+               }
+
+               if (skip_retry || folio_test_active(folio) || folio_test_referenced(folio) ||
+                   folio_mapped(folio) || folio_test_locked(folio) ||
+                   folio_test_dirty(folio) || folio_test_writeback(folio)) {
+                       /* don't add rejected folios to the oldest generation */
+                       set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS,
+                                     BIT(PG_active));
+                       continue;
+               }
+
+               /* retry folios that may have missed folio_rotate_reclaimable() */
+               list_move(&folio->lru, &clean);
+               sc->nr_scanned -= folio_nr_pages(folio);
        }
 
        spin_lock_irq(&lruvec->lru_lock);
@@ -5026,7 +5058,13 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
        mem_cgroup_uncharge_list(&list);
        free_unref_page_list(&list);
 
-       sc->nr_reclaimed += reclaimed;
+       INIT_LIST_HEAD(&list);
+       list_splice_init(&clean, &list);
+
+       if (!list_empty(&list)) {
+               skip_retry = true;
+               goto retry;
+       }
 
        if (need_swapping && type == LRU_GEN_ANON)
                *need_swapping = true;
@@ -5844,8 +5882,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
        enum lru_list lru;
        unsigned long nr_reclaimed = 0;
        unsigned long nr_to_reclaim = sc->nr_to_reclaim;
+       bool proportional_reclaim;
        struct blk_plug plug;
-       bool scan_adjusted;
 
        if (lru_gen_enabled()) {
                lru_gen_shrink_lruvec(lruvec, sc);
@@ -5868,8 +5906,8 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
         * abort proportional reclaim if either the file or anon lru has already
         * dropped to zero at the first pass.
         */
-       scan_adjusted = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
-                        sc->priority == DEF_PRIORITY);
+       proportional_reclaim = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
+                               sc->priority == DEF_PRIORITY);
 
        blk_start_plug(&plug);
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -5889,7 +5927,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 
                cond_resched();
 
-               if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
+               if (nr_reclaimed < nr_to_reclaim || proportional_reclaim)
                        continue;
 
                /*
@@ -5940,8 +5978,6 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
                nr_scanned = targets[lru] - nr[lru];
                nr[lru] = targets[lru] * (100 - percentage) / 100;
                nr[lru] -= min(nr[lru], nr_scanned);
-
-               scan_adjusted = true;
        }
        blk_finish_plug(&plug);
        sc->nr_reclaimed += nr_reclaimed;
index 56a1867..eeea0a6 100644 (file)
@@ -120,7 +120,7 @@ struct p9_conn {
        struct list_head unsent_req_list;
        struct p9_req_t *rreq;
        struct p9_req_t *wreq;
-       char tmp_buf[7];
+       char tmp_buf[P9_HDRSZ];
        struct p9_fcall rc;
        int wpos;
        int wsize;
@@ -202,9 +202,11 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
 
        list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
+               req->status = REQ_STATUS_ERROR;
        }
        list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
+               req->status = REQ_STATUS_ERROR;
        }
 
        spin_unlock(&m->req_lock);
@@ -291,7 +293,7 @@ static void p9_read_work(struct work_struct *work)
        if (!m->rc.sdata) {
                m->rc.sdata = m->tmp_buf;
                m->rc.offset = 0;
-               m->rc.capacity = 7; /* start by reading header */
+               m->rc.capacity = P9_HDRSZ; /* start by reading header */
        }
 
        clear_bit(Rpending, &m->wsched);
@@ -314,7 +316,7 @@ static void p9_read_work(struct work_struct *work)
                p9_debug(P9_DEBUG_TRANS, "got new header\n");
 
                /* Header size */
-               m->rc.size = 7;
+               m->rc.size = P9_HDRSZ;
                err = p9_parse_header(&m->rc, &m->rc.size, NULL, NULL, 0);
                if (err) {
                        p9_debug(P9_DEBUG_ERROR,
@@ -322,14 +324,6 @@ static void p9_read_work(struct work_struct *work)
                        goto error;
                }
 
-               if (m->rc.size >= m->client->msize) {
-                       p9_debug(P9_DEBUG_ERROR,
-                                "requested packet size too big: %d\n",
-                                m->rc.size);
-                       err = -EIO;
-                       goto error;
-               }
-
                p9_debug(P9_DEBUG_TRANS,
                         "mux %p pkt: size: %d bytes tag: %d\n",
                         m, m->rc.size, m->rc.tag);
@@ -342,6 +336,14 @@ static void p9_read_work(struct work_struct *work)
                        goto error;
                }
 
+               if (m->rc.size > m->rreq->rc.capacity) {
+                       p9_debug(P9_DEBUG_ERROR,
+                                "requested packet size too big: %d for tag %d with capacity %zd\n",
+                                m->rc.size, m->rc.tag, m->rreq->rc.capacity);
+                       err = -EIO;
+                       goto error;
+               }
+
                if (!m->rreq->rc.sdata) {
                        p9_debug(P9_DEBUG_ERROR,
                                 "No recv fcall for tag %d (req %p), disconnecting!\n",
index b15c641..aaa5fd3 100644 (file)
@@ -208,6 +208,14 @@ static void p9_xen_response(struct work_struct *work)
                        continue;
                }
 
+               if (h.size > req->rc.capacity) {
+                       dev_warn(&priv->dev->dev,
+                                "requested packet size too big: %d for tag %d with capacity %zd\n",
+                                h.size, h.tag, req->rc.capacity);
+                       req->status = REQ_STATUS_ERROR;
+                       goto recv_error;
+               }
+
                memcpy(&req->rc, &h, sizeof(h));
                req->rc.offset = 0;
 
@@ -217,6 +225,7 @@ static void p9_xen_response(struct work_struct *work)
                                     masked_prod, &masked_cons,
                                     XEN_9PFS_RING_SIZE(ring));
 
+recv_error:
                virt_mb();
                cons += h.size;
                ring->intf->in_cons = cons;
index 13d578c..fcb3e6c 100644 (file)
@@ -774,6 +774,7 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
        if (user_size > size)
                return ERR_PTR(-EMSGSIZE);
 
+       size = SKB_DATA_ALIGN(size);
        data = kzalloc(size + headroom + tailroom, GFP_USER);
        if (!data)
                return ERR_PTR(-ENOMEM);
index 6e53dc9..9ffd40b 100644 (file)
@@ -959,6 +959,8 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto,
        list_for_each_entry(p, &br->port_list, list) {
                vg = nbp_vlan_group(p);
                list_for_each_entry(vlan, &vg->vlan_list, vlist) {
+                       if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
+                               continue;
                        err = vlan_vid_add(p->dev, proto, vlan->vid);
                        if (err)
                                goto err_filt;
@@ -973,8 +975,11 @@ int __br_vlan_set_proto(struct net_bridge *br, __be16 proto,
        /* Delete VLANs for the old proto from the device filter. */
        list_for_each_entry(p, &br->port_list, list) {
                vg = nbp_vlan_group(p);
-               list_for_each_entry(vlan, &vg->vlan_list, vlist)
+               list_for_each_entry(vlan, &vg->vlan_list, vlist) {
+                       if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
+                               continue;
                        vlan_vid_del(p->dev, oldproto, vlan->vid);
+               }
        }
 
        return 0;
@@ -983,13 +988,19 @@ err_filt:
        attr.u.vlan_protocol = ntohs(oldproto);
        switchdev_port_attr_set(br->dev, &attr, NULL);
 
-       list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
+       list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist) {
+               if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
+                       continue;
                vlan_vid_del(p->dev, proto, vlan->vid);
+       }
 
        list_for_each_entry_continue_reverse(p, &br->port_list, list) {
                vg = nbp_vlan_group(p);
-               list_for_each_entry(vlan, &vg->vlan_list, vlist)
+               list_for_each_entry(vlan, &vg->vlan_list, vlist) {
+                       if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
+                               continue;
                        vlan_vid_del(p->dev, proto, vlan->vid);
+               }
        }
 
        return err;
index 4d63ef1..f35fc87 100644 (file)
@@ -310,9 +310,6 @@ static int chnl_net_open(struct net_device *dev)
 
        if (result == 0) {
                pr_debug("connect timeout\n");
-               caif_disconnect_client(dev_net(dev), &priv->chnl);
-               priv->state = CAIF_DISCONNECTED;
-               pr_debug("state disconnected\n");
                result = -ETIMEDOUT;
                goto error;
        }
index 25cd35f..0077304 100644 (file)
@@ -296,7 +296,7 @@ skb_flow_dissect_ct(const struct sk_buff *skb,
        key->ct_zone = ct->zone.id;
 #endif
 #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
-       key->ct_mark = ct->mark;
+       key->ct_mark = READ_ONCE(ct->mark);
 #endif
 
        cl = nf_ct_labels_find(ct);
index 6fac2f0..711cd3b 100644 (file)
@@ -48,9 +48,11 @@ static const char *lwtunnel_encap_str(enum lwtunnel_encap_types encap_type)
                return "RPL";
        case LWTUNNEL_ENCAP_IOAM6:
                return "IOAM6";
+       case LWTUNNEL_ENCAP_XFRM:
+               /* module autoload not supported for encap type */
+               return NULL;
        case LWTUNNEL_ENCAP_IP6:
        case LWTUNNEL_ENCAP_IP:
-       case LWTUNNEL_ENCAP_XFRM:
        case LWTUNNEL_ENCAP_NONE:
        case __LWTUNNEL_ENCAP_MAX:
                /* should not have got here */
index a77a85e..952a547 100644 (file)
@@ -307,7 +307,31 @@ static int neigh_del_timer(struct neighbour *n)
        return 0;
 }
 
-static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
+static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
+                                                  int family)
+{
+       switch (family) {
+       case AF_INET:
+               return __in_dev_arp_parms_get_rcu(dev);
+       case AF_INET6:
+               return __in6_dev_nd_parms_get_rcu(dev);
+       }
+       return NULL;
+}
+
+static void neigh_parms_qlen_dec(struct net_device *dev, int family)
+{
+       struct neigh_parms *p;
+
+       rcu_read_lock();
+       p = neigh_get_dev_parms_rcu(dev, family);
+       if (p)
+               p->qlen--;
+       rcu_read_unlock();
+}
+
+static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net,
+                              int family)
 {
        struct sk_buff_head tmp;
        unsigned long flags;
@@ -321,13 +345,7 @@ static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
                struct net_device *dev = skb->dev;
 
                if (net == NULL || net_eq(dev_net(dev), net)) {
-                       struct in_device *in_dev;
-
-                       rcu_read_lock();
-                       in_dev = __in_dev_get_rcu(dev);
-                       if (in_dev)
-                               in_dev->arp_parms->qlen--;
-                       rcu_read_unlock();
+                       neigh_parms_qlen_dec(dev, family);
                        __skb_unlink(skb, list);
                        __skb_queue_tail(&tmp, skb);
                }
@@ -409,7 +427,8 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
        write_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev, skip_perm);
        pneigh_ifdown_and_unlock(tbl, dev);
-       pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL);
+       pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
+                          tbl->family);
        if (skb_queue_empty_lockless(&tbl->proxy_queue))
                del_timer_sync(&tbl->proxy_timer);
        return 0;
@@ -1621,13 +1640,8 @@ static void neigh_proxy_process(struct timer_list *t)
 
                if (tdif <= 0) {
                        struct net_device *dev = skb->dev;
-                       struct in_device *in_dev;
 
-                       rcu_read_lock();
-                       in_dev = __in_dev_get_rcu(dev);
-                       if (in_dev)
-                               in_dev->arp_parms->qlen--;
-                       rcu_read_unlock();
+                       neigh_parms_qlen_dec(dev, tbl->family);
                        __skb_unlink(skb, &tbl->proxy_queue);
 
                        if (tbl->proxy_redo && netif_running(dev)) {
@@ -1821,7 +1835,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
        cancel_delayed_work_sync(&tbl->managed_work);
        cancel_delayed_work_sync(&tbl->gc_work);
        del_timer_sync(&tbl->proxy_timer);
-       pneigh_queue_purge(&tbl->proxy_queue, NULL);
+       pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family);
        neigh_ifdown(tbl, NULL);
        if (atomic_read(&tbl->entries))
                pr_crit("neighbour leakage\n");
@@ -3539,18 +3553,6 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
        return ret;
 }
 
-static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
-                                                  int family)
-{
-       switch (family) {
-       case AF_INET:
-               return __in_dev_arp_parms_get_rcu(dev);
-       case AF_INET6:
-               return __in6_dev_nd_parms_get_rcu(dev);
-       }
-       return NULL;
-}
-
 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
                                  int index)
 {
index 713b7b8..b780827 100644 (file)
@@ -45,11 +45,10 @@ static unsigned int dccp_v4_pernet_id __read_mostly;
 int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
        const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
-       struct inet_bind_hashbucket *prev_addr_hashbucket = NULL;
-       __be32 daddr, nexthop, prev_sk_rcv_saddr;
        struct inet_sock *inet = inet_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        __be16 orig_sport, orig_dport;
+       __be32 daddr, nexthop;
        struct flowi4 *fl4;
        struct rtable *rt;
        int err;
@@ -91,26 +90,13 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                daddr = fl4->daddr;
 
        if (inet->inet_saddr == 0) {
-               if (inet_csk(sk)->icsk_bind2_hash) {
-                       prev_addr_hashbucket =
-                               inet_bhashfn_portaddr(&dccp_hashinfo, sk,
-                                                     sock_net(sk),
-                                                     inet->inet_num);
-                       prev_sk_rcv_saddr = sk->sk_rcv_saddr;
-               }
-               inet->inet_saddr = fl4->saddr;
-       }
-
-       sk_rcv_saddr_set(sk, inet->inet_saddr);
-
-       if (prev_addr_hashbucket) {
-               err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk);
+               err = inet_bhash2_update_saddr(sk,  &fl4->saddr, AF_INET);
                if (err) {
-                       inet->inet_saddr = 0;
-                       sk_rcv_saddr_set(sk, prev_sk_rcv_saddr);
                        ip_rt_put(rt);
                        return err;
                }
+       } else {
+               sk_rcv_saddr_set(sk, inet->inet_saddr);
        }
 
        inet->inet_dport = usin->sin_port;
@@ -157,6 +143,7 @@ failure:
         * This unhashes the socket and releases the local port, if necessary.
         */
        dccp_set_state(sk, DCCP_CLOSED);
+       inet_bhash2_reset_saddr(sk);
        ip_rt_put(rt);
        sk->sk_route_caps = 0;
        inet->inet_dport = 0;
index e57b430..602f343 100644 (file)
@@ -934,26 +934,11 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        }
 
        if (saddr == NULL) {
-               struct inet_bind_hashbucket *prev_addr_hashbucket = NULL;
-               struct in6_addr prev_v6_rcv_saddr;
-
-               if (icsk->icsk_bind2_hash) {
-                       prev_addr_hashbucket = inet_bhashfn_portaddr(&dccp_hashinfo,
-                                                                    sk, sock_net(sk),
-                                                                    inet->inet_num);
-                       prev_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
-               }
-
                saddr = &fl6.saddr;
-               sk->sk_v6_rcv_saddr = *saddr;
-
-               if (prev_addr_hashbucket) {
-                       err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk);
-                       if (err) {
-                               sk->sk_v6_rcv_saddr = prev_v6_rcv_saddr;
-                               goto failure;
-                       }
-               }
+
+               err = inet_bhash2_update_saddr(sk, saddr, AF_INET6);
+               if (err)
+                       goto failure;
        }
 
        /* set the source address */
@@ -985,6 +970,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
 late_failure:
        dccp_set_state(sk, DCCP_CLOSED);
+       inet_bhash2_reset_saddr(sk);
        __sk_dst_reset(sk);
 failure:
        inet->inet_dport = 0;
index c548ca3..85e35c5 100644 (file)
@@ -279,8 +279,7 @@ int dccp_disconnect(struct sock *sk, int flags)
 
        inet->inet_dport = 0;
 
-       if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
-               inet_reset_saddr(sk);
+       inet_bhash2_reset_saddr(sk);
 
        sk->sk_shutdown = 0;
        sock_reset_flag(sk, SOCK_DONE);
index e504a18..5417f7b 100644 (file)
@@ -864,6 +864,14 @@ disconnect:
        return err;
 }
 
+static void dsa_switch_teardown_tag_protocol(struct dsa_switch *ds)
+{
+       const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
+
+       if (tag_ops->disconnect)
+               tag_ops->disconnect(ds);
+}
+
 static int dsa_switch_setup(struct dsa_switch *ds)
 {
        struct dsa_devlink_priv *dl_priv;
@@ -953,6 +961,8 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
                ds->slave_mii_bus = NULL;
        }
 
+       dsa_switch_teardown_tag_protocol(ds);
+
        if (ds->ops->teardown)
                ds->ops->teardown(ds);
 
index 6e65c7f..71e9707 100644 (file)
@@ -210,6 +210,7 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
 extern struct rtnl_link_ops dsa_link_ops __read_mostly;
 
 /* port.c */
+bool dsa_port_supports_hwtstamp(struct dsa_port *dp, struct ifreq *ifr);
 void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
                               const struct dsa_device_ops *tag_ops);
 int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age);
index 40367ab..421de16 100644 (file)
@@ -204,8 +204,7 @@ static int dsa_master_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                 * switch in the tree that is PTP capable.
                 */
                list_for_each_entry(dp, &dst->ports, list)
-                       if (dp->ds->ops->port_hwtstamp_get ||
-                           dp->ds->ops->port_hwtstamp_set)
+                       if (dsa_port_supports_hwtstamp(dp, ifr))
                                return -EBUSY;
                break;
        }
index 2081682..750fe68 100644 (file)
@@ -110,6 +110,22 @@ static bool dsa_port_can_configure_learning(struct dsa_port *dp)
        return !err;
 }
 
+bool dsa_port_supports_hwtstamp(struct dsa_port *dp, struct ifreq *ifr)
+{
+       struct dsa_switch *ds = dp->ds;
+       int err;
+
+       if (!ds->ops->port_hwtstamp_get || !ds->ops->port_hwtstamp_set)
+               return false;
+
+       /* "See through" shim implementations of the "get" method.
+        * This will clobber the ifreq structure, but we will either return an
+        * error, or the master will overwrite it with proper values.
+        */
+       err = ds->ops->port_hwtstamp_get(ds, dp->index, ifr);
+       return err != -EOPNOTSUPP;
+}
+
 int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
 {
        struct dsa_switch *ds = dp->ds;
index e983bb0..2dfb122 100644 (file)
@@ -402,6 +402,16 @@ config INET_IPCOMP
 
          If unsure, say Y.
 
+config INET_TABLE_PERTURB_ORDER
+       int "INET: Source port perturbation table size (as power of 2)" if EXPERT
+       default 16
+       help
+         Source port perturbation table size (as power of 2) for
+         RFC 6056 3.3.4.  Algorithm 4: Double-Hash Port Selection Algorithm.
+
+         The default is almost always what you want.
+         Only change this if you know what you are doing.
+
 config INET_XFRM_TUNNEL
        tristate
        select INET_TUNNEL
index 4728087..0da6794 100644 (file)
@@ -1230,7 +1230,6 @@ EXPORT_SYMBOL(inet_unregister_protosw);
 
 static int inet_sk_reselect_saddr(struct sock *sk)
 {
-       struct inet_bind_hashbucket *prev_addr_hashbucket;
        struct inet_sock *inet = inet_sk(sk);
        __be32 old_saddr = inet->inet_saddr;
        __be32 daddr = inet->inet_daddr;
@@ -1260,16 +1259,8 @@ static int inet_sk_reselect_saddr(struct sock *sk)
                return 0;
        }
 
-       prev_addr_hashbucket =
-               inet_bhashfn_portaddr(tcp_or_dccp_get_hashinfo(sk), sk,
-                                     sock_net(sk), inet->inet_num);
-
-       inet->inet_saddr = inet->inet_rcv_saddr = new_saddr;
-
-       err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk);
+       err = inet_bhash2_update_saddr(sk, &new_saddr, AF_INET);
        if (err) {
-               inet->inet_saddr = old_saddr;
-               inet->inet_rcv_saddr = old_saddr;
                ip_rt_put(rt);
                return err;
        }
index 1701527..3969fa8 100644 (file)
@@ -314,6 +314,9 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_
                        xo->seq.low += skb_shinfo(skb)->gso_segs;
        }
 
+       if (xo->seq.low < seq)
+               xo->seq.hi++;
+
        esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
 
        ip_hdr(skb)->tot_len = htons(skb->len);
index 452ff17..74d403d 100644 (file)
@@ -126,7 +126,7 @@ struct key_vector {
                /* This list pointer if valid if (pos | bits) == 0 (LEAF) */
                struct hlist_head leaf;
                /* This array is valid if (pos | bits) > 0 (TNODE) */
-               struct key_vector __rcu *tnode[0];
+               DECLARE_FLEX_ARRAY(struct key_vector __rcu *, tnode);
        };
 };
 
@@ -1381,8 +1381,10 @@ int fib_table_insert(struct net *net, struct fib_table *tb,
 
        /* The alias was already inserted, so the node must exist. */
        l = l ? l : fib_find_node(t, &tp, key);
-       if (WARN_ON_ONCE(!l))
+       if (WARN_ON_ONCE(!l)) {
+               err = -ENOENT;
                goto out_free_new_fa;
+       }
 
        if (fib_find_alias(&l->leaf, new_fa->fa_slen, 0, 0, tb->tb_id, true) ==
            new_fa) {
index d3dc281..3cec471 100644 (file)
@@ -858,34 +858,80 @@ inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, in
        return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
 }
 
-int inet_bhash2_update_saddr(struct inet_bind_hashbucket *prev_saddr, struct sock *sk)
+static void inet_update_saddr(struct sock *sk, void *saddr, int family)
+{
+       if (family == AF_INET) {
+               inet_sk(sk)->inet_saddr = *(__be32 *)saddr;
+               sk_rcv_saddr_set(sk, inet_sk(sk)->inet_saddr);
+       }
+#if IS_ENABLED(CONFIG_IPV6)
+       else {
+               sk->sk_v6_rcv_saddr = *(struct in6_addr *)saddr;
+       }
+#endif
+}
+
+static int __inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family, bool reset)
 {
        struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
+       struct inet_bind_hashbucket *head, *head2;
        struct inet_bind2_bucket *tb2, *new_tb2;
        int l3mdev = inet_sk_bound_l3mdev(sk);
-       struct inet_bind_hashbucket *head2;
        int port = inet_sk(sk)->inet_num;
        struct net *net = sock_net(sk);
+       int bhash;
+
+       if (!inet_csk(sk)->icsk_bind2_hash) {
+               /* Not bind()ed before. */
+               if (reset)
+                       inet_reset_saddr(sk);
+               else
+                       inet_update_saddr(sk, saddr, family);
+
+               return 0;
+       }
 
        /* Allocate a bind2 bucket ahead of time to avoid permanently putting
         * the bhash2 table in an inconsistent state if a new tb2 bucket
         * allocation fails.
         */
        new_tb2 = kmem_cache_alloc(hinfo->bind2_bucket_cachep, GFP_ATOMIC);
-       if (!new_tb2)
+       if (!new_tb2) {
+               if (reset) {
+                       /* The (INADDR_ANY, port) bucket might have already
+                        * been freed, then we cannot fixup icsk_bind2_hash,
+                        * so we give up and unlink sk from bhash/bhash2 not
+                        * to leave inconsistency in bhash2.
+                        */
+                       inet_put_port(sk);
+                       inet_reset_saddr(sk);
+               }
+
                return -ENOMEM;
+       }
 
+       bhash = inet_bhashfn(net, port, hinfo->bhash_size);
+       head = &hinfo->bhash[bhash];
        head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
 
-       if (prev_saddr) {
-               spin_lock_bh(&prev_saddr->lock);
-               __sk_del_bind2_node(sk);
-               inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep,
-                                         inet_csk(sk)->icsk_bind2_hash);
-               spin_unlock_bh(&prev_saddr->lock);
-       }
+       /* If we change saddr locklessly, another thread
+        * iterating over bhash might see corrupted address.
+        */
+       spin_lock_bh(&head->lock);
 
-       spin_lock_bh(&head2->lock);
+       spin_lock(&head2->lock);
+       __sk_del_bind2_node(sk);
+       inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, inet_csk(sk)->icsk_bind2_hash);
+       spin_unlock(&head2->lock);
+
+       if (reset)
+               inet_reset_saddr(sk);
+       else
+               inet_update_saddr(sk, saddr, family);
+
+       head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
+
+       spin_lock(&head2->lock);
        tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
        if (!tb2) {
                tb2 = new_tb2;
@@ -893,26 +939,40 @@ int inet_bhash2_update_saddr(struct inet_bind_hashbucket *prev_saddr, struct soc
        }
        sk_add_bind2_node(sk, &tb2->owners);
        inet_csk(sk)->icsk_bind2_hash = tb2;
-       spin_unlock_bh(&head2->lock);
+       spin_unlock(&head2->lock);
+
+       spin_unlock_bh(&head->lock);
 
        if (tb2 != new_tb2)
                kmem_cache_free(hinfo->bind2_bucket_cachep, new_tb2);
 
        return 0;
 }
+
+int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family)
+{
+       return __inet_bhash2_update_saddr(sk, saddr, family, false);
+}
 EXPORT_SYMBOL_GPL(inet_bhash2_update_saddr);
 
+void inet_bhash2_reset_saddr(struct sock *sk)
+{
+       if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
+               __inet_bhash2_update_saddr(sk, NULL, 0, true);
+}
+EXPORT_SYMBOL_GPL(inet_bhash2_reset_saddr);
+
 /* RFC 6056 3.3.4.  Algorithm 4: Double-Hash Port Selection Algorithm
  * Note that we use 32bit integers (vs RFC 'short integers')
  * because 2^16 is not a multiple of num_ephemeral and this
  * property might be used by clever attacker.
+ *
  * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though
- * attacks were since demonstrated, thus we use 65536 instead to really
- * give more isolation and privacy, at the expense of 256kB of kernel
- * memory.
+ * attacks were since demonstrated, thus we use 65536 by default instead
+ * to really give more isolation and privacy, at the expense of 256kB
+ * of kernel memory.
  */
-#define INET_TABLE_PERTURB_SHIFT 16
-#define INET_TABLE_PERTURB_SIZE (1 << INET_TABLE_PERTURB_SHIFT)
+#define INET_TABLE_PERTURB_SIZE (1 << CONFIG_INET_TABLE_PERTURB_ORDER)
 static u32 *table_perturb;
 
 int __inet_hash_connect(struct inet_timewait_death_row *death_row,
index 1b51239..e880ce7 100644 (file)
@@ -366,6 +366,11 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
                                           iph->tos, dev);
                if (unlikely(err))
                        goto drop_error;
+       } else {
+               struct in_device *in_dev = __in_dev_get_rcu(dev);
+
+               if (in_dev && IN_DEV_ORCONF(in_dev, NOPOLICY))
+                       IPCB(skb)->flags |= IPSKB_NOPOLICY;
        }
 
 #ifdef CONFIG_IP_ROUTE_CLASSID
index f8e176c..b3cc416 100644 (file)
@@ -435,7 +435,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
 
        switch (ctinfo) {
        case IP_CT_NEW:
-               ct->mark = hash;
+               WRITE_ONCE(ct->mark, hash);
                break;
        case IP_CT_RELATED:
        case IP_CT_RELATED_REPLY:
@@ -452,7 +452,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
 #ifdef DEBUG
        nf_ct_dump_tuple_ip(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 #endif
-       pr_debug("hash=%u ct_hash=%u ", hash, ct->mark);
+       pr_debug("hash=%u ct_hash=%u ", hash, READ_ONCE(ct->mark));
        if (!clusterip_responsible(cipinfo->config, hash)) {
                pr_debug("not responsible\n");
                return NF_DROP;
index 54836a6..4f22057 100644 (file)
@@ -3114,8 +3114,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 
        inet->inet_dport = 0;
 
-       if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
-               inet_reset_saddr(sk);
+       inet_bhash2_reset_saddr(sk);
 
        sk->sk_shutdown = 0;
        sock_reset_flag(sk, SOCK_DONE);
index 87d440f..da46357 100644 (file)
@@ -199,15 +199,14 @@ static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
 /* This will initiate an outgoing connection. */
 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
-       struct inet_bind_hashbucket *prev_addr_hashbucket = NULL;
        struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
        struct inet_timewait_death_row *tcp_death_row;
-       __be32 daddr, nexthop, prev_sk_rcv_saddr;
        struct inet_sock *inet = inet_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct ip_options_rcu *inet_opt;
        struct net *net = sock_net(sk);
        __be16 orig_sport, orig_dport;
+       __be32 daddr, nexthop;
        struct flowi4 *fl4;
        struct rtable *rt;
        int err;
@@ -251,24 +250,13 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
 
        if (!inet->inet_saddr) {
-               if (inet_csk(sk)->icsk_bind2_hash) {
-                       prev_addr_hashbucket = inet_bhashfn_portaddr(tcp_death_row->hashinfo,
-                                                                    sk, net, inet->inet_num);
-                       prev_sk_rcv_saddr = sk->sk_rcv_saddr;
-               }
-               inet->inet_saddr = fl4->saddr;
-       }
-
-       sk_rcv_saddr_set(sk, inet->inet_saddr);
-
-       if (prev_addr_hashbucket) {
-               err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk);
+               err = inet_bhash2_update_saddr(sk,  &fl4->saddr, AF_INET);
                if (err) {
-                       inet->inet_saddr = 0;
-                       sk_rcv_saddr_set(sk, prev_sk_rcv_saddr);
                        ip_rt_put(rt);
                        return err;
                }
+       } else {
+               sk_rcv_saddr_set(sk, inet->inet_saddr);
        }
 
        if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
@@ -343,6 +331,7 @@ failure:
         * if necessary.
         */
        tcp_set_state(sk, TCP_CLOSE);
+       inet_bhash2_reset_saddr(sk);
        ip_rt_put(rt);
        sk->sk_route_caps = 0;
        inet->inet_dport = 0;
index 79d4354..242f429 100644 (file)
@@ -346,6 +346,9 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features
                        xo->seq.low += skb_shinfo(skb)->gso_segs;
        }
 
+       if (xo->seq.low < seq)
+               xo->seq.hi++;
+
        esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
 
        len = skb->len - sizeof(struct ipv6hdr);
index 2a3f929..f0548db 100644 (file)
@@ -292,24 +292,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
 
        if (!saddr) {
-               struct inet_bind_hashbucket *prev_addr_hashbucket = NULL;
-               struct in6_addr prev_v6_rcv_saddr;
-
-               if (icsk->icsk_bind2_hash) {
-                       prev_addr_hashbucket = inet_bhashfn_portaddr(tcp_death_row->hashinfo,
-                                                                    sk, net, inet->inet_num);
-                       prev_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
-               }
                saddr = &fl6.saddr;
-               sk->sk_v6_rcv_saddr = *saddr;
 
-               if (prev_addr_hashbucket) {
-                       err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk);
-                       if (err) {
-                               sk->sk_v6_rcv_saddr = prev_v6_rcv_saddr;
-                               goto failure;
-                       }
-               }
+               err = inet_bhash2_update_saddr(sk, saddr, AF_INET6);
+               if (err)
+                       goto failure;
        }
 
        /* set the source address */
@@ -359,6 +346,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
 late_failure:
        tcp_set_state(sk, TCP_CLOSE);
+       inet_bhash2_reset_saddr(sk);
 failure:
        inet->inet_dport = 0;
        sk->sk_route_caps = 0;
index 4a4b0e4..ea435eb 100644 (file)
@@ -287,9 +287,13 @@ int __init xfrm6_init(void)
        if (ret)
                goto out_state;
 
-       register_pernet_subsys(&xfrm6_net_ops);
+       ret = register_pernet_subsys(&xfrm6_net_ops);
+       if (ret)
+               goto out_protocol;
 out:
        return ret;
+out_protocol:
+       xfrm6_protocol_fini();
 out_state:
        xfrm6_state_fini();
 out_policy:
index a500422..890a242 100644 (file)
@@ -222,7 +222,7 @@ static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
        struct sk_buff *skb;
        struct kcm_sock *kcm;
 
-       while ((skb = __skb_dequeue(head))) {
+       while ((skb = skb_dequeue(head))) {
                /* Reset destructor to avoid calling kcm_rcv_ready */
                skb->destructor = sock_rfree;
                skb_orphan(skb);
@@ -1085,53 +1085,17 @@ out_error:
        return err;
 }
 
-static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
-                                    long timeo, int *err)
-{
-       struct sk_buff *skb;
-
-       while (!(skb = skb_peek(&sk->sk_receive_queue))) {
-               if (sk->sk_err) {
-                       *err = sock_error(sk);
-                       return NULL;
-               }
-
-               if (sock_flag(sk, SOCK_DONE))
-                       return NULL;
-
-               if ((flags & MSG_DONTWAIT) || !timeo) {
-                       *err = -EAGAIN;
-                       return NULL;
-               }
-
-               sk_wait_data(sk, &timeo, NULL);
-
-               /* Handle signals */
-               if (signal_pending(current)) {
-                       *err = sock_intr_errno(timeo);
-                       return NULL;
-               }
-       }
-
-       return skb;
-}
-
 static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
                       size_t len, int flags)
 {
        struct sock *sk = sock->sk;
        struct kcm_sock *kcm = kcm_sk(sk);
        int err = 0;
-       long timeo;
        struct strp_msg *stm;
        int copied = 0;
        struct sk_buff *skb;
 
-       timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
-
-       lock_sock(sk);
-
-       skb = kcm_wait_data(sk, flags, timeo, &err);
+       skb = skb_recv_datagram(sk, flags, &err);
        if (!skb)
                goto out;
 
@@ -1162,14 +1126,11 @@ msg_finished:
                        /* Finished with message */
                        msg->msg_flags |= MSG_EOR;
                        KCM_STATS_INCR(kcm->stats.rx_msgs);
-                       skb_unlink(skb, &sk->sk_receive_queue);
-                       kfree_skb(skb);
                }
        }
 
 out:
-       release_sock(sk);
-
+       skb_free_datagram(sk, skb);
        return copied ? : err;
 }
 
@@ -1179,7 +1140,6 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
 {
        struct sock *sk = sock->sk;
        struct kcm_sock *kcm = kcm_sk(sk);
-       long timeo;
        struct strp_msg *stm;
        int err = 0;
        ssize_t copied;
@@ -1187,11 +1147,7 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
 
        /* Only support splice for SOCKSEQPACKET */
 
-       timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
-
-       lock_sock(sk);
-
-       skb = kcm_wait_data(sk, flags, timeo, &err);
+       skb = skb_recv_datagram(sk, flags, &err);
        if (!skb)
                goto err_out;
 
@@ -1219,13 +1175,11 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
         * finish reading the message.
         */
 
-       release_sock(sk);
-
+       skb_free_datagram(sk, skb);
        return copied;
 
 err_out:
-       release_sock(sk);
-
+       skb_free_datagram(sk, skb);
        return err;
 }
 
index c85df5b..95edcbe 100644 (file)
@@ -2905,7 +2905,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
                        break;
                if (!aalg->pfkey_supported)
                        continue;
-               if (aalg_tmpl_set(t, aalg) && aalg->available)
+               if (aalg_tmpl_set(t, aalg))
                        sz += sizeof(struct sadb_comb);
        }
        return sz + sizeof(struct sadb_prop);
@@ -2923,7 +2923,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
                if (!ealg->pfkey_supported)
                        continue;
 
-               if (!(ealg_tmpl_set(t, ealg) && ealg->available))
+               if (!(ealg_tmpl_set(t, ealg)))
                        continue;
 
                for (k = 1; ; k++) {
@@ -2934,16 +2934,17 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
                        if (!aalg->pfkey_supported)
                                continue;
 
-                       if (aalg_tmpl_set(t, aalg) && aalg->available)
+                       if (aalg_tmpl_set(t, aalg))
                                sz += sizeof(struct sadb_comb);
                }
        }
        return sz + sizeof(struct sadb_prop);
 }
 
-static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
+static int dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
 {
        struct sadb_prop *p;
+       int sz = 0;
        int i;
 
        p = skb_put(skb, sizeof(struct sadb_prop));
@@ -2971,13 +2972,17 @@ static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
                        c->sadb_comb_soft_addtime = 20*60*60;
                        c->sadb_comb_hard_usetime = 8*60*60;
                        c->sadb_comb_soft_usetime = 7*60*60;
+                       sz += sizeof(*c);
                }
        }
+
+       return sz + sizeof(*p);
 }
 
-static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
+static int dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
 {
        struct sadb_prop *p;
+       int sz = 0;
        int i, k;
 
        p = skb_put(skb, sizeof(struct sadb_prop));
@@ -3019,8 +3024,11 @@ static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
                        c->sadb_comb_soft_addtime = 20*60*60;
                        c->sadb_comb_hard_usetime = 8*60*60;
                        c->sadb_comb_soft_usetime = 7*60*60;
+                       sz += sizeof(*c);
                }
        }
+
+       return sz + sizeof(*p);
 }
 
 static int key_notify_policy_expire(struct xfrm_policy *xp, const struct km_event *c)
@@ -3150,6 +3158,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
        struct sadb_x_sec_ctx *sec_ctx;
        struct xfrm_sec_ctx *xfrm_ctx;
        int ctx_size = 0;
+       int alg_size = 0;
 
        sockaddr_size = pfkey_sockaddr_size(x->props.family);
        if (!sockaddr_size)
@@ -3161,16 +3170,16 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
                sizeof(struct sadb_x_policy);
 
        if (x->id.proto == IPPROTO_AH)
-               size += count_ah_combs(t);
+               alg_size = count_ah_combs(t);
        else if (x->id.proto == IPPROTO_ESP)
-               size += count_esp_combs(t);
+               alg_size = count_esp_combs(t);
 
        if ((xfrm_ctx = x->security)) {
                ctx_size = PFKEY_ALIGN8(xfrm_ctx->ctx_len);
                size +=  sizeof(struct sadb_x_sec_ctx) + ctx_size;
        }
 
-       skb =  alloc_skb(size + 16, GFP_ATOMIC);
+       skb =  alloc_skb(size + alg_size + 16, GFP_ATOMIC);
        if (skb == NULL)
                return -ENOMEM;
 
@@ -3224,10 +3233,13 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
        pol->sadb_x_policy_priority = xp->priority;
 
        /* Set sadb_comb's. */
+       alg_size = 0;
        if (x->id.proto == IPPROTO_AH)
-               dump_ah_combs(skb, t);
+               alg_size = dump_ah_combs(skb, t);
        else if (x->id.proto == IPPROTO_ESP)
-               dump_esp_combs(skb, t);
+               alg_size = dump_esp_combs(skb, t);
+
+       hdr->sadb_msg_len += alg_size / 8;
 
        /* security context */
        if (xfrm_ctx) {
@@ -3382,7 +3394,7 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
        hdr->sadb_msg_len = size / sizeof(uint64_t);
        hdr->sadb_msg_errno = 0;
        hdr->sadb_msg_reserved = 0;
-       hdr->sadb_msg_seq = x->km.seq = get_acqseq();
+       hdr->sadb_msg_seq = x->km.seq;
        hdr->sadb_msg_pid = 0;
 
        /* SA */
index 7499c51..9a1415f 100644 (file)
@@ -1150,8 +1150,10 @@ static void l2tp_tunnel_destruct(struct sock *sk)
        }
 
        /* Remove hooks into tunnel socket */
+       write_lock_bh(&sk->sk_callback_lock);
        sk->sk_destruct = tunnel->old_sk_destruct;
        sk->sk_user_data = NULL;
+       write_unlock_bh(&sk->sk_callback_lock);
 
        /* Call the original destructor */
        if (sk->sk_destruct)
@@ -1469,16 +1471,19 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
                sock = sockfd_lookup(tunnel->fd, &ret);
                if (!sock)
                        goto err;
-
-               ret = l2tp_validate_socket(sock->sk, net, tunnel->encap);
-               if (ret < 0)
-                       goto err_sock;
        }
 
+       sk = sock->sk;
+       write_lock_bh(&sk->sk_callback_lock);
+       ret = l2tp_validate_socket(sk, net, tunnel->encap);
+       if (ret < 0)
+               goto err_inval_sock;
+       rcu_assign_sk_user_data(sk, tunnel);
+       write_unlock_bh(&sk->sk_callback_lock);
+
        tunnel->l2tp_net = net;
        pn = l2tp_pernet(net);
 
-       sk = sock->sk;
        sock_hold(sk);
        tunnel->sock = sk;
 
@@ -1503,8 +1508,6 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
                };
 
                setup_udp_tunnel_sock(net, sock, &udp_cfg);
-       } else {
-               sk->sk_user_data = tunnel;
        }
 
        tunnel->old_sk_destruct = sk->sk_destruct;
@@ -1521,6 +1524,11 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
        return 0;
 
 err_sock:
+       write_lock_bh(&sk->sk_callback_lock);
+       rcu_assign_sk_user_data(sk, NULL);
+err_inval_sock:
+       write_unlock_bh(&sk->sk_callback_lock);
+
        if (tunnel->fd < 0)
                sock_release(sock);
        else
index 3adc291..7499192 100644 (file)
@@ -916,7 +916,7 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 #ifdef IP_SET_HASH_WITH_MULTI
                if (h->bucketsize >= AHASH_MAX_TUNED)
                        goto set_full;
-               else if (h->bucketsize < multi)
+               else if (h->bucketsize <= multi)
                        h->bucketsize += AHASH_INIT_SIZE;
 #endif
                if (n->size >= AHASH_MAX(h)) {
index dd30c03..75d556d 100644 (file)
@@ -151,18 +151,16 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
        if (((u64)ip_to - ip + 1) >> (32 - h->netmask) > IPSET_MAX_RANGE)
                return -ERANGE;
 
-       if (retried) {
+       if (retried)
                ip = ntohl(h->next.ip);
-               e.ip = htonl(ip);
-       }
        for (; ip <= ip_to;) {
+               e.ip = htonl(ip);
                ret = adtfn(set, &e, &ext, &ext, flags);
                if (ret && !ip_set_eexist(ret, flags))
                        return ret;
 
                ip += hosts;
-               e.ip = htonl(ip);
-               if (e.ip == 0)
+               if (ip == 0)
                        return 0;
 
                ret = 0;
index f97bda0..2692139 100644 (file)
@@ -1781,7 +1781,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
                        }
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
-                       ct->mark = exp->master->mark;
+                       ct->mark = READ_ONCE(exp->master->mark);
 #endif
 #ifdef CONFIG_NF_CONNTRACK_SECMARK
                        ct->secmark = exp->master->secmark;
index 7562b21..d71150a 100644 (file)
@@ -328,9 +328,9 @@ nla_put_failure:
 }
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
-static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
+static int ctnetlink_dump_mark(struct sk_buff *skb, u32 mark)
 {
-       if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
+       if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
                goto nla_put_failure;
        return 0;
 
@@ -543,7 +543,7 @@ static int ctnetlink_dump_extinfo(struct sk_buff *skb,
 static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
 {
        if (ctnetlink_dump_status(skb, ct) < 0 ||
-           ctnetlink_dump_mark(skb, ct) < 0 ||
+           ctnetlink_dump_mark(skb, READ_ONCE(ct->mark)) < 0 ||
            ctnetlink_dump_secctx(skb, ct) < 0 ||
            ctnetlink_dump_id(skb, ct) < 0 ||
            ctnetlink_dump_use(skb, ct) < 0 ||
@@ -722,6 +722,7 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
        struct sk_buff *skb;
        unsigned int type;
        unsigned int flags = 0, group;
+       u32 mark;
        int err;
 
        if (events & (1 << IPCT_DESTROY)) {
@@ -826,8 +827,9 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
        }
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
-       if ((events & (1 << IPCT_MARK) || ct->mark)
-           && ctnetlink_dump_mark(skb, ct) < 0)
+       mark = READ_ONCE(ct->mark);
+       if ((events & (1 << IPCT_MARK) || mark) &&
+           ctnetlink_dump_mark(skb, mark) < 0)
                goto nla_put_failure;
 #endif
        nlmsg_end(skb, nlh);
@@ -1154,7 +1156,7 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
        }
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
-       if ((ct->mark & filter->mark.mask) != filter->mark.val)
+       if ((READ_ONCE(ct->mark) & filter->mark.mask) != filter->mark.val)
                goto ignore_entry;
 #endif
        status = (u32)READ_ONCE(ct->status);
@@ -2002,9 +2004,9 @@ static void ctnetlink_change_mark(struct nf_conn *ct,
                mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
 
        mark = ntohl(nla_get_be32(cda[CTA_MARK]));
-       newmark = (ct->mark & mask) ^ mark;
-       if (newmark != ct->mark)
-               ct->mark = newmark;
+       newmark = (READ_ONCE(ct->mark) & mask) ^ mark;
+       if (newmark != READ_ONCE(ct->mark))
+               WRITE_ONCE(ct->mark, newmark);
 }
 #endif
 
@@ -2669,6 +2671,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
 {
        const struct nf_conntrack_zone *zone;
        struct nlattr *nest_parms;
+       u32 mark;
 
        zone = nf_ct_zone(ct);
 
@@ -2730,7 +2733,8 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
                goto nla_put_failure;
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
-       if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
+       mark = READ_ONCE(ct->mark);
+       if (mark && ctnetlink_dump_mark(skb, mark) < 0)
                goto nla_put_failure;
 #endif
        if (ctnetlink_dump_labels(skb, ct) < 0)
index 4ffe84c..bca839a 100644 (file)
@@ -366,7 +366,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
                goto release;
 
 #if defined(CONFIG_NF_CONNTRACK_MARK)
-       seq_printf(s, "mark=%u ", ct->mark);
+       seq_printf(s, "mark=%u ", READ_ONCE(ct->mark));
 #endif
 
        ct_show_secctx(s, ct);
index b04645c..00b5228 100644 (file)
@@ -1098,6 +1098,7 @@ static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
        struct flow_block_cb *block_cb, *next;
        int err = 0;
 
+       down_write(&flowtable->flow_block_lock);
        switch (cmd) {
        case FLOW_BLOCK_BIND:
                list_splice(&bo->cb_list, &flowtable->flow_block.cb_list);
@@ -1112,6 +1113,7 @@ static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
                WARN_ON_ONCE(1);
                err = -EOPNOTSUPP;
        }
+       up_write(&flowtable->flow_block_lock);
 
        return err;
 }
@@ -1168,7 +1170,9 @@ static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
 
        nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
                                         extack);
+       down_write(&flowtable->flow_block_lock);
        err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo);
+       up_write(&flowtable->flow_block_lock);
        if (err < 0)
                return err;
 
index e7152d5..7a09421 100644 (file)
@@ -5958,7 +5958,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                                            &timeout);
                if (err)
                        return err;
-       } else if (set->flags & NFT_SET_TIMEOUT) {
+       } else if (set->flags & NFT_SET_TIMEOUT &&
+                  !(flags & NFT_SET_ELEM_INTERVAL_END)) {
                timeout = set->timeout;
        }
 
@@ -6024,7 +6025,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                        err = -EOPNOTSUPP;
                        goto err_set_elem_expr;
                }
-       } else if (set->num_exprs > 0) {
+       } else if (set->num_exprs > 0 &&
+                  !(flags & NFT_SET_ELEM_INTERVAL_END)) {
                err = nft_set_elem_expr_clone(ctx, set, expr_array);
                if (err < 0)
                        goto err_set_elem_expr_clone;
index a3f01f2..641dc21 100644 (file)
@@ -98,7 +98,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
                return;
 #ifdef CONFIG_NF_CONNTRACK_MARK
        case NFT_CT_MARK:
-               *dest = ct->mark;
+               *dest = READ_ONCE(ct->mark);
                return;
 #endif
 #ifdef CONFIG_NF_CONNTRACK_SECMARK
@@ -297,8 +297,8 @@ static void nft_ct_set_eval(const struct nft_expr *expr,
        switch (priv->key) {
 #ifdef CONFIG_NF_CONNTRACK_MARK
        case NFT_CT_MARK:
-               if (ct->mark != value) {
-                       ct->mark = value;
+               if (READ_ONCE(ct->mark) != value) {
+                       WRITE_ONCE(ct->mark, value);
                        nf_conntrack_event_cache(IPCT_MARK, ct);
                }
                break;
index e5ebc08..ad3c033 100644 (file)
@@ -30,6 +30,7 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
        u_int32_t new_targetmark;
        struct nf_conn *ct;
        u_int32_t newmark;
+       u_int32_t oldmark;
 
        ct = nf_ct_get(skb, &ctinfo);
        if (ct == NULL)
@@ -37,14 +38,15 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
 
        switch (info->mode) {
        case XT_CONNMARK_SET:
-               newmark = (ct->mark & ~info->ctmask) ^ info->ctmark;
+               oldmark = READ_ONCE(ct->mark);
+               newmark = (oldmark & ~info->ctmask) ^ info->ctmark;
                if (info->shift_dir == D_SHIFT_RIGHT)
                        newmark >>= info->shift_bits;
                else
                        newmark <<= info->shift_bits;
 
-               if (ct->mark != newmark) {
-                       ct->mark = newmark;
+               if (READ_ONCE(ct->mark) != newmark) {
+                       WRITE_ONCE(ct->mark, newmark);
                        nf_conntrack_event_cache(IPCT_MARK, ct);
                }
                break;
@@ -55,15 +57,15 @@ connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
                else
                        new_targetmark <<= info->shift_bits;
 
-               newmark = (ct->mark & ~info->ctmask) ^
+               newmark = (READ_ONCE(ct->mark) & ~info->ctmask) ^
                          new_targetmark;
-               if (ct->mark != newmark) {
-                       ct->mark = newmark;
+               if (READ_ONCE(ct->mark) != newmark) {
+                       WRITE_ONCE(ct->mark, newmark);
                        nf_conntrack_event_cache(IPCT_MARK, ct);
                }
                break;
        case XT_CONNMARK_RESTORE:
-               new_targetmark = (ct->mark & info->ctmask);
+               new_targetmark = (READ_ONCE(ct->mark) & info->ctmask);
                if (info->shift_dir == D_SHIFT_RIGHT)
                        new_targetmark >>= info->shift_bits;
                else
@@ -126,7 +128,7 @@ connmark_mt(const struct sk_buff *skb, struct xt_action_param *par)
        if (ct == NULL)
                return false;
 
-       return ((ct->mark & info->mask) == info->mark) ^ info->invert;
+       return ((READ_ONCE(ct->mark) & info->mask) == info->mark) ^ info->invert;
 }
 
 static int connmark_mt_check(const struct xt_mtchk_param *par)
index 6a193cc..4ffdf2f 100644 (file)
@@ -542,7 +542,7 @@ static int nci_open_device(struct nci_dev *ndev)
                skb_queue_purge(&ndev->tx_q);
 
                ndev->ops->close(ndev);
-               ndev->flags = 0;
+               ndev->flags &= BIT(NCI_UNREG);
        }
 
 done:
index aa5e712..3d36ea5 100644 (file)
@@ -279,8 +279,10 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
                 nci_plen(skb->data));
 
        conn_info = nci_get_conn_info_by_conn_id(ndev, nci_conn_id(skb->data));
-       if (!conn_info)
+       if (!conn_info) {
+               kfree_skb(skb);
                return;
+       }
 
        /* strip the nci data header */
        skb_pull(skb, NCI_DATA_HDR_SIZE);
index c7b1023..c8eaf42 100644 (file)
@@ -152,7 +152,7 @@ static u8 ovs_ct_get_state(enum ip_conntrack_info ctinfo)
 static u32 ovs_ct_get_mark(const struct nf_conn *ct)
 {
 #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
-       return ct ? ct->mark : 0;
+       return ct ? READ_ONCE(ct->mark) : 0;
 #else
        return 0;
 #endif
@@ -340,9 +340,9 @@ static int ovs_ct_set_mark(struct nf_conn *ct, struct sw_flow_key *key,
 #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
        u32 new_mark;
 
-       new_mark = ct_mark | (ct->mark & ~(mask));
-       if (ct->mark != new_mark) {
-               ct->mark = new_mark;
+       new_mark = ct_mark | (READ_ONCE(ct->mark) & ~(mask));
+       if (READ_ONCE(ct->mark) != new_mark) {
+               WRITE_ONCE(ct->mark, new_mark);
                if (nf_ct_is_confirmed(ct))
                        nf_conntrack_event_cache(IPCT_MARK, ct);
                key->ct.mark = new_mark;
index 1ad0ec5..8499ceb 100644 (file)
@@ -399,6 +399,7 @@ enum rxrpc_conn_proto_state {
 struct rxrpc_bundle {
        struct rxrpc_conn_parameters params;
        refcount_t              ref;
+       atomic_t                active;         /* Number of active users */
        unsigned int            debug_id;
        bool                    try_upgrade;    /* True if the bundle is attempting upgrade */
        bool                    alloc_conn;     /* True if someone's getting a conn */
index 3c9eeb5..bdb335c 100644 (file)
@@ -40,6 +40,8 @@ __read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
 DEFINE_IDR(rxrpc_client_conn_ids);
 static DEFINE_SPINLOCK(rxrpc_conn_id_lock);
 
+static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle);
+
 /*
  * Get a connection ID and epoch for a client connection from the global pool.
  * The connection struct pointer is then recorded in the idr radix tree.  The
@@ -123,6 +125,7 @@ static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp,
                bundle->params = *cp;
                rxrpc_get_peer(bundle->params.peer);
                refcount_set(&bundle->ref, 1);
+               atomic_set(&bundle->active, 1);
                spin_lock_init(&bundle->channel_lock);
                INIT_LIST_HEAD(&bundle->waiting_calls);
        }
@@ -149,7 +152,7 @@ void rxrpc_put_bundle(struct rxrpc_bundle *bundle)
 
        dead = __refcount_dec_and_test(&bundle->ref, &r);
 
-       _debug("PUT B=%x %d", d, r);
+       _debug("PUT B=%x %d", d, r - 1);
        if (dead)
                rxrpc_free_bundle(bundle);
 }
@@ -338,6 +341,7 @@ found_bundle_free:
        rxrpc_free_bundle(candidate);
 found_bundle:
        rxrpc_get_bundle(bundle);
+       atomic_inc(&bundle->active);
        spin_unlock(&local->client_bundles_lock);
        _leave(" = %u [found]", bundle->debug_id);
        return bundle;
@@ -435,6 +439,7 @@ static void rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, gfp_t gfp)
                        if (old)
                                trace_rxrpc_client(old, -1, rxrpc_client_replace);
                        candidate->bundle_shift = shift;
+                       atomic_inc(&bundle->active);
                        bundle->conns[i] = candidate;
                        for (j = 0; j < RXRPC_MAXCALLS; j++)
                                set_bit(shift + j, &bundle->avail_chans);
@@ -725,6 +730,7 @@ granted_channel:
        smp_rmb();
 
 out_put_bundle:
+       rxrpc_deactivate_bundle(bundle);
        rxrpc_put_bundle(bundle);
 out:
        _leave(" = %d", ret);
@@ -900,9 +906,8 @@ out:
 static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
 {
        struct rxrpc_bundle *bundle = conn->bundle;
-       struct rxrpc_local *local = bundle->params.local;
        unsigned int bindex;
-       bool need_drop = false, need_put = false;
+       bool need_drop = false;
        int i;
 
        _enter("C=%x", conn->debug_id);
@@ -921,15 +926,22 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
        }
        spin_unlock(&bundle->channel_lock);
 
-       /* If there are no more connections, remove the bundle */
-       if (!bundle->avail_chans) {
-               _debug("maybe unbundle");
-               spin_lock(&local->client_bundles_lock);
+       if (need_drop) {
+               rxrpc_deactivate_bundle(bundle);
+               rxrpc_put_connection(conn);
+       }
+}
 
-               for (i = 0; i < ARRAY_SIZE(bundle->conns); i++)
-                       if (bundle->conns[i])
-                               break;
-               if (i == ARRAY_SIZE(bundle->conns) && !bundle->params.exclusive) {
+/*
+ * Drop the active count on a bundle.
+ */
+static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
+{
+       struct rxrpc_local *local = bundle->params.local;
+       bool need_put = false;
+
+       if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) {
+               if (!bundle->params.exclusive) {
                        _debug("erase bundle");
                        rb_erase(&bundle->local_node, &local->client_bundles);
                        need_put = true;
@@ -939,10 +951,6 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
                if (need_put)
                        rxrpc_put_bundle(bundle);
        }
-
-       if (need_drop)
-               rxrpc_put_connection(conn);
-       _leave("");
 }
 
 /*
index 1e8ab47..4662a6c 100644 (file)
@@ -976,7 +976,7 @@ config NET_ACT_TUNNEL_KEY
 
 config NET_ACT_CT
        tristate "connection tracking tc action"
-       depends on NET_CLS_ACT && NF_CONNTRACK && NF_NAT && NF_FLOW_TABLE
+       depends on NET_CLS_ACT && NF_CONNTRACK && (!NF_NAT || NF_NAT) && NF_FLOW_TABLE
        help
          Say Y here to allow sending the packets to conntrack module.
 
index 66b143b..d41002e 100644 (file)
@@ -61,7 +61,7 @@ static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a,
 
        c = nf_ct_get(skb, &ctinfo);
        if (c) {
-               skb->mark = c->mark;
+               skb->mark = READ_ONCE(c->mark);
                /* using overlimits stats to count how many packets marked */
                ca->tcf_qstats.overlimits++;
                goto out;
@@ -81,7 +81,7 @@ static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a,
        c = nf_ct_tuplehash_to_ctrack(thash);
        /* using overlimits stats to count how many packets marked */
        ca->tcf_qstats.overlimits++;
-       skb->mark = c->mark;
+       skb->mark = READ_ONCE(c->mark);
        nf_ct_put(c);
 
 out:
index b38d91d..4c7f786 100644 (file)
@@ -178,7 +178,7 @@ static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
        entry = tcf_ct_flow_table_flow_action_get_next(action);
        entry->id = FLOW_ACTION_CT_METADATA;
 #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
-       entry->ct_metadata.mark = ct->mark;
+       entry->ct_metadata.mark = READ_ONCE(ct->mark);
 #endif
        ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
                                             IP_CT_ESTABLISHED_REPLY;
@@ -936,9 +936,9 @@ static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
        if (!mask)
                return;
 
-       new_mark = mark | (ct->mark & ~(mask));
-       if (ct->mark != new_mark) {
-               ct->mark = new_mark;
+       new_mark = mark | (READ_ONCE(ct->mark) & ~(mask));
+       if (READ_ONCE(ct->mark) != new_mark) {
+               WRITE_ONCE(ct->mark, new_mark);
                if (nf_ct_is_confirmed(ct))
                        nf_conntrack_event_cache(IPCT_MARK, ct);
        }
index d4102f0..eaa02f0 100644 (file)
@@ -32,7 +32,7 @@ static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
 {
        u8 dscp, newdscp;
 
-       newdscp = (((ct->mark & cp->dscpmask) >> cp->dscpmaskshift) << 2) &
+       newdscp = (((READ_ONCE(ct->mark) & cp->dscpmask) >> cp->dscpmaskshift) << 2) &
                     ~INET_ECN_MASK;
 
        switch (proto) {
@@ -72,7 +72,7 @@ static void tcf_ctinfo_cpmark_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
                                  struct sk_buff *skb)
 {
        ca->stats_cpmark_set++;
-       skb->mark = ct->mark & cp->cpmarkmask;
+       skb->mark = READ_ONCE(ct->mark) & cp->cpmarkmask;
 }
 
 static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
@@ -130,7 +130,7 @@ static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
        }
 
        if (cp->mode & CTINFO_MODE_DSCP)
-               if (!cp->dscpstatemask || (ct->mark & cp->dscpstatemask))
+               if (!cp->dscpstatemask || (READ_ONCE(ct->mark) & cp->dscpstatemask))
                        tcf_ctinfo_dscp_set(ct, ca, cp, skb, wlen, proto);
 
        if (cp->mode & CTINFO_MODE_CPMARK)
index e863070..e8dcdf2 100644 (file)
@@ -211,7 +211,10 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
        u32 self;
        int err;
 
-       skb_linearize(skb);
+       if (skb_linearize(skb)) {
+               kfree_skb(skb);
+               return;
+       }
        hdr = buf_msg(skb);
 
        if (caps & TIPC_NODE_ID128)
index d92ec92..e3b427a 100644 (file)
@@ -176,7 +176,7 @@ static void tipc_conn_close(struct tipc_conn *con)
        conn_put(con);
 }
 
-static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s)
+static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s, struct socket *sock)
 {
        struct tipc_conn *con;
        int ret;
@@ -202,10 +202,12 @@ static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s)
        }
        con->conid = ret;
        s->idr_in_use++;
-       spin_unlock_bh(&s->idr_lock);
 
        set_bit(CF_CONNECTED, &con->flags);
        con->server = s;
+       con->sock = sock;
+       conn_get(con);
+       spin_unlock_bh(&s->idr_lock);
 
        return con;
 }
@@ -467,7 +469,7 @@ static void tipc_topsrv_accept(struct work_struct *work)
                ret = kernel_accept(lsock, &newsock, O_NONBLOCK);
                if (ret < 0)
                        return;
-               con = tipc_conn_alloc(srv);
+               con = tipc_conn_alloc(srv, newsock);
                if (IS_ERR(con)) {
                        ret = PTR_ERR(con);
                        sock_release(newsock);
@@ -479,11 +481,11 @@ static void tipc_topsrv_accept(struct work_struct *work)
                newsk->sk_data_ready = tipc_conn_data_ready;
                newsk->sk_write_space = tipc_conn_write_space;
                newsk->sk_user_data = con;
-               con->sock = newsock;
                write_unlock_bh(&newsk->sk_callback_lock);
 
                /* Wake up receive process in case of 'SYN+' message */
                newsk->sk_data_ready(newsk);
+               conn_put(con);
        }
 }
 
@@ -577,17 +579,17 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
        sub.filter = filter;
        *(u64 *)&sub.usr_handle = (u64)port;
 
-       con = tipc_conn_alloc(tipc_topsrv(net));
+       con = tipc_conn_alloc(tipc_topsrv(net), NULL);
        if (IS_ERR(con))
                return false;
 
        *conid = con->conid;
-       con->sock = NULL;
        rc = tipc_conn_rcv_sub(tipc_topsrv(net), con, &sub);
-       if (rc >= 0)
-               return true;
+       if (rc)
+               conn_put(con);
+
        conn_put(con);
-       return false;
+       return !rc;
 }
 
 void tipc_topsrv_kern_unsubscr(struct net *net, int conid)
index cdb391a..7fbb1d0 100644 (file)
@@ -346,7 +346,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
                salt = tls_ctx->crypto_send.aes_gcm_256.salt;
                break;
        default:
-               return NULL;
+               goto free_req;
        }
        cipher_sz = &tls_cipher_size_desc[tls_ctx->crypto_send.info.cipher_type];
        buf_len = cipher_sz->salt + cipher_sz->iv + TLS_AAD_SPACE_SIZE +
@@ -492,7 +492,8 @@ int tls_sw_fallback_init(struct sock *sk,
                key = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->key;
                break;
        default:
-               return -EINVAL;
+               rc = -EINVAL;
+               goto free_aead;
        }
        cipher_sz = &tls_cipher_size_desc[crypto_info->cipher_type];
 
index 5259ef8..748d863 100644 (file)
@@ -117,7 +117,7 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
 
        if (!pskb_may_pull(skb, 1)) {
                x25_neigh_put(nb);
-               return 0;
+               goto drop;
        }
 
        switch (skb->data[0]) {
index 5f5aafd..21269e8 100644 (file)
@@ -97,6 +97,18 @@ static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
        }
 }
 
+static inline bool xmit_xfrm_check_overflow(struct sk_buff *skb)
+{
+       struct xfrm_offload *xo = xfrm_offload(skb);
+       __u32 seq = xo->seq.low;
+
+       seq += skb_shinfo(skb)->gso_segs;
+       if (unlikely(seq < xo->seq.low))
+               return true;
+
+       return false;
+}
+
 struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
 {
        int err;
@@ -134,7 +146,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
                return skb;
        }
 
-       if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
+       if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
+                               unlikely(xmit_xfrm_check_overflow(skb)))) {
                struct sk_buff *segs;
 
                /* Packet got rerouted, fixup features and segment it. */
index 9f4d42e..ce56d65 100644 (file)
@@ -714,7 +714,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
                        oseq += skb_shinfo(skb)->gso_segs;
                }
 
-               if (unlikely(oseq < replay_esn->oseq)) {
+               if (unlikely(xo->seq.low < replay_esn->oseq)) {
                        XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi;
                        xo->seq.hi = oseq_hi;
                        replay_esn->oseq_hi = oseq_hi;
index 8bbcced..2a90139 100644 (file)
@@ -30,8 +30,8 @@ KBUILD_PKG_ROOTCMD ?="fakeroot -u"
 export KDEB_SOURCENAME
 # Include only those top-level files that are needed by make, plus the GPL copy
 TAR_CONTENT := Documentation LICENSES arch block certs crypto drivers fs \
-               include init io_uring ipc kernel lib mm net samples scripts \
-               security sound tools usr virt \
+               include init io_uring ipc kernel lib mm net rust \
+               samples scripts security sound tools usr virt \
                .config .scmversion Makefile \
                Kbuild Kconfig COPYING $(wildcard localversion*)
 MKSPEC     := $(srctree)/scripts/package/mkspec
index 60a2a63..a3ac5a7 100755 (executable)
@@ -90,7 +90,7 @@ if [ -n "$KDEB_PKGVERSION" ]; then
        packageversion=$KDEB_PKGVERSION
        revision=${packageversion##*-}
 else
-       revision=$(cat .version 2>/dev/null||echo 1)
+       revision=$($srctree/init/build-version)
        packageversion=$version-$revision
 fi
 sourcename=$KDEB_SOURCENAME
index b7aee23..47ef6bc 100644 (file)
@@ -113,15 +113,19 @@ EXPORT_SYMBOL(snd_seq_dump_var_event);
  * expand the variable length event to linear buffer space.
  */
 
-static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
+static int seq_copy_in_kernel(void *ptr, void *src, int size)
 {
+       char **bufptr = ptr;
+
        memcpy(*bufptr, src, size);
        *bufptr += size;
        return 0;
 }
 
-static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
+static int seq_copy_in_user(void *ptr, void *src, int size)
 {
+       char __user **bufptr = ptr;
+
        if (copy_to_user(*bufptr, src, size))
                return -EFAULT;
        *bufptr += size;
@@ -151,8 +155,7 @@ int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char
                return newlen;
        }
        err = snd_seq_dump_var_event(event,
-                                    in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
-                                    (snd_seq_dump_func_t)seq_copy_in_user,
+                                    in_kernel ? seq_copy_in_kernel : seq_copy_in_user,
                                     &buf);
        return err < 0 ? err : newlen;
 }
index b9eb320..ae31bb1 100644 (file)
@@ -321,6 +321,11 @@ static const struct config_entry config_table[] = {
                }
        },
        {
+               .flags = FLAG_SOF,
+               .device = 0x34c8,
+               .codec_hid =  &essx_83x6,
+       },
+       {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x34c8,
        },
index e18499d..e5c0363 100644 (file)
@@ -9436,6 +9436,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_AMP),
        SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_AMP),
        SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_AMP),
+       SND_PCI_QUIRK(0x144d, 0xc1a3, "Samsung Galaxy Book Pro (NP935XDB-KC1SE)", ALC298_FIXUP_SAMSUNG_AMP),
+       SND_PCI_QUIRK(0x144d, 0xc1a6, "Samsung Galaxy Book Pro 360 (NP930QBD)", ALC298_FIXUP_SAMSUNG_AMP),
        SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
        SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_AMP),
        SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP),
index 6c0f1de..d9715be 100644 (file)
@@ -206,6 +206,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "UM5302TA"),
                }
        },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Alienware"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m17 R5 AMD"),
+               }
+       },
        {}
 };
 
index fc19c34..b655609 100644 (file)
@@ -14,7 +14,7 @@ enum {
        HDAC_HDMI_1_DAI_ID,
        HDAC_HDMI_2_DAI_ID,
        HDAC_HDMI_3_DAI_ID,
-       HDAC_LAST_DAI_ID = HDAC_HDMI_3_DAI_ID,
+       HDAC_DAI_ID_NUM
 };
 
 struct hdac_hda_pcm {
@@ -24,7 +24,7 @@ struct hdac_hda_pcm {
 
 struct hdac_hda_priv {
        struct hda_codec *codec;
-       struct hdac_hda_pcm pcm[HDAC_LAST_DAI_ID];
+       struct hdac_hda_pcm pcm[HDAC_DAI_ID_NUM];
        bool need_display_power;
 };
 
index 3e04c7f..ec0905d 100644 (file)
@@ -549,6 +549,10 @@ static int max98373_i2c_probe(struct i2c_client *i2c)
        max98373->cache = devm_kcalloc(&i2c->dev, max98373->cache_num,
                                       sizeof(*max98373->cache),
                                       GFP_KERNEL);
+       if (!max98373->cache) {
+               ret = -ENOMEM;
+               return ret;
+       }
 
        for (i = 0; i < max98373->cache_num; i++)
                max98373->cache[i].reg = max98373_i2c_cache_reg[i];
index 1a25a37..362663a 100644 (file)
@@ -298,13 +298,14 @@ static int rt5514_spi_pcm_new(struct snd_soc_component *component,
 }
 
 static const struct snd_soc_component_driver rt5514_spi_component = {
-       .name           = DRV_NAME,
-       .probe          = rt5514_spi_pcm_probe,
-       .open           = rt5514_spi_pcm_open,
-       .hw_params      = rt5514_spi_hw_params,
-       .hw_free        = rt5514_spi_hw_free,
-       .pointer        = rt5514_spi_pcm_pointer,
-       .pcm_construct  = rt5514_spi_pcm_new,
+       .name                   = DRV_NAME,
+       .probe                  = rt5514_spi_pcm_probe,
+       .open                   = rt5514_spi_pcm_open,
+       .hw_params              = rt5514_spi_hw_params,
+       .hw_free                = rt5514_spi_hw_free,
+       .pointer                = rt5514_spi_pcm_pointer,
+       .pcm_construct          = rt5514_spi_pcm_new,
+       .legacy_dai_naming      = 1,
 };
 
 /**
index 8f3993a..d25703d 100644 (file)
@@ -396,15 +396,16 @@ static int rt5677_spi_pcm_probe(struct snd_soc_component *component)
 }
 
 static const struct snd_soc_component_driver rt5677_spi_dai_component = {
-       .name           = DRV_NAME,
-       .probe          = rt5677_spi_pcm_probe,
-       .open           = rt5677_spi_pcm_open,
-       .close          = rt5677_spi_pcm_close,
-       .hw_params      = rt5677_spi_hw_params,
-       .hw_free        = rt5677_spi_hw_free,
-       .prepare        = rt5677_spi_prepare,
-       .pointer        = rt5677_spi_pcm_pointer,
-       .pcm_construct  = rt5677_spi_pcm_new,
+       .name                   = DRV_NAME,
+       .probe                  = rt5677_spi_pcm_probe,
+       .open                   = rt5677_spi_pcm_open,
+       .close                  = rt5677_spi_pcm_close,
+       .hw_params              = rt5677_spi_hw_params,
+       .hw_free                = rt5677_spi_hw_free,
+       .prepare                = rt5677_spi_prepare,
+       .pointer                = rt5677_spi_pcm_pointer,
+       .pcm_construct          = rt5677_spi_pcm_new,
+       .legacy_dai_naming      = 1,
 };
 
 /* Select a suitable transfer command for the next transfer to ensure
index 4120842..88a8392 100644 (file)
@@ -230,7 +230,7 @@ static int rt711_sdca_read_prop(struct sdw_slave *slave)
        }
 
        /* set the timeout values */
-       prop->clk_stop_timeout = 20;
+       prop->clk_stop_timeout = 700;
 
        /* wake-up event */
        prop->wake_capable = 1;
index 4b2135e..a916f46 100644 (file)
@@ -1794,6 +1794,7 @@ static void sgtl5000_i2c_remove(struct i2c_client *client)
 {
        struct sgtl5000_priv *sgtl5000 = i2c_get_clientdata(client);
 
+       regmap_write(sgtl5000->regmap, SGTL5000_CHIP_CLK_CTRL, SGTL5000_CHIP_CLK_CTRL_DEFAULT);
        regmap_write(sgtl5000->regmap, SGTL5000_CHIP_DIG_POWER, SGTL5000_DIG_POWER_DEFAULT);
        regmap_write(sgtl5000->regmap, SGTL5000_CHIP_ANA_POWER, SGTL5000_ANA_POWER_DEFAULT);
 
index 51b87a9..2e0ed3e 100644 (file)
@@ -438,20 +438,13 @@ static int tas2764_set_dai_tdm_slot(struct snd_soc_dai *dai,
        if (tx_mask == 0 || rx_mask != 0)
                return -EINVAL;
 
-       if (slots == 1) {
-               if (tx_mask != 1)
-                       return -EINVAL;
-               left_slot = 0;
-               right_slot = 0;
+       left_slot = __ffs(tx_mask);
+       tx_mask &= ~(1 << left_slot);
+       if (tx_mask == 0) {
+               right_slot = left_slot;
        } else {
-               left_slot = __ffs(tx_mask);
-               tx_mask &= ~(1 << left_slot);
-               if (tx_mask == 0) {
-                       right_slot = left_slot;
-               } else {
-                       right_slot = __ffs(tx_mask);
-                       tx_mask &= ~(1 << right_slot);
-               }
+               right_slot = __ffs(tx_mask);
+               tx_mask &= ~(1 << right_slot);
        }
 
        if (tx_mask != 0 || left_slot >= slots || right_slot >= slots)
index b676523..8557759 100644 (file)
@@ -395,21 +395,13 @@ static int tas2770_set_dai_tdm_slot(struct snd_soc_dai *dai,
        if (tx_mask == 0 || rx_mask != 0)
                return -EINVAL;
 
-       if (slots == 1) {
-               if (tx_mask != 1)
-                       return -EINVAL;
-
-               left_slot = 0;
-               right_slot = 0;
+       left_slot = __ffs(tx_mask);
+       tx_mask &= ~(1 << left_slot);
+       if (tx_mask == 0) {
+               right_slot = left_slot;
        } else {
-               left_slot = __ffs(tx_mask);
-               tx_mask &= ~(1 << left_slot);
-               if (tx_mask == 0) {
-                       right_slot = left_slot;
-               } else {
-                       right_slot = __ffs(tx_mask);
-                       tx_mask &= ~(1 << right_slot);
-               }
+               right_slot = __ffs(tx_mask);
+               tx_mask &= ~(1 << right_slot);
        }
 
        if (tx_mask != 0 || left_slot >= slots || right_slot >= slots)
index a6db6f0..afdf0c8 100644 (file)
@@ -380,20 +380,13 @@ static int tas2780_set_dai_tdm_slot(struct snd_soc_dai *dai,
        if (tx_mask == 0 || rx_mask != 0)
                return -EINVAL;
 
-       if (slots == 1) {
-               if (tx_mask != 1)
-                       return -EINVAL;
-               left_slot = 0;
-               right_slot = 0;
+       left_slot = __ffs(tx_mask);
+       tx_mask &= ~(1 << left_slot);
+       if (tx_mask == 0) {
+               right_slot = left_slot;
        } else {
-               left_slot = __ffs(tx_mask);
-               tx_mask &= ~(1 << left_slot);
-               if (tx_mask == 0) {
-                       right_slot = left_slot;
-               } else {
-                       right_slot = __ffs(tx_mask);
-                       tx_mask &= ~(1 << right_slot);
-               }
+               right_slot = __ffs(tx_mask);
+               tx_mask &= ~(1 << right_slot);
        }
 
        if (tx_mask != 0 || left_slot >= slots || right_slot >= slots)
index b4b4355..b901e4c 100644 (file)
@@ -2503,6 +2503,14 @@ static void wm8962_configure_bclk(struct snd_soc_component *component)
                snd_soc_component_update_bits(component, WM8962_CLOCKING2,
                                WM8962_SYSCLK_ENA_MASK, WM8962_SYSCLK_ENA);
 
+       /* DSPCLK_DIV field in WM8962_CLOCKING1 register is used to generate
+        * correct frequency of LRCLK and BCLK. Sometimes the read-only value
+        * can't be updated timely after enabling SYSCLK. This results in wrong
+        * calculation values. Delay is introduced here to wait for newest
+        * value from register. The time of the delay should be at least
+        * 500~1000us according to test.
+        */
+       usleep_range(500, 1000);
        dspclk = snd_soc_component_read(component, WM8962_CLOCKING1);
 
        if (snd_soc_component_get_bias_level(component) != SND_SOC_BIAS_ON)
index 936aef5..e16e7b3 100644 (file)
@@ -1232,7 +1232,7 @@ static int fsl_asrc_probe(struct platform_device *pdev)
        }
 
        ret = pm_runtime_put_sync(&pdev->dev);
-       if (ret < 0)
+       if (ret < 0 && ret != -ENOSYS)
                goto err_pm_get_sync;
 
        ret = devm_snd_soc_register_component(&pdev->dev, &fsl_asrc_component,
index 5c21fc4..17fefd2 100644 (file)
@@ -1069,7 +1069,7 @@ static int fsl_esai_probe(struct platform_device *pdev)
        regmap_write(esai_priv->regmap, REG_ESAI_RSMB, 0);
 
        ret = pm_runtime_put_sync(&pdev->dev);
-       if (ret < 0)
+       if (ret < 0 && ret != -ENOSYS)
                goto err_pm_get_sync;
 
        /*
index 81f89f6..e60c7b3 100644 (file)
@@ -1446,7 +1446,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
        }
 
        ret = pm_runtime_put_sync(dev);
-       if (ret < 0)
+       if (ret < 0 && ret != -ENOSYS)
                goto err_pm_get_sync;
 
        /*
index 6432b83..a935c5f 100644 (file)
@@ -443,6 +443,13 @@ static const struct dmi_system_id byt_cht_es8316_quirk_table[] = {
                                        | BYT_CHT_ES8316_INTMIC_IN2_MAP
                                        | BYT_CHT_ES8316_JD_INVERTED),
        },
+       {       /* Nanote UMPC-01 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "RWC CO.,LTD"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "UMPC-01"),
+               },
+               .driver_data = (void *)BYT_CHT_ES8316_INTMIC_IN1_MAP,
+       },
        {       /* Teclast X98 Plus II */
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
index fbb42e5..70713e4 100644 (file)
@@ -63,6 +63,7 @@ struct sof_es8336_private {
        struct snd_soc_jack jack;
        struct list_head hdmi_pcm_list;
        bool speaker_en;
+       struct delayed_work pcm_pop_work;
 };
 
 struct sof_hdmi_pcm {
@@ -111,6 +112,46 @@ static void log_quirks(struct device *dev)
                dev_info(dev, "quirk headset at mic1 port enabled\n");
 }
 
+static void pcm_pop_work_events(struct work_struct *work)
+{
+       struct sof_es8336_private *priv =
+               container_of(work, struct sof_es8336_private, pcm_pop_work.work);
+
+       gpiod_set_value_cansleep(priv->gpio_speakers, priv->speaker_en);
+
+       if (quirk & SOF_ES8336_HEADPHONE_GPIO)
+               gpiod_set_value_cansleep(priv->gpio_headphone, priv->speaker_en);
+
+}
+
+static int sof_8336_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+       struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+       struct snd_soc_card *card = rtd->card;
+       struct sof_es8336_private *priv = snd_soc_card_get_drvdata(card);
+
+       switch (cmd) {
+       case SNDRV_PCM_TRIGGER_START:
+       case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+       case SNDRV_PCM_TRIGGER_RESUME:
+               break;
+
+       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+       case SNDRV_PCM_TRIGGER_SUSPEND:
+       case SNDRV_PCM_TRIGGER_STOP:
+               if (priv->speaker_en == false)
+                       if (substream->stream == 0) {
+                               cancel_delayed_work(&priv->pcm_pop_work);
+                               gpiod_set_value_cansleep(priv->gpio_speakers, true);
+                       }
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int sof_es8316_speaker_power_event(struct snd_soc_dapm_widget *w,
                                          struct snd_kcontrol *kcontrol, int event)
 {
@@ -122,19 +163,7 @@ static int sof_es8316_speaker_power_event(struct snd_soc_dapm_widget *w,
 
        priv->speaker_en = !SND_SOC_DAPM_EVENT_ON(event);
 
-       if (SND_SOC_DAPM_EVENT_ON(event))
-               msleep(70);
-
-       gpiod_set_value_cansleep(priv->gpio_speakers, priv->speaker_en);
-
-       if (!(quirk & SOF_ES8336_HEADPHONE_GPIO))
-               return 0;
-
-       if (SND_SOC_DAPM_EVENT_ON(event))
-               msleep(70);
-
-       gpiod_set_value_cansleep(priv->gpio_headphone, priv->speaker_en);
-
+       queue_delayed_work(system_wq, &priv->pcm_pop_work, msecs_to_jiffies(70));
        return 0;
 }
 
@@ -344,6 +373,7 @@ static int sof_es8336_hw_params(struct snd_pcm_substream *substream,
 /* machine stream operations */
 static struct snd_soc_ops sof_es8336_ops = {
        .hw_params = sof_es8336_hw_params,
+       .trigger = sof_8336_trigger,
 };
 
 static struct snd_soc_dai_link_component platform_component[] = {
@@ -723,7 +753,8 @@ static int sof_es8336_probe(struct platform_device *pdev)
        }
 
        INIT_LIST_HEAD(&priv->hdmi_pcm_list);
-
+       INIT_DELAYED_WORK(&priv->pcm_pop_work,
+                               pcm_pop_work_events);
        snd_soc_card_set_drvdata(card, priv);
 
        if (mach->mach_params.dmic_num > 0) {
@@ -752,6 +783,7 @@ static int sof_es8336_remove(struct platform_device *pdev)
        struct snd_soc_card *card = platform_get_drvdata(pdev);
        struct sof_es8336_private *priv = snd_soc_card_get_drvdata(card);
 
+       cancel_delayed_work(&priv->pcm_pop_work);
        gpiod_put(priv->gpio_speakers);
        device_remove_software_node(priv->codec_dev);
        put_device(priv->codec_dev);
index b032bc0..d0062f2 100644 (file)
 #include <sound/soc-acpi-intel-match.h>
 #include "../skylake/skl.h"
 
+static const struct snd_soc_acpi_codecs essx_83x6 = {
+       .num_codecs = 3,
+       .codecs = { "ESSX8316", "ESSX8326", "ESSX8336"},
+};
+
 static struct skl_machine_pdata icl_pdata = {
        .use_tplg_pcm = true,
 };
@@ -27,6 +32,14 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_machines[] = {
                .drv_name = "sof_rt5682",
                .sof_tplg_filename = "sof-icl-rt5682.tplg",
        },
+       {
+               .comp_ids = &essx_83x6,
+               .drv_name = "sof-essx8336",
+               .sof_tplg_filename = "sof-icl-es8336", /* the tplg suffix is added at run time */
+               .tplg_quirk_mask = SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER |
+                                       SND_SOC_ACPI_TPLG_INTEL_SSP_MSB |
+                                       SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER,
+       },
        {},
 };
 EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_icl_machines);
index 12a82f5..a409fbe 100644 (file)
@@ -3477,10 +3477,23 @@ EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_link_cpus);
 
 static int __init snd_soc_init(void)
 {
+       int ret;
+
        snd_soc_debugfs_init();
-       snd_soc_util_init();
+       ret = snd_soc_util_init();
+       if (ret)
+               goto err_util_init;
 
-       return platform_driver_register(&soc_driver);
+       ret = platform_driver_register(&soc_driver);
+       if (ret)
+               goto err_register;
+       return 0;
+
+err_register:
+       snd_soc_util_exit();
+err_util_init:
+       snd_soc_debugfs_exit();
+       return ret;
 }
 module_init(snd_soc_init);
 
index d515e7a..879cf1b 100644 (file)
@@ -3645,7 +3645,7 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
 
        switch (w->id) {
        case snd_soc_dapm_regulator_supply:
-               w->regulator = devm_regulator_get(dapm->dev, w->name);
+               w->regulator = devm_regulator_get(dapm->dev, widget->name);
                if (IS_ERR(w->regulator)) {
                        ret = PTR_ERR(w->regulator);
                        goto request_failed;
index fb87d6d..35a16c3 100644 (file)
@@ -822,11 +822,6 @@ static int __soc_pcm_open(struct snd_soc_pcm_runtime *rtd,
                ret = snd_soc_dai_startup(dai, substream);
                if (ret < 0)
                        goto err;
-
-               if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-                       dai->tx_mask = 0;
-               else
-                       dai->rx_mask = 0;
        }
 
        /* Dynamic PCM DAI links compat checks use dynamic capabilities */
@@ -1252,6 +1247,8 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
                return;
 
        be_substream = snd_soc_dpcm_get_substream(be, stream);
+       if (!be_substream)
+               return;
 
        for_each_dpcm_fe(be, stream, dpcm) {
                if (dpcm->fe == fe)
index a3b6df2..a4dba0b 100644 (file)
@@ -264,7 +264,7 @@ int __init snd_soc_util_init(void)
        return ret;
 }
 
-void __exit snd_soc_util_exit(void)
+void snd_soc_util_exit(void)
 {
        platform_driver_unregister(&soc_dummy_driver);
        platform_device_unregister(soc_dummy_dev);
index c148715..0720e1e 100644 (file)
@@ -2275,6 +2275,7 @@ static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verif
        struct sof_ipc_fw_version *v = &sdev->fw_ready.version;
        struct snd_sof_widget *swidget;
        struct snd_sof_route *sroute;
+       bool dyn_widgets = false;
        int ret;
 
        /*
@@ -2284,12 +2285,14 @@ static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verif
         * topology loading the sound card unavailable to open PCMs.
         */
        list_for_each_entry(swidget, &sdev->widget_list, list) {
-               if (swidget->dynamic_pipeline_widget)
+               if (swidget->dynamic_pipeline_widget) {
+                       dyn_widgets = true;
                        continue;
+               }
 
-               /* Do not free widgets for static pipelines with FW ABI older than 3.19 */
+               /* Do not free widgets for static pipelines with FW older than SOF2.2 */
                if (!verify && !swidget->dynamic_pipeline_widget &&
-                   v->abi_version < SOF_ABI_VER(3, 19, 0)) {
+                   SOF_FW_VER(v->major, v->minor, v->micro) < SOF_FW_VER(2, 2, 0)) {
                        swidget->use_count = 0;
                        swidget->complete = 0;
                        continue;
@@ -2303,9 +2306,11 @@ static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verif
        /*
         * Tear down all pipelines associated with PCMs that did not get suspended
         * and unset the prepare flag so that they can be set up again during resume.
-        * Skip this step for older firmware.
+        * Skip this step for older firmware unless topology has any
+        * dynamic pipeline (in which case the step is mandatory).
         */
-       if (!verify && v->abi_version >= SOF_ABI_VER(3, 19, 0)) {
+       if (!verify && (dyn_widgets || SOF_FW_VER(v->major, v->minor, v->micro) >=
+           SOF_FW_VER(2, 2, 0))) {
                ret = sof_tear_down_left_over_pipelines(sdev);
                if (ret < 0) {
                        dev_err(sdev->dev, "failed to tear down paused pipelines\n");
index 38855dd..6a0e7f3 100644 (file)
@@ -1344,16 +1344,6 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index,
                break;
        }
 
-       if (sof_debug_check_flag(SOF_DBG_DISABLE_MULTICORE)) {
-               swidget->core = SOF_DSP_PRIMARY_CORE;
-       } else {
-               int core = sof_get_token_value(SOF_TKN_COMP_CORE_ID, swidget->tuples,
-                                              swidget->num_tuples);
-
-               if (core >= 0)
-                       swidget->core = core;
-       }
-
        /* check token parsing reply */
        if (ret < 0) {
                dev_err(scomp->dev,
@@ -1365,6 +1355,16 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index,
                return ret;
        }
 
+       if (sof_debug_check_flag(SOF_DBG_DISABLE_MULTICORE)) {
+               swidget->core = SOF_DSP_PRIMARY_CORE;
+       } else {
+               int core = sof_get_token_value(SOF_TKN_COMP_CORE_ID, swidget->tuples,
+                                              swidget->num_tuples);
+
+               if (core >= 0)
+                       swidget->core = core;
+       }
+
        /* bind widget to external event */
        if (tw->event_type) {
                if (widget_ops[w->id].bind_event) {
index 643fc8a..837c184 100644 (file)
@@ -304,6 +304,11 @@ static int stm32_adfsdm_dummy_cb(const void *data, void *private)
        return 0;
 }
 
+static void stm32_adfsdm_cleanup(void *data)
+{
+       iio_channel_release_all_cb(data);
+}
+
 static struct snd_soc_component_driver stm32_adfsdm_soc_platform = {
        .open           = stm32_adfsdm_pcm_open,
        .close          = stm32_adfsdm_pcm_close,
@@ -350,6 +355,12 @@ static int stm32_adfsdm_probe(struct platform_device *pdev)
        if (IS_ERR(priv->iio_cb))
                return PTR_ERR(priv->iio_cb);
 
+       ret = devm_add_action_or_reset(&pdev->dev, stm32_adfsdm_cleanup, priv->iio_cb);
+       if (ret < 0)  {
+               dev_err(&pdev->dev, "Unable to add action\n");
+               return ret;
+       }
+
        component = devm_kzalloc(&pdev->dev, sizeof(*component), GFP_KERNEL);
        if (!component)
                return -ENOMEM;
index ce7f694..f3dd9f8 100644 (file)
@@ -1077,7 +1077,7 @@ static int stm32_i2s_parse_dt(struct platform_device *pdev,
        if (irq < 0)
                return irq;
 
-       ret = devm_request_irq(&pdev->dev, irq, stm32_i2s_isr, IRQF_ONESHOT,
+       ret = devm_request_irq(&pdev->dev, irq, stm32_i2s_isr, 0,
                               dev_name(&pdev->dev), i2s);
        if (ret) {
                dev_err(&pdev->dev, "irq request returned %d\n", ret);
index bbff092..2839f6b 100644 (file)
@@ -1133,10 +1133,8 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
                                        port = &umidi->endpoints[i].out->ports[j];
                                        break;
                                }
-       if (!port) {
-               snd_BUG();
+       if (!port)
                return -ENXIO;
-       }
 
        substream->runtime->private_data = port;
        port->state = STATE_UNKNOWN;
index 10ac527..f17ade0 100644 (file)
 #define MSR_AMD64_CPUID_FN_1           0xc0011004
 #define MSR_AMD64_LS_CFG               0xc0011020
 #define MSR_AMD64_DC_CFG               0xc0011022
+
+#define MSR_AMD64_DE_CFG               0xc0011029
+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT  1
+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE      BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
+
 #define MSR_AMD64_BU_CFG2              0xc001102a
 #define MSR_AMD64_IBSFETCHCTL          0xc0011030
 #define MSR_AMD64_IBSFETCHLINAD                0xc0011031
 #define FAM10H_MMIO_CONF_BASE_MASK     0xfffffffULL
 #define FAM10H_MMIO_CONF_BASE_SHIFT    20
 #define MSR_FAM10H_NODE_ID             0xc001100c
-#define MSR_F10H_DECFG                 0xc0011029
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT    1
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE                BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
 
 /* K8 MSRs */
 #define MSR_K8_TOP_MEM1                        0xc001001a
index 2491c54..f8deae4 100644 (file)
@@ -715,12 +715,12 @@ int main(int argc, char **argv)
                                continue;
                        }
 
-                       toread = buf_len;
                } else {
                        usleep(timedelay);
-                       toread = 64;
                }
 
+               toread = buf_len;
+
                read_size = read(buf_fd, data, toread * scan_size);
                if (read_size < 0) {
                        if (errno == EAGAIN) {
index dd324b4..4d7056f 100644 (file)
@@ -63,6 +63,13 @@ void test_varlen(void)
        CHECK_VAL(data->total4, size1 + size2);
        CHECK(memcmp(data->payload4, exp_str, size1 + size2), "content_check",
              "doesn't match!\n");
+
+       CHECK_VAL(bss->ret_bad_read, -EFAULT);
+       CHECK_VAL(data->payload_bad[0], 0x42);
+       CHECK_VAL(data->payload_bad[1], 0x42);
+       CHECK_VAL(data->payload_bad[2], 0);
+       CHECK_VAL(data->payload_bad[3], 0x42);
+       CHECK_VAL(data->payload_bad[4], 0x42);
 cleanup:
        test_varlen__destroy(skel);
 }
index 3987ff1..20eb7d4 100644 (file)
@@ -19,6 +19,7 @@ __u64 payload1_len1 = 0;
 __u64 payload1_len2 = 0;
 __u64 total1 = 0;
 char payload1[MAX_LEN + MAX_LEN] = {};
+__u64 ret_bad_read = 0;
 
 /* .data */
 int payload2_len1 = -1;
@@ -36,6 +37,8 @@ int payload4_len2 = -1;
 int total4= -1;
 char payload4[MAX_LEN + MAX_LEN] = { 1 };
 
+char payload_bad[5] = { 0x42, 0x42, 0x42, 0x42, 0x42 };
+
 SEC("raw_tp/sys_enter")
 int handler64_unsigned(void *regs)
 {
@@ -61,6 +64,8 @@ int handler64_unsigned(void *regs)
 
        total1 = payload - (void *)payload1;
 
+       ret_bad_read = bpf_probe_read_kernel_str(payload_bad + 2, 1, (void *) -1);
+
        return 0;
 }
 
index 0e9a47f..3fef451 100644 (file)
@@ -1010,7 +1010,7 @@ static inline const char *str_msg(const struct msg *msg, char *buf)
                        msg->subtest_done.have_log);
                break;
        case MSG_TEST_LOG:
-               sprintf(buf, "MSG_TEST_LOG (cnt: %ld, last: %d)",
+               sprintf(buf, "MSG_TEST_LOG (cnt: %zu, last: %d)",
                        strlen(msg->test_log.log_buf),
                        msg->test_log.is_last);
                break;
index 2dbcbf3..b605a70 100644 (file)
@@ -1260,7 +1260,7 @@ static int get_xlated_program(int fd_prog, struct bpf_insn **buf, int *cnt)
 
        bzero(&info, sizeof(info));
        info.xlated_prog_len = xlated_prog_len;
-       info.xlated_prog_insns = (__u64)*buf;
+       info.xlated_prog_insns = (__u64)(unsigned long)*buf;
        if (bpf_obj_get_info_by_fd(fd_prog, &info, &info_len)) {
                perror("second bpf_obj_get_info_by_fd failed");
                goto out_free_buf;
index 2f0d705..05d980f 100644 (file)
@@ -41,6 +41,7 @@
 /x86_64/svm_vmcall_test
 /x86_64/svm_int_ctl_test
 /x86_64/svm_nested_soft_inject_test
+/x86_64/svm_nested_shutdown_test
 /x86_64/sync_regs_test
 /x86_64/tsc_msrs_test
 /x86_64/tsc_scaling_sync
index 0172eb6..4a2caef 100644 (file)
@@ -101,6 +101,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/state_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
+TEST_GEN_PROGS_x86_64 += x86_64/svm_nested_shutdown_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_nested_soft_inject_test
 TEST_GEN_PROGS_x86_64 += x86_64/tsc_scaling_sync
 TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
index e8ca0d8..5da0c5e 100644 (file)
@@ -748,6 +748,19 @@ struct ex_regs {
        uint64_t rflags;
 };
 
+struct idt_entry {
+       uint16_t offset0;
+       uint16_t selector;
+       uint16_t ist : 3;
+       uint16_t : 5;
+       uint16_t type : 4;
+       uint16_t : 1;
+       uint16_t dpl : 2;
+       uint16_t p : 1;
+       uint16_t offset1;
+       uint32_t offset2; uint32_t reserved;
+};
+
 void vm_init_descriptor_tables(struct kvm_vm *vm);
 void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
 void vm_install_exception_handler(struct kvm_vm *vm, int vector,
index 39c4409..41c1c73 100644 (file)
@@ -1074,19 +1074,6 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
        }
 }
 
-struct idt_entry {
-       uint16_t offset0;
-       uint16_t selector;
-       uint16_t ist : 3;
-       uint16_t : 5;
-       uint16_t type : 4;
-       uint16_t : 1;
-       uint16_t dpl : 2;
-       uint16_t p : 1;
-       uint16_t offset1;
-       uint32_t offset2; uint32_t reserved;
-};
-
 static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
                          int dpl, unsigned short selector)
 {
diff --git a/tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c b/tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c
new file mode 100644 (file)
index 0000000..e73fcde
--- /dev/null
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * svm_nested_shutdown_test
+ *
+ * Copyright (C) 2022, Red Hat, Inc.
+ *
+ * Nested SVM testing: test that unintercepted shutdown in L2 doesn't crash the host
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+
+static void l2_guest_code(struct svm_test_data *svm)
+{
+       __asm__ __volatile__("ud2");
+}
+
+static void l1_guest_code(struct svm_test_data *svm, struct idt_entry *idt)
+{
+       #define L2_GUEST_STACK_SIZE 64
+       unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+       struct vmcb *vmcb = svm->vmcb;
+
+       generic_svm_setup(svm, l2_guest_code,
+                         &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+       vmcb->control.intercept &= ~(BIT(INTERCEPT_SHUTDOWN));
+
+       idt[6].p   = 0; // #UD is intercepted but its injection will cause #NP
+       idt[11].p  = 0; // #NP is not intercepted and will cause another
+                       // #NP that will be converted to #DF
+       idt[8].p   = 0; // #DF will cause #NP which will cause SHUTDOWN
+
+       run_guest(vmcb, svm->vmcb_gpa);
+
+       /* should not reach here */
+       GUEST_ASSERT(0);
+}
+
+int main(int argc, char *argv[])
+{
+       struct kvm_vcpu *vcpu;
+       struct kvm_run *run;
+       vm_vaddr_t svm_gva;
+       struct kvm_vm *vm;
+
+       TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
+
+       vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
+       vm_init_descriptor_tables(vm);
+       vcpu_init_descriptor_tables(vcpu);
+
+       vcpu_alloc_svm(vm, &svm_gva);
+
+       vcpu_args_set(vcpu, 2, svm_gva, vm->idt);
+       run = vcpu->run;
+
+       vcpu_run(vcpu);
+       TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
+                   "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
+                   run->exit_reason,
+                   exit_reason_str(run->exit_reason));
+
+       kvm_vm_free(vm);
+}
index 70b44f0..ead5d87 100644 (file)
@@ -3,6 +3,7 @@
 #include "kvm_util.h"
 #include "processor.h"
 #include "vmx.h"
+#include "svm_util.h"
 
 #include <string.h>
 #include <sys/ioctl.h>
@@ -20,10 +21,11 @@ static void l2_guest_code(void)
                     : : [port] "d" (ARBITRARY_IO_PORT) : "rax");
 }
 
-void l1_guest_code(struct vmx_pages *vmx)
-{
 #define L2_GUEST_STACK_SIZE 64
-       unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+void l1_guest_code_vmx(struct vmx_pages *vmx)
+{
 
        GUEST_ASSERT(vmx->vmcs_gpa);
        GUEST_ASSERT(prepare_for_vmx_operation(vmx));
@@ -38,24 +40,53 @@ void l1_guest_code(struct vmx_pages *vmx)
        GUEST_DONE();
 }
 
+void l1_guest_code_svm(struct svm_test_data *svm)
+{
+       struct vmcb *vmcb = svm->vmcb;
+
+       generic_svm_setup(svm, l2_guest_code,
+                       &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+       /* don't intercept shutdown to test the case of SVM allowing to do so */
+       vmcb->control.intercept &= ~(BIT(INTERCEPT_SHUTDOWN));
+
+       run_guest(vmcb, svm->vmcb_gpa);
+
+       /* should not reach here, L1 should crash  */
+       GUEST_ASSERT(0);
+}
+
 int main(void)
 {
        struct kvm_vcpu *vcpu;
        struct kvm_run *run;
        struct kvm_vcpu_events events;
-       vm_vaddr_t vmx_pages_gva;
        struct ucall uc;
 
-       TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+       bool has_vmx = kvm_cpu_has(X86_FEATURE_VMX);
+       bool has_svm = kvm_cpu_has(X86_FEATURE_SVM);
+
+       TEST_REQUIRE(has_vmx || has_svm);
 
        TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_TRIPLE_FAULT_EVENT));
 
-       vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
-       vm_enable_cap(vm, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 1);
 
+       if (has_vmx) {
+               vm_vaddr_t vmx_pages_gva;
+
+               vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_vmx);
+               vcpu_alloc_vmx(vm, &vmx_pages_gva);
+               vcpu_args_set(vcpu, 1, vmx_pages_gva);
+       } else {
+               vm_vaddr_t svm_gva;
+
+               vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code_svm);
+               vcpu_alloc_svm(vm, &svm_gva);
+               vcpu_args_set(vcpu, 1, svm_gva);
+       }
+
+       vm_enable_cap(vm, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 1);
        run = vcpu->run;
-       vcpu_alloc_vmx(vm, &vmx_pages_gva);
-       vcpu_args_set(vcpu, 1, vmx_pages_gva);
        vcpu_run(vcpu);
 
        TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
@@ -78,13 +109,21 @@ int main(void)
                    "No triple fault pending");
        vcpu_run(vcpu);
 
-       switch (get_ucall(vcpu, &uc)) {
-       case UCALL_DONE:
-               break;
-       case UCALL_ABORT:
-               REPORT_GUEST_ASSERT(uc);
-       default:
-               TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
-       }
 
+       if (has_svm) {
+               TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
+                           "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
+                           run->exit_reason,
+                           exit_reason_str(run->exit_reason));
+       } else {
+               switch (get_ucall(vcpu, &uc)) {
+               case UCALL_DONE:
+                       break;
+               case UCALL_ABORT:
+                       REPORT_GUEST_ASSERT(uc);
+               default:
+                       TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
+               }
+       }
+       return 0;
 }
index 31c3b6e..21ca914 100755 (executable)
@@ -4196,10 +4196,13 @@ elif [ "$TESTS" = "ipv6" ]; then
        TESTS="$TESTS_IPV6"
 fi
 
-which nettest >/dev/null
-if [ $? -ne 0 ]; then
-       echo "'nettest' command not found; skipping tests"
-       exit $ksft_skip
+# nettest can be run from PATH or from same directory as this selftest
+if ! which nettest >/dev/null; then
+       PATH=$PWD:$PATH
+       if ! which nettest >/dev/null; then
+               echo "'nettest' command not found; skipping tests"
+               exit $ksft_skip
+       fi
 fi
 
 declare -i nfail=0
index f3dd5f2..2eeaf4a 100755 (executable)
@@ -2152,7 +2152,7 @@ remove_tests()
                pm_nl_set_limits $ns2 1 3
                pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
                pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
-               run_tests $ns1 $ns2 10.0.1.1 0 -1 -2 slow
+               run_tests $ns1 $ns2 10.0.1.1 0 -1 -2 speed_10
                chk_join_nr 3 3 3
                chk_add_nr 1 1
                chk_rm_nr 2 2
@@ -2165,7 +2165,7 @@ remove_tests()
                pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
                pm_nl_add_endpoint $ns1 10.0.4.1 flags signal
                pm_nl_set_limits $ns2 3 3
-               run_tests $ns1 $ns2 10.0.1.1 0 -3 0 slow
+               run_tests $ns1 $ns2 10.0.1.1 0 -3 0 speed_10
                chk_join_nr 3 3 3
                chk_add_nr 3 3
                chk_rm_nr 3 3 invert
@@ -2178,7 +2178,7 @@ remove_tests()
                pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
                pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
                pm_nl_set_limits $ns2 3 3
-               run_tests $ns1 $ns2 10.0.1.1 0 -3 0 slow
+               run_tests $ns1 $ns2 10.0.1.1 0 -3 0 speed_10
                chk_join_nr 1 1 1
                chk_add_nr 3 3
                chk_rm_nr 3 1 invert
index 0879da9..80d36f7 100755 (executable)
@@ -35,8 +35,9 @@ init()
 
        ns1="ns1-$rndh"
        ns2="ns2-$rndh"
+       ns_sbox="ns_sbox-$rndh"
 
-       for netns in "$ns1" "$ns2";do
+       for netns in "$ns1" "$ns2" "$ns_sbox";do
                ip netns add $netns || exit $ksft_skip
                ip -net $netns link set lo up
                ip netns exec $netns sysctl -q net.mptcp.enabled=1
@@ -73,7 +74,7 @@ init()
 
 cleanup()
 {
-       for netns in "$ns1" "$ns2"; do
+       for netns in "$ns1" "$ns2" "$ns_sbox"; do
                ip netns del $netns
        done
        rm -f "$cin" "$cout"
@@ -243,7 +244,7 @@ do_mptcp_sockopt_tests()
 {
        local lret=0
 
-       ./mptcp_sockopt
+       ip netns exec "$ns_sbox" ./mptcp_sockopt
        lret=$?
 
        if [ $lret -ne 0 ]; then
@@ -252,7 +253,7 @@ do_mptcp_sockopt_tests()
                return
        fi
 
-       ./mptcp_sockopt -6
+       ip netns exec "$ns_sbox" ./mptcp_sockopt -6
        lret=$?
 
        if [ $lret -ne 0 ]; then
index ffa13a9..40aeb5a 100755 (executable)
@@ -247,9 +247,10 @@ run_test()
        tc -n $ns2 qdisc add dev ns2eth1 root netem rate ${rate1}mbit $delay1
        tc -n $ns2 qdisc add dev ns2eth2 root netem rate ${rate2}mbit $delay2
 
-       # time is measured in ms, account for transfer size, affegated link speed
+       # time is measured in ms, account for transfer size, aggregated link speed
        # and header overhead (10%)
-       local time=$((size * 8 * 1000 * 10 / (( $rate1 + $rate2) * 1024 *1024 * 9) ))
+       #              ms    byte -> bit   10%        mbit      -> kbit -> bit  10%
+       local time=$((1000 * size  *  8  * 10 / ((rate1 + rate2) * 1000 * 1000 * 9) ))
 
        # mptcp_connect will do some sleeps to allow the mp_join handshake
        # completion (see mptcp_connect): 200ms on each side, add some slack
index 736e358..dfe3d28 100755 (executable)
@@ -686,10 +686,12 @@ setup_xfrm() {
 }
 
 setup_nettest_xfrm() {
-       which nettest >/dev/null
-       if [ $? -ne 0 ]; then
-               echo "'nettest' command not found; skipping tests"
-               return 1
+       if ! which nettest >/dev/null; then
+               PATH=$PWD:$PATH
+               if ! which nettest >/dev/null; then
+                       echo "'nettest' command not found; skipping tests"
+                       return 1
+               fi
        fi
 
        [ ${1} -eq 6 ] && proto="-6" || proto=""
index 6a443ca..0c74375 100755 (executable)
@@ -5,6 +5,8 @@
 
 readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
 
+BPF_FILE="../bpf/xdp_dummy.bpf.o"
+
 # set global exit status, but never reset nonzero one.
 check_err()
 {
@@ -34,7 +36,7 @@ cfg_veth() {
        ip -netns "${PEER_NS}" addr add dev veth1 192.168.1.1/24
        ip -netns "${PEER_NS}" addr add dev veth1 2001:db8::1/64 nodad
        ip -netns "${PEER_NS}" link set dev veth1 up
-       ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp
+       ip -n "${PEER_NS}" link set veth1 xdp object ${BPF_FILE} section xdp
 }
 
 run_one() {
@@ -195,8 +197,8 @@ run_all() {
        return $ret
 }
 
-if [ ! -f ../bpf/xdp_dummy.o ]; then
-       echo "Missing xdp_dummy helper. Build bpf selftest first"
+if [ ! -f ${BPF_FILE} ]; then
+       echo "Missing ${BPF_FILE}. Build bpf selftest first"
        exit -1
 fi
 
index 8a1109a..8949728 100755 (executable)
@@ -5,6 +5,8 @@
 
 readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
 
+BPF_FILE="../bpf/xdp_dummy.bpf.o"
+
 cleanup() {
        local -r jobs="$(jobs -p)"
        local -r ns="$(ip netns list|grep $PEER_NS)"
@@ -34,7 +36,7 @@ run_one() {
        ip -netns "${PEER_NS}" addr add dev veth1 2001:db8::1/64 nodad
        ip -netns "${PEER_NS}" link set dev veth1 up
 
-       ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp
+       ip -n "${PEER_NS}" link set veth1 xdp object ${BPF_FILE} section xdp
        ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r &
        ip netns exec "${PEER_NS}" ./udpgso_bench_rx -t ${rx_args} -r &
 
@@ -80,8 +82,8 @@ run_all() {
        run_udp "${ipv6_args}"
 }
 
-if [ ! -f ../bpf/xdp_dummy.o ]; then
-       echo "Missing xdp_dummy helper. Build bpf selftest first"
+if [ ! -f ${BPF_FILE} ]; then
+       echo "Missing ${BPF_FILE}. Build bpf selftest first"
        exit -1
 fi
 
index 7fe85ba..c9c4b9d 100755 (executable)
@@ -5,6 +5,8 @@
 
 readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
 
+BPF_FILE="../bpf/xdp_dummy.bpf.o"
+
 cleanup() {
        local -r jobs="$(jobs -p)"
        local -r ns="$(ip netns list|grep $PEER_NS)"
@@ -36,7 +38,7 @@ run_one() {
        ip netns exec "${PEER_NS}" ethtool -K veth1 rx-gro-list on
 
 
-       ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp
+       ip -n "${PEER_NS}" link set veth1 xdp object ${BPF_FILE} section xdp
        tc -n "${PEER_NS}" qdisc add dev veth1 clsact
        tc -n "${PEER_NS}" filter add dev veth1 ingress prio 4 protocol ipv6 bpf object-file ../bpf/nat6to4.o section schedcls/ingress6/nat_6  direct-action
        tc -n "${PEER_NS}" filter add dev veth1 egress prio 4 protocol ip bpf object-file ../bpf/nat6to4.o section schedcls/egress4/snat4 direct-action
@@ -81,8 +83,8 @@ run_all() {
        run_udp "${ipv6_args}"
 }
 
-if [ ! -f ../bpf/xdp_dummy.o ]; then
-       echo "Missing xdp_dummy helper. Build bpf selftest first"
+if [ ! -f ${BPF_FILE} ]; then
+       echo "Missing ${BPF_FILE}. Build bpf selftest first"
        exit -1
 fi
 
index 1bcd82e..c079565 100755 (executable)
@@ -1,6 +1,7 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
+BPF_FILE="../bpf/xdp_dummy.bpf.o"
 readonly BASE="ns-$(mktemp -u XXXXXX)"
 readonly SRC=2
 readonly DST=1
@@ -46,7 +47,7 @@ create_ns() {
                ip -n $BASE$ns addr add dev veth$ns $BM_NET_V4$ns/24
                ip -n $BASE$ns addr add dev veth$ns $BM_NET_V6$ns/64 nodad
        done
-       ip -n $NS_DST link set veth$DST xdp object ../bpf/xdp_dummy.o section xdp 2>/dev/null
+       ip -n $NS_DST link set veth$DST xdp object ${BPF_FILE} section xdp 2>/dev/null
 }
 
 create_vxlan_endpoint() {
index 430895d..2d07359 100755 (executable)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
+BPF_FILE="../bpf/xdp_dummy.bpf.o"
 readonly STATS="$(mktemp -p /tmp ns-XXXXXX)"
 readonly BASE=`basename $STATS`
 readonly SRC=2
@@ -216,8 +217,8 @@ while getopts "hs:" option; do
        esac
 done
 
-if [ ! -f ../bpf/xdp_dummy.o ]; then
-       echo "Missing xdp_dummy helper. Build bpf selftest first"
+if [ ! -f ${BPF_FILE} ]; then
+       echo "Missing ${BPF_FILE}. Build bpf selftest first"
        exit 1
 fi
 
@@ -288,14 +289,14 @@ if [ $CPUS -gt 1 ]; then
        ip netns exec $NS_DST ethtool -L veth$DST rx 1 tx 2 2>/dev/null
        ip netns exec $NS_SRC ethtool -L veth$SRC rx 1 tx 2 2>/dev/null
        printf "%-60s" "bad setting: XDP with RX nr less than TX"
-       ip -n $NS_DST link set dev veth$DST xdp object ../bpf/xdp_dummy.o \
+       ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} \
                section xdp 2>/dev/null &&\
                echo "fail - set operation successful ?!?" || echo " ok "
 
        # the following tests will run with multiple channels active
        ip netns exec $NS_SRC ethtool -L veth$SRC rx 2
        ip netns exec $NS_DST ethtool -L veth$DST rx 2
-       ip -n $NS_DST link set dev veth$DST xdp object ../bpf/xdp_dummy.o \
+       ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} \
                section xdp 2>/dev/null
        printf "%-60s" "bad setting: reducing RX nr below peer TX with XDP set"
        ip netns exec $NS_DST ethtool -L veth$DST rx 1 2>/dev/null &&\
@@ -311,7 +312,7 @@ if [ $CPUS -gt 2 ]; then
        chk_channels "setting invalid channels nr" $DST 2 2
 fi
 
-ip -n $NS_DST link set dev veth$DST xdp object ../bpf/xdp_dummy.o section xdp 2>/dev/null
+ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} section xdp 2>/dev/null
 chk_gro_flag "with xdp attached - gro flag" $DST on
 chk_gro_flag "        - peer gro flag" $SRC off
 chk_tso_flag "        - tso flag" $SRC off
index 25d7872..fab4d37 100644 (file)
@@ -1198,8 +1198,6 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
                        goto out_err_no_arch_destroy_vm;
        }
 
-       kvm->max_halt_poll_ns = halt_poll_ns;
-
        r = kvm_arch_init_vm(kvm, type);
        if (r)
                goto out_err_no_arch_destroy_vm;
@@ -3377,9 +3375,6 @@ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
        if (val < grow_start)
                val = grow_start;
 
-       if (val > vcpu->kvm->max_halt_poll_ns)
-               val = vcpu->kvm->max_halt_poll_ns;
-
        vcpu->halt_poll_ns = val;
 out:
        trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
@@ -3483,6 +3478,24 @@ static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
        }
 }
 
+static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+
+       if (kvm->override_halt_poll_ns) {
+               /*
+                * Ensure kvm->max_halt_poll_ns is not read before
+                * kvm->override_halt_poll_ns.
+                *
+                * Pairs with the smp_wmb() when enabling KVM_CAP_HALT_POLL.
+                */
+               smp_rmb();
+               return READ_ONCE(kvm->max_halt_poll_ns);
+       }
+
+       return READ_ONCE(halt_poll_ns);
+}
+
 /*
  * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc...  If halt
  * polling is enabled, busy wait for a short time before blocking to avoid the
@@ -3491,12 +3504,18 @@ static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
  */
 void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
 {
+       unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
        bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
-       bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
        ktime_t start, cur, poll_end;
        bool waited = false;
+       bool do_halt_poll;
        u64 halt_ns;
 
+       if (vcpu->halt_poll_ns > max_halt_poll_ns)
+               vcpu->halt_poll_ns = max_halt_poll_ns;
+
+       do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
+
        start = cur = poll_end = ktime_get();
        if (do_halt_poll) {
                ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
@@ -3535,18 +3554,21 @@ out:
                update_halt_poll_stats(vcpu, start, poll_end, !waited);
 
        if (halt_poll_allowed) {
+               /* Recompute the max halt poll time in case it changed. */
+               max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
+
                if (!vcpu_valid_wakeup(vcpu)) {
                        shrink_halt_poll_ns(vcpu);
-               } else if (vcpu->kvm->max_halt_poll_ns) {
+               } else if (max_halt_poll_ns) {
                        if (halt_ns <= vcpu->halt_poll_ns)
                                ;
                        /* we had a long block, shrink polling */
                        else if (vcpu->halt_poll_ns &&
-                                halt_ns > vcpu->kvm->max_halt_poll_ns)
+                                halt_ns > max_halt_poll_ns)
                                shrink_halt_poll_ns(vcpu);
                        /* we had a short halt and our poll time is too small */
-                       else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns &&
-                                halt_ns < vcpu->kvm->max_halt_poll_ns)
+                       else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
+                                halt_ns < max_halt_poll_ns)
                                grow_halt_poll_ns(vcpu);
                } else {
                        vcpu->halt_poll_ns = 0;
@@ -4581,6 +4603,16 @@ static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
                        return -EINVAL;
 
                kvm->max_halt_poll_ns = cap->args[0];
+
+               /*
+                * Ensure kvm->override_halt_poll_ns does not become visible
+                * before kvm->max_halt_poll_ns.
+                *
+                * Pairs with the smp_rmb() in kvm_vcpu_max_halt_poll_ns().
+                */
+               smp_wmb();
+               kvm->override_halt_poll_ns = true;
+
                return 0;
        }
        case KVM_CAP_DIRTY_LOG_RING:
index 346e47f..7c24819 100644 (file)
@@ -297,7 +297,12 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
        if (!gpc->valid || old_uhva != gpc->uhva) {
                ret = hva_to_pfn_retry(kvm, gpc);
        } else {
-               /* If the HVA→PFN mapping was already valid, don't unmap it. */
+               /*
+                * If the HVA→PFN mapping was already valid, don't unmap it.
+                * But do update gpc->khva because the offset within the page
+                * may have changed.
+                */
+               gpc->khva = old_khva + page_offset;
                old_pfn = KVM_PFN_ERR_FAULT;
                old_khva = NULL;
                ret = 0;