Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
authorJakub Kicinski <kuba@kernel.org>
Sat, 28 Jan 2023 07:32:02 +0000 (23:32 -0800)
committerJakub Kicinski <kuba@kernel.org>
Sat, 28 Jan 2023 07:32:03 +0000 (23:32 -0800)
Daniel Borkmann says:

====================
bpf 2023-01-27

We've added 10 non-merge commits during the last 9 day(s) which contain
a total of 10 files changed, 170 insertions(+), 59 deletions(-).

The main changes are:

1) Fix preservation of register's parent/live fields when copying
   range-info, from Eduard Zingerman.

2) Fix an off-by-one bug in bpf_mem_cache_idx() to select the right
   cache, from Hou Tao.

3) Fix stack overflow from infinite recursion in sock_map_close(),
   from Jakub Sitnicki.

4) Fix missing btf_put() in register_btf_id_dtor_kfuncs()'s error path,
   from Jiri Olsa.

5) Fix a splat from bpf_setsockopt() via lsm_cgroup/socket_sock_rcv_skb,
   from Kui-Feng Lee.

6) Fix bpf_send_signal[_thread]() helpers to hold a reference on the task,
   from Yonghong Song.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf: Fix the kernel crash caused by bpf_setsockopt().
  selftests/bpf: Cover listener cloning with progs attached to sockmap
  selftests/bpf: Pass BPF skeleton to sockmap_listen ops tests
  bpf, sockmap: Check for any of tcp_bpf_prots when cloning a listener
  bpf, sockmap: Don't let sock_map_{close,destroy,unhash} call itself
  bpf: Add missing btf_put to register_btf_id_dtor_kfuncs
  selftests/bpf: Verify copy_register_state() preserves parent/live fields
  bpf: Fix to preserve reg parent/live fields when copying range info
  bpf: Fix a possible task gone issue with bpf_send_signal[_thread]() helpers
  bpf: Fix off-by-one error in bpf_mem_cache_idx()
====================

Link: https://lore.kernel.org/r/20230127215820.4993-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
603 files changed:
.mailmap
CREDITS
Documentation/admin-guide/mm/zswap.rst
Documentation/arm64/silicon-errata.rst
Documentation/conf.py
Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml
Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml
Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml
Documentation/devicetree/bindings/display/msm/qcom,qcm2290-mdss.yaml
Documentation/devicetree/bindings/display/msm/qcom,sm6115-mdss.yaml
Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml
Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml [moved from Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml with 85% similarity]
Documentation/devicetree/bindings/phy/amlogic,g12a-usb3-pcie-phy.yaml [moved from Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml with 82% similarity]
Documentation/devicetree/bindings/phy/qcom,usb-hs-28nm.yaml
Documentation/devicetree/bindings/soc/qcom/qcom,apr-services.yaml
Documentation/devicetree/bindings/sound/mt8186-mt6366-rt1019-rt5682s.yaml
Documentation/devicetree/bindings/sound/qcom,lpass-tx-macro.yaml
Documentation/devicetree/bindings/sound/qcom,lpass-wsa-macro.yaml
Documentation/filesystems/erofs.rst
Documentation/kbuild/makefiles.rst
Documentation/networking/bridge.rst
Documentation/networking/nf_conntrack-sysctl.rst
Documentation/sphinx/load_config.py
Documentation/virt/kvm/api.rst
Documentation/virt/kvm/locking.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/armada-38x.dtsi
arch/arm/boot/dts/armada-39x.dtsi
arch/arm/boot/dts/imx53-ppd.dts
arch/arm/boot/dts/imx6qdl-gw560x.dtsi
arch/arm/boot/dts/imx6ul-pico-dwarf.dts
arch/arm/boot/dts/imx7d-pico-dwarf.dts
arch/arm/boot/dts/imx7d-pico-nymph.dts
arch/arm/boot/dts/qcom-apq8084-ifc6540.dts
arch/arm/boot/dts/qcom-apq8084.dtsi
arch/arm/boot/dts/sam9x60.dtsi
arch/arm/boot/dts/stm32mp151a-prtt1l.dtsi
arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
arch/arm/mach-footbridge/isa-rtc.c
arch/arm/mach-imx/cpu-imx25.c
arch/arm/mach-imx/cpu-imx27.c
arch/arm/mach-imx/cpu-imx31.c
arch/arm/mach-imx/cpu-imx35.c
arch/arm/mach-imx/cpu-imx5.c
arch/arm/mach-omap1/Kconfig
arch/arm/mach-omap1/Makefile
arch/arm/mach-omap1/gpio15xx.c
arch/arm/mach-omap1/io.c
arch/arm/mach-omap1/mcbsp.c
arch/arm/mach-omap1/pm.h
arch/arm/mach-pxa/Kconfig
arch/arm64/Kconfig
arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts
arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts
arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi
arch/arm64/boot/dts/freescale/imx8mp-evk.dts
arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
arch/arm64/boot/dts/freescale/imx8mp.dtsi
arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts
arch/arm64/boot/dts/freescale/imx8mq-thor96.dts
arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
arch/arm64/boot/dts/qcom/msm8992.dtsi
arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
arch/arm64/boot/dts/qcom/sc8280xp.dtsi
arch/arm64/boot/dts/qcom/sm8250.dtsi
arch/arm64/boot/dts/qcom/sm8350.dtsi
arch/arm64/include/asm/atomic_ll_sc.h
arch/arm64/include/asm/atomic_lse.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/esr.h
arch/arm64/include/asm/hugetlb.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/stacktrace.h
arch/arm64/include/asm/uprobes.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/efi-rt-wrapper.S
arch/arm64/kernel/efi.c
arch/arm64/kernel/elfcore.c
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/signal.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kvm/guest.c
arch/arm64/kvm/hyp/include/hyp/fault.h
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/mmu.c
arch/arm64/kvm/sys_regs.c
arch/arm64/kvm/vgic/vgic-v3.c
arch/arm64/kvm/vgic/vgic-v4.c
arch/arm64/kvm/vgic/vgic.h
arch/arm64/mm/hugetlbpage.c
arch/arm64/mm/mmu.c
arch/arm64/tools/cpucaps
arch/ia64/kernel/elfcore.c
arch/loongarch/include/asm/ftrace.h
arch/loongarch/include/asm/inst.h
arch/loongarch/include/asm/unwind.h
arch/loongarch/kernel/Makefile
arch/loongarch/kernel/alternative.c
arch/loongarch/kernel/cpu-probe.c
arch/loongarch/kernel/genex.S
arch/loongarch/kernel/inst.c
arch/loongarch/kernel/process.c
arch/loongarch/kernel/traps.c
arch/loongarch/kernel/unwind.c [new file with mode: 0644]
arch/loongarch/kernel/unwind_guess.c
arch/loongarch/kernel/unwind_prologue.c
arch/loongarch/mm/tlb.c
arch/powerpc/boot/wrapper
arch/powerpc/include/asm/imc-pmu.h
arch/powerpc/mm/book3s64/hash_utils.c
arch/powerpc/perf/imc-pmu.c
arch/riscv/boot/dts/sifive/fu740-c000.dtsi
arch/s390/kernel/setup.c
arch/x86/boot/bioscall.S
arch/x86/events/intel/core.c
arch/x86/events/intel/cstate.c
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/cpu/aperfmperf.c
arch/x86/kernel/cpu/resctrl/monitor.c
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/xen.c
arch/x86/mm/init.c
arch/x86/mm/pat/memtype.c
arch/x86/pci/mmconfig-shared.c
arch/x86/um/elfcore.c
block/bfq-cgroup.c
block/bfq-iosched.h
block/blk-cgroup.c
block/blk-core.c
block/blk-mq.c
drivers/accessibility/speakup/spk_ttyio.c
drivers/acpi/glue.c
drivers/acpi/prmt.c
drivers/acpi/resource.c
drivers/acpi/scan.c
drivers/acpi/video_detect.c
drivers/ata/Kconfig
drivers/base/property.c
drivers/base/test/test_async_driver_probe.c
drivers/block/pktcdvd.c
drivers/block/rnbd/rnbd-clt.c
drivers/comedi/drivers/adv_pci1760.c
drivers/cpufreq/amd-pstate.c
drivers/cpufreq/apple-soc-cpufreq.c
drivers/cpufreq/armada-37xx-cpufreq.c
drivers/cpufreq/cppc_cpufreq.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/qcom-cpufreq-hw.c
drivers/dma/dmaengine.c
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
drivers/dma/idxd/device.c
drivers/dma/imx-sdma.c
drivers/dma/lgm/lgm-dma.c
drivers/dma/ptdma/ptdma-dev.c
drivers/dma/ptdma/ptdma.h
drivers/dma/qcom/gpi.c
drivers/dma/tegra186-gpc-dma.c
drivers/dma/tegra210-adma.c
drivers/dma/ti/k3-udma.c
drivers/dma/xilinx/xilinx_dma.c
drivers/edac/edac_device.c
drivers/edac/edac_module.h
drivers/edac/highbank_mc_edac.c
drivers/edac/qcom_edac.c
drivers/firmware/arm_scmi/driver.c
drivers/firmware/arm_scmi/shmem.c
drivers/firmware/arm_scmi/virtio.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/runtime-wrappers.c
drivers/firmware/google/coreboot_table.c
drivers/firmware/google/coreboot_table.h
drivers/firmware/google/gsmi.c
drivers/firmware/psci/psci.c
drivers/gpio/gpio-mxc.c
drivers/gpio/gpiolib-acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/drm_buddy.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_panel_orientation_quirks.c
drivers/gpu/drm/i915/display/skl_universal_plane.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
drivers/gpu/drm/i915/gt/intel_gt_regs.h
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/i915_driver.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_switcheroo.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.h
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.h
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
drivers/gpu/drm/msm/dp/dp_aux.c
drivers/gpu/drm/msm/hdmi/hdmi.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_mdss.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c [deleted file]
drivers/gpu/drm/panfrost/Kconfig
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/vc4/vc4_bo.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/vmwgfx/ttm_object.c
drivers/gpu/drm/vmwgfx/ttm_object.h
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h [changed mode: 0755->0644]
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/hid/amd-sfh-hid/amd_sfh_client.c
drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
drivers/hid/hid-betopff.c
drivers/hid/hid-bigbenff.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-playstation.c
drivers/hid/hid-quirks.c
drivers/hid/hid-uclogic-core.c
drivers/hid/hid-uclogic-params.c
drivers/hid/intel-ish-hid/ishtp/dma-if.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/hfi1/user_exp_rcv.c
drivers/infiniband/hw/hfi1/user_exp_rcv.h
drivers/infiniband/sw/rxe/rxe_param.h
drivers/infiniband/sw/rxe/rxe_pool.c
drivers/interconnect/qcom/icc-rpm.c
drivers/interconnect/qcom/msm8996.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
drivers/iommu/arm/arm-smmu/arm-smmu.c
drivers/iommu/iommu.c
drivers/iommu/iova.c
drivers/iommu/mtk_iommu_v1.c
drivers/md/md.c
drivers/memory/atmel-sdramc.c
drivers/memory/mvebu-devbus.c
drivers/memory/omap-gpmc.c
drivers/memory/tegra/tegra186.c
drivers/misc/fastrpc.c
drivers/misc/mei/bus.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/pci-me.c
drivers/misc/vmw_vmci/vmci_guest.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sunxi-mmc.c
drivers/net/dsa/Kconfig
drivers/net/dsa/microchip/ksz9477.c
drivers/net/dsa/microchip/ksz9477_i2c.c
drivers/net/ethernet/adi/adin1110.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/engleder/tsnep_main.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/intel/iavf/iavf.h
drivers/net/ethernet/intel/iavf/iavf_ethtool.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/qos.c
drivers/net/ethernet/mellanox/mlx5/core/qos.h
drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
drivers/net/ethernet/microsoft/mana/gdma_main.c
drivers/net/ethernet/qlogic/qede/qede_fp.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/rswitch.c
drivers/net/ethernet/renesas/rswitch.h
drivers/net/ethernet/stmicro/stmmac/dwmac5.c
drivers/net/mdio/mdio-mux-meson-g12a.c
drivers/net/team/team.c
drivers/net/virtio_net.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/mediatek/mt76/dma.c
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
drivers/net/wireless/mediatek/mt76/tx.c
drivers/net/wireless/rndis_wlan.c
drivers/nvme/host/apple.c
drivers/nvme/host/ioctl.c
drivers/nvme/host/pci.c
drivers/pci/controller/dwc/Kconfig
drivers/phy/freescale/phy-fsl-imx8m-pcie.c
drivers/phy/phy-can-transceiver.c
drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c
drivers/phy/renesas/r8a779f0-ether-serdes.c
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
drivers/phy/sunplus/phy-sunplus-usb2.c
drivers/phy/ti/Kconfig
drivers/pinctrl/nomadik/pinctrl-ab8500.c
drivers/pinctrl/nomadik/pinctrl-ab8505.c
drivers/pinctrl/nomadik/pinctrl-abx500.c
drivers/pinctrl/nomadik/pinctrl-abx500.h
drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
drivers/pinctrl/nomadik/pinctrl-nomadik.c
drivers/pinctrl/nomadik/pinctrl-nomadik.h
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/sunplus/sppctl.c
drivers/platform/surface/aggregator/controller.c
drivers/platform/surface/aggregator/ssh_request_layer.c
drivers/platform/x86/amd/pmc.c
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/asus-wmi.h
drivers/platform/x86/dell/dell-wmi-privacy.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel/int3472/clk_and_regulator.c
drivers/platform/x86/intel/int3472/discrete.c
drivers/platform/x86/intel/pmc/core.c
drivers/platform/x86/simatic-ipc.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/touchscreen_dmi.c
drivers/reset/Kconfig
drivers/reset/reset-uniphier-glue.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/hpsa.c
drivers/scsi/iscsi_tcp.c
drivers/scsi/libiscsi.c
drivers/soc/imx/imx8mp-blk-ctrl.c
drivers/soc/imx/soc-imx8m.c
drivers/soc/qcom/apr.c
drivers/soc/qcom/cpr.c
drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
drivers/target/target_core_tmr.c
drivers/thermal/thermal_core.c
drivers/thunderbolt/retimer.c
drivers/thunderbolt/tb.c
drivers/thunderbolt/tunnel.c
drivers/thunderbolt/xdomain.c
drivers/tty/serial/8250/8250_exar.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/kgdboc.c
drivers/tty/serial/pch_uart.c
drivers/tty/serial/qcom_geni_serial.c
drivers/tty/serial/serial_core.c
drivers/ufs/core/ufshcd.c
drivers/usb/cdns3/cdns3-gadget.c
drivers/usb/chipidea/core.c
drivers/usb/core/hub.c
drivers/usb/core/usb-acpi.c
drivers/usb/dwc3/Kconfig
drivers/usb/gadget/configfs.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_ncm.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/gadget/legacy/webcam.c
drivers/usb/host/ehci-fsl.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/iowarrior.c
drivers/usb/misc/onboard_usb_hub.c
drivers/usb/musb/omap2430.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/option.c
drivers/usb/storage/uas-detect.h
drivers/usb/storage/unusual_uas.h
drivers/usb/typec/altmodes/displayport.c
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/ucsi/ucsi.c
drivers/usb/typec/ucsi/ucsi.h
drivers/vfio/vfio_iommu_type1.c
drivers/w1/w1.c
drivers/w1/w1_int.c
fs/affs/file.c
fs/binfmt_elf.c
fs/binfmt_elf_fdpic.c
fs/btrfs/disk-io.c
fs/btrfs/file.c
fs/btrfs/fs.h
fs/btrfs/qgroup.c
fs/btrfs/space-info.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/zoned.c
fs/cifs/cifsencrypt.c
fs/cifs/connect.c
fs/cifs/dfs_cache.c
fs/cifs/dfs_cache.h
fs/cifs/link.c
fs/cifs/sess.c
fs/cifs/smb1ops.c
fs/cifs/smb2pdu.c
fs/erofs/super.c
fs/erofs/zdata.c
fs/erofs/zmap.c
fs/ext4/xattr.c
fs/fuse/acl.c
fs/fuse/dir.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/fuse/xattr.c
fs/gfs2/log.c
fs/nfsd/filecache.c
fs/nfsd/netns.h
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4state.c
fs/nfsd/nfsctl.c
fs/nfsd/nfsd.h
fs/nilfs2/btree.c
fs/userfaultfd.c
fs/zonefs/super.c
include/acpi/acpi_bus.h
include/linux/elfcore.h
include/linux/firmware/xlnx-zynqmp.h
include/linux/mm.h
include/linux/mm_inline.h
include/linux/mm_types.h
include/linux/page_ref.h
include/linux/platform_data/x86/simatic-ipc.h
include/linux/soc/ti/omap1-io.h
include/linux/tpm_eventlog.h
include/linux/usb.h
include/net/mana/gdma.h
include/scsi/libiscsi.h
include/soc/bcm2835/raspberrypi-firmware.h
include/uapi/linux/netfilter/nf_conntrack_sctp.h
include/uapi/linux/netfilter/nfnetlink_cttimeout.h
include/uapi/linux/psci.h
include/ufs/ufshcd.h
init/Kconfig
init/Makefile
init/version-timestamp.c
io_uring/fdinfo.c
io_uring/io-wq.c
io_uring/io_uring.c
io_uring/msg_ring.c
io_uring/poll.c
io_uring/rw.c
kernel/gen_kheaders.sh
kernel/kallsyms_selftest.c
kernel/module/main.c
kernel/printk/printk.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/sys.c
lib/lockref.c
lib/nlattr.c
lib/scatterlist.c
lib/win_minmax.c
mm/hugetlb.c
mm/kasan/report.c
mm/khugepaged.c
mm/madvise.c
mm/memblock.c
mm/mmap.c
mm/nommu.c
mm/shmem.c
mm/slab.c
net/core/gro.c
net/core/net_namespace.c
net/ipv4/fib_semantics.c
net/ipv4/inet_hashtables.c
net/ipv4/inet_timewait_sock.c
net/ipv4/metrics.c
net/ipv4/tcp.c
net/ipv4/tcp_ulp.c
net/ipv6/ip6_output.c
net/mac80211/iface.c
net/mctp/af_mctp.c
net/mctp/route.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nft_set_rbtree.c
net/netlink/af_netlink.c
net/netrom/nr_timer.c
net/sched/sch_gred.c
net/sched/sch_taprio.c
net/sctp/bind_addr.c
net/x25/af_x25.c
rust/kernel/print.rs
scripts/Makefile.vmlinux
scripts/jobserver-exec
scripts/kconfig/.gitignore
scripts/kconfig/Makefile
scripts/package/mkspec
security/tomoyo/Kconfig
security/tomoyo/Makefile
sound/core/control.c
sound/core/control_led.c
sound/pci/hda/cs35l41_hda.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/yc/acp6x-mach.c
sound/soc/codecs/es8326.c [changed mode: 0755->0644]
sound/soc/codecs/es8326.h [changed mode: 0755->0644]
sound/soc/codecs/rt9120.c
sound/soc/codecs/wm8904.c
sound/soc/fsl/fsl-asoc-card.c
sound/soc/fsl/fsl_micfil.c
sound/soc/fsl/fsl_ssi.c
sound/soc/intel/boards/Kconfig
sound/soc/intel/boards/sof_nau8825.c
sound/soc/intel/common/soc-acpi-intel-adl-match.c
sound/soc/intel/common/soc-acpi-intel-rpl-match.c
sound/soc/mediatek/Kconfig
sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
sound/soc/qcom/Kconfig
sound/soc/qcom/Makefile
sound/soc/qcom/common.c
sound/soc/qcom/common.h
sound/soc/qcom/lpass-cpu.c
sound/soc/qcom/sc8280xp.c
sound/soc/qcom/sdw.c [new file with mode: 0644]
sound/soc/qcom/sdw.h [new file with mode: 0644]
sound/soc/qcom/sm8250.c
sound/soc/sof/debug.c
sound/soc/sof/pm.c
sound/usb/implicit.c
sound/usb/pcm.c
sound/usb/stream.c
tools/arch/arm64/include/asm/cputype.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/include/linux/build_bug.h
tools/include/uapi/linux/kvm.h
tools/perf/tests/shell/buildid.sh
tools/perf/trace/beauty/include/linux/socket.h
tools/perf/util/build-id.c
tools/perf/util/expr.l
tools/testing/memblock/internal.h
tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
tools/testing/selftests/lib.mk
tools/testing/selftests/net/toeplitz.c
tools/testing/selftests/proc/proc-empty-vm.c
tools/testing/selftests/proc/proc-pid-vm.c
virt/kvm/kvm_main.c
virt/kvm/vfio.c

index 562f70d..8deff4c 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -371,6 +371,7 @@ Rémi Denis-Courmont <rdenis@simphalempin.com>
 Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com>
 Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org>
 Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
+Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org>
 Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
 Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
 Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
diff --git a/CREDITS b/CREDITS
index 4e302a4..acac06b 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -2489,6 +2489,13 @@ D: XF86_Mach8
 D: XF86_8514
 D: cfdisk (curses based disk partitioning program)
 
+N: Mat Martineau
+E: mat@martineau.name
+D: MPTCP subsystem co-maintainer 2020-2023
+D: Keyctl restricted keyring and Diffie-Hellman UAPI
+D: Bluetooth L2CAP ERTM mode and AMP
+S: USA
+
 N: John S. Marvin
 E: jsm@fc.hp.com
 D: PA-RISC port
index f67de48..6dd74a1 100644 (file)
@@ -70,9 +70,7 @@ e.g. ``zswap.zpool=zbud``. It can also be changed at runtime using the sysfs
 The zbud type zpool allocates exactly 1 page to store 2 compressed pages, which
 means the compression ratio will always be 2:1 or worse (because of half-full
 zbud pages).  The zsmalloc type zpool has a more complex compressed page
-storage method, and it can achieve greater storage densities.  However,
-zsmalloc does not implement compressed page eviction, so once zswap fills it
-cannot evict the oldest page, it can only reject new pages.
+storage method, and it can achieve greater storage densities.
 
 When a swap page is passed from frontswap to zswap, zswap maintains a mapping
 of the swap entry, a combination of the swap type and swap offset, to the zpool
index 808ade4..ec5f889 100644 (file)
@@ -120,6 +120,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2224489        | ARM64_ERRATUM_2224489       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A715     | #2645198        | ARM64_ERRATUM_2645198       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2119858        | ARM64_ERRATUM_2119858       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2224489        | ARM64_ERRATUM_2224489       |
index a5c45df..d927737 100644 (file)
@@ -31,6 +31,12 @@ def have_command(cmd):
 # Get Sphinx version
 major, minor, patch = sphinx.version_info[:3]
 
+#
+# Warn about older versions that we don't want to support for much
+# longer.
+#
+if (major < 2) or (major == 2 and minor < 4):
+    print('WARNING: support for Sphinx < 2.4 will be removed soon.')
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
@@ -339,7 +345,11 @@ html_use_smartypants = False
 
 # Custom sidebar templates, maps document names to template names.
 # Note that the RTD theme ignores this
-html_sidebars = { '**': ["about.html", 'searchbox.html', 'localtoc.html', 'sourcelink.html']}
+html_sidebars = { '**': ['searchbox.html', 'localtoc.html', 'sourcelink.html']}
+
+# about.html is available for alabaster theme. Add it at the front.
+if html_theme == 'alabaster':
+    html_sidebars['**'].insert(0, 'about.html')
 
 # Output file base name for HTML help builder.
 htmlhelp_basename = 'TheLinuxKerneldoc'
index 903b311..99e159b 100644 (file)
@@ -54,6 +54,17 @@ properties:
       - const: xo
       - const: alternate
 
+  interrupts:
+    minItems: 1
+    maxItems: 3
+
+  interrupt-names:
+    minItems: 1
+    items:
+      - const: dcvsh-irq-0
+      - const: dcvsh-irq-1
+      - const: dcvsh-irq-2
+
   '#freq-domain-cells':
     const: 1
 
index f2c1437..6e2fd6e 100644 (file)
@@ -32,7 +32,7 @@ properties:
       - description: Display byte clock
       - description: Display byte interface clock
       - description: Display pixel clock
-      - description: Display escape clock
+      - description: Display core clock
       - description: Display AHB clock
       - description: Display AXI clock
 
@@ -137,8 +137,6 @@ required:
   - phys
   - assigned-clocks
   - assigned-clock-parents
-  - power-domains
-  - operating-points-v2
   - ports
 
 additionalProperties: false
index 3d8540a..2f1fd14 100644 (file)
@@ -34,6 +34,10 @@ properties:
   vddio-supply:
     description: Phandle to vdd-io regulator device node.
 
+  qcom,dsi-phy-regulator-ldo-mode:
+    type: boolean
+    description: Indicates if the LDO mode PHY regulator is wanted.
+
 required:
   - compatible
   - reg
index d6f043a..4795e13 100644 (file)
@@ -72,7 +72,7 @@ examples:
     #include <dt-bindings/interconnect/qcom,qcm2290.h>
     #include <dt-bindings/power/qcom-rpmpd.h>
 
-    mdss@5e00000 {
+    display-subsystem@5e00000 {
         #address-cells = <1>;
         #size-cells = <1>;
         compatible = "qcom,qcm2290-mdss";
index a86d7f5..886858e 100644 (file)
@@ -62,7 +62,7 @@ examples:
     #include <dt-bindings/interrupt-controller/arm-gic.h>
     #include <dt-bindings/power/qcom-rpmpd.h>
 
-    mdss@5e00000 {
+    display-subsystem@5e00000 {
         #address-cells = <1>;
         #size-cells = <1>;
         compatible = "qcom,sm6115-mdss";
index 4b37aa8..5e6be4e 100644 (file)
@@ -84,7 +84,6 @@ allOf:
               - qcom,msm8939-pcnoc
               - qcom,msm8939-snoc
               - qcom,msm8996-a1noc
-              - qcom,msm8996-a2noc
               - qcom,msm8996-bimc
               - qcom,msm8996-cnoc
               - qcom,msm8996-pnoc
@@ -191,6 +190,29 @@ allOf:
         compatible:
           contains:
             enum:
+              - qcom,msm8996-a2noc
+
+    then:
+      properties:
+        clock-names:
+          items:
+            - const: bus
+            - const: bus_a
+            - const: aggre2_ufs_axi
+            - const: ufs_axi
+
+        clocks:
+          items:
+            - description: Bus Clock
+            - description: Bus A Clock
+            - description: Aggregate2 NoC UFS AXI Clock
+            - description: UFS AXI Clock
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
               - qcom,sdm660-a2noc
 
     then:
@@ -2,7 +2,7 @@
 # Copyright 2019 BayLibre, SAS
 %YAML 1.2
 ---
-$id: "http://devicetree.org/schemas/phy/amlogic,meson-g12a-usb2-phy.yaml#"
+$id: "http://devicetree.org/schemas/phy/amlogic,g12a-usb2-phy.yaml#"
 $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 
 title: Amlogic G12A USB2 PHY
@@ -13,8 +13,8 @@ maintainers:
 properties:
   compatible:
     enum:
-      - amlogic,meson-g12a-usb2-phy
-      - amlogic,meson-a1-usb2-phy
+      - amlogic,g12a-usb2-phy
+      - amlogic,a1-usb2-phy
 
   reg:
     maxItems: 1
@@ -68,7 +68,7 @@ additionalProperties: false
 examples:
   - |
     phy@36000 {
-          compatible = "amlogic,meson-g12a-usb2-phy";
+          compatible = "amlogic,g12a-usb2-phy";
           reg = <0x36000 0x2000>;
           clocks = <&xtal>;
           clock-names = "xtal";
@@ -2,7 +2,7 @@
 # Copyright 2019 BayLibre, SAS
 %YAML 1.2
 ---
-$id: "http://devicetree.org/schemas/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml#"
+$id: "http://devicetree.org/schemas/phy/amlogic,g12a-usb3-pcie-phy.yaml#"
 $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 
 title: Amlogic G12A USB3 + PCIE Combo PHY
@@ -13,7 +13,7 @@ maintainers:
 properties:
   compatible:
     enum:
-      - amlogic,meson-g12a-usb3-pcie-phy
+      - amlogic,g12a-usb3-pcie-phy
 
   reg:
     maxItems: 1
@@ -49,7 +49,7 @@ additionalProperties: false
 examples:
   - |
     phy@46000 {
-          compatible = "amlogic,meson-g12a-usb3-pcie-phy";
+          compatible = "amlogic,g12a-usb3-pcie-phy";
           reg = <0x46000 0x2000>;
           clocks = <&ref_clk>;
           clock-names = "ref_clk";
index abcc437..ca6a083 100644 (file)
@@ -16,7 +16,6 @@ properties:
   compatible:
     enum:
       - qcom,usb-hs-28nm-femtophy
-      - qcom,usb-hs-28nm-mdm9607
 
   reg:
     maxItems: 1
index 2905554..bdf482d 100644 (file)
@@ -39,8 +39,8 @@ properties:
   qcom,protection-domain:
     $ref: /schemas/types.yaml#/definitions/string-array
     description: |
-      Protection domain service name and path for APR service
-      possible values are::
+      Protection domain service name and path for APR service (if supported).
+      Possible values are::
       "avs/audio", "msm/adsp/audio_pd".
       "kernel/elf_loader", "msm/modem/wlan_pd".
       "tms/servreg", "msm/adsp/audio_pd".
@@ -49,6 +49,5 @@ properties:
 
 required:
   - reg
-  - qcom,protection-domain
 
 additionalProperties: true
index 9d31399..aa23b00 100644 (file)
@@ -16,6 +16,7 @@ properties:
   compatible:
     enum:
       - mediatek,mt8186-mt6366-rt1019-rt5682s-sound
+      - mediatek,mt8186-mt6366-rt5682s-max98360-sound
 
   mediatek,platform:
     $ref: "/schemas/types.yaml#/definitions/phandle"
index 66431aa..da5f709 100644 (file)
@@ -30,7 +30,9 @@ properties:
     const: 0
 
   clocks:
-    maxItems: 5
+    oneOf:
+      - maxItems: 3
+      - maxItems: 5
 
   clock-names:
     oneOf:
index 2bf8d08..66cbb1f 100644 (file)
@@ -9,9 +9,6 @@ title: LPASS(Low Power Audio Subsystem) VA Macro audio codec
 maintainers:
   - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 
-allOf:
-  - $ref: dai-common.yaml#
-
 properties:
   compatible:
     enum:
@@ -30,15 +27,12 @@ properties:
     const: 0
 
   clocks:
-    maxItems: 5
+    minItems: 5
+    maxItems: 6
 
   clock-names:
-    items:
-      - const: mclk
-      - const: npl
-      - const: macro
-      - const: dcodec
-      - const: fsgen
+    minItems: 5
+    maxItems: 6
 
   clock-output-names:
     maxItems: 1
@@ -55,10 +49,51 @@ required:
   - reg
   - "#sound-dai-cells"
 
+allOf:
+  - $ref: dai-common.yaml#
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - qcom,sc7280-lpass-wsa-macro
+            - qcom,sm8450-lpass-wsa-macro
+            - qcom,sc8280xp-lpass-wsa-macro
+    then:
+      properties:
+        clocks:
+          maxItems: 5
+        clock-names:
+          items:
+            - const: mclk
+            - const: npl
+            - const: macro
+            - const: dcodec
+            - const: fsgen
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - qcom,sm8250-lpass-wsa-macro
+    then:
+      properties:
+        clocks:
+          minItems: 6
+        clock-names:
+          items:
+            - const: mclk
+            - const: npl
+            - const: macro
+            - const: dcodec
+            - const: va
+            - const: fsgen
+
 unevaluatedProperties: false
 
 examples:
   - |
+    #include <dt-bindings/clock/qcom,sm8250-lpass-aoncc.h>
     #include <dt-bindings/sound/qcom,q6afe.h>
     codec@3240000 {
       compatible = "qcom,sm8250-lpass-wsa-macro";
@@ -69,7 +104,8 @@ examples:
                <&audiocc 0>,
                <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
                <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+               <&aoncc LPASS_CDC_VA_MCLK>,
                <&vamacro>;
-      clock-names = "mclk", "npl", "macro", "dcodec", "fsgen";
+      clock-names = "mclk", "npl", "macro", "dcodec", "va", "fsgen";
       clock-output-names = "mclk";
     };
index 067fd16..a43aacf 100644 (file)
@@ -120,6 +120,8 @@ dax={always,never}     Use direct access (no page cache).  See
 dax                    A legacy option which is an alias for ``dax=always``.
 device=%s              Specify a path to an extra device to be used together.
 fsid=%s                Specify a filesystem image ID for Fscache back-end.
+domain_id=%s           Specify a domain ID in fscache mode so that different images
+                       with the same blobs under a given domain ID can share storage.
 ===================    =========================================================
 
 Sysfs Entries
index 6b7368d..38bc74e 100644 (file)
@@ -1042,7 +1042,7 @@ $(clean-files).
 
 When executing "make clean", the file "crc32table.h" will be deleted.
 Kbuild will assume files to be in the same relative directory as the
-Makefile, except if prefixed with $(objtree).
+Makefile.
 
 To exclude certain files or directories from make clean, use the
 $(no-clean-files) variable.
index 4aef9cd..c859f3c 100644 (file)
@@ -8,7 +8,7 @@ In order to use the Ethernet bridging functionality, you'll need the
 userspace tools.
 
 Documentation for Linux bridging is on:
-   http://www.linuxfoundation.org/collaborate/workgroups/networking/bridge
+   https://wiki.linuxfoundation.org/networking/bridge
 
 The bridge-utilities are maintained at:
    git://git.kernel.org/pub/scm/linux/kernel/git/shemminger/bridge-utils.git
index 49db1d1..8b1045c 100644 (file)
@@ -173,7 +173,9 @@ nf_conntrack_sctp_timeout_cookie_echoed - INTEGER (seconds)
        default 3
 
 nf_conntrack_sctp_timeout_established - INTEGER (seconds)
-       default 432000 (5 days)
+       default 210
+
+       Default is set to (hb_interval * path_max_retrans + rto_max)
 
 nf_conntrack_sctp_timeout_shutdown_sent - INTEGER (seconds)
        default 0.3
@@ -190,12 +192,6 @@ nf_conntrack_sctp_timeout_heartbeat_sent - INTEGER (seconds)
        This timeout is used to setup conntrack entry on secondary paths.
        Default is set to hb_interval.
 
-nf_conntrack_sctp_timeout_heartbeat_acked - INTEGER (seconds)
-       default 210
-
-       This timeout is used to setup conntrack entry on secondary paths.
-       Default is set to (hb_interval * path_max_retrans + rto_max)
-
 nf_conntrack_udp_timeout - INTEGER (seconds)
        default 30
 
index eeb394b..8b416bf 100644 (file)
@@ -3,7 +3,7 @@
 
 import os
 import sys
-from sphinx.util.pycompat import execfile_
+from sphinx.util.osutil import fs_encoding
 
 # ------------------------------------------------------------------------------
 def loadConfig(namespace):
@@ -48,7 +48,9 @@ def loadConfig(namespace):
             sys.stdout.write("load additional sphinx-config: %s\n" % config_file)
             config = namespace.copy()
             config['__file__'] = config_file
-            execfile_(config_file, config)
+            with open(config_file, 'rb') as f:
+                code = compile(f.read(), fs_encoding, 'exec')
+                exec(code, config)
             del config['__file__']
             namespace.update(config)
         else:
index deb494f..9807b05 100644 (file)
@@ -1354,6 +1354,14 @@ the memory region are automatically reflected into the guest.  For example, an
 mmap() that affects the region will be made visible immediately.  Another
 example is madvise(MADV_DROP).
 
+Note: On arm64, a write generated by the page-table walker (to update
+the Access and Dirty flags, for example) never results in a
+KVM_EXIT_MMIO exit when the slot has the KVM_MEM_READONLY flag. This
+is because KVM cannot provide the data that would be written by the
+page-table walker, making it impossible to emulate the access.
+Instead, an abort (data abort if the cause of the page-table update
+was a load or a store, instruction abort if it was an instruction
+fetch) is injected in the guest.
 
 4.36 KVM_SET_TSS_ADDR
 ---------------------
@@ -8310,6 +8318,20 @@ CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by ``KVM_GET_SUPPORTED_CPUID``
 It can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER`` is present and the kernel
 has enabled in-kernel emulation of the local APIC.
 
+CPU topology
+~~~~~~~~~~~~
+
+Several CPUID values include topology information for the host CPU:
+0x0b and 0x1f for Intel systems, 0x8000001e for AMD systems.  Different
+versions of KVM return different values for this information and userspace
+should not rely on it.  Currently they return all zeroes.
+
+If userspace wishes to set up a guest topology, it should be careful that
+the values of these three leaves differ for each CPU.  In particular,
+the APIC ID is found in EDX for all subleaves of 0x0b and 0x1f, and in EAX
+for 0x8000001e; the latter also encodes the core id and node id in bits
+7:0 of EBX and ECX respectively.
+
 Obsolete ioctls and capabilities
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
index a3ca76f..a014679 100644 (file)
@@ -24,21 +24,22 @@ The acquisition orders for mutexes are as follows:
 
 For SRCU:
 
-- ``synchronize_srcu(&kvm->srcu)`` is called _inside_
-  the kvm->slots_lock critical section, therefore kvm->slots_lock
-  cannot be taken inside a kvm->srcu read-side critical section.
-  Instead, kvm->slots_arch_lock is released before the call
-  to ``synchronize_srcu()`` and _can_ be taken inside a
-  kvm->srcu read-side critical section.
-
-- kvm->lock is taken inside kvm->srcu, therefore
-  ``synchronize_srcu(&kvm->srcu)`` cannot be called inside
-  a kvm->lock critical section.  If you cannot delay the
-  call until after kvm->lock is released, use ``call_srcu``.
+- ``synchronize_srcu(&kvm->srcu)`` is called inside critical sections
+  for kvm->lock, vcpu->mutex and kvm->slots_lock.  These locks _cannot_
+  be taken inside a kvm->srcu read-side critical section; that is, the
+  following is broken::
+
+      srcu_read_lock(&kvm->srcu);
+      mutex_lock(&kvm->slots_lock);
+
+- kvm->slots_arch_lock instead is released before the call to
+  ``synchronize_srcu()``.  It _can_ therefore be taken inside a
+  kvm->srcu read-side critical section, for example while processing
+  a vmexit.
 
 On x86:
 
-- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock
+- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock and kvm->arch.xen.xen_lock
 
 - kvm->arch.mmu_lock is an rwlock.  kvm->arch.tdp_mmu_pages_lock and
   kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and
index e5c43cb..7c88455 100644 (file)
@@ -383,7 +383,7 @@ ACPI COMPONENT ARCHITECTURE (ACPICA)
 M:     Robert Moore <robert.moore@intel.com>
 M:     "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
 L:     linux-acpi@vger.kernel.org
-L:     devel@acpica.org
+L:     acpica-devel@lists.linuxfoundation.org
 S:     Supported
 W:     https://acpica.org/
 W:     https://github.com/acpica/acpica/
@@ -6947,7 +6947,7 @@ F:        drivers/gpu/drm/atmel-hlcdc/
 DRM DRIVERS FOR BRIDGE CHIPS
 M:     Andrzej Hajda <andrzej.hajda@intel.com>
 M:     Neil Armstrong <neil.armstrong@linaro.org>
-M:     Robert Foss <robert.foss@linaro.org>
+M:     Robert Foss <rfoss@kernel.org>
 R:     Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
 R:     Jonas Karlman <jonas@kwiboo.se>
 R:     Jernej Skrabec <jernej.skrabec@gmail.com>
@@ -7615,7 +7615,6 @@ S:        Maintained
 F:     drivers/firmware/efi/test/
 
 EFI VARIABLE FILESYSTEM
-M:     Matthew Garrett <matthew.garrett@nebula.com>
 M:     Jeremy Kerr <jk@ozlabs.org>
 M:     Ard Biesheuvel <ardb@kernel.org>
 L:     linux-efi@vger.kernel.org
@@ -8467,16 +8466,16 @@ F:      fs/fscache/
 F:     include/linux/fscache*.h
 
 FSCRYPT: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
+M:     Eric Biggers <ebiggers@kernel.org>
 M:     Theodore Y. Ts'o <tytso@mit.edu>
 M:     Jaegeuk Kim <jaegeuk@kernel.org>
-M:     Eric Biggers <ebiggers@kernel.org>
 L:     linux-fscrypt@vger.kernel.org
 S:     Supported
 Q:     https://patchwork.kernel.org/project/linux-fscrypt/list/
-T:     git git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt.git
+T:     git https://git.kernel.org/pub/scm/fs/fscrypt/linux.git
 F:     Documentation/filesystems/fscrypt.rst
 F:     fs/crypto/
-F:     include/linux/fscrypt*.h
+F:     include/linux/fscrypt.h
 F:     include/uapi/linux/fscrypt.h
 
 FSI SUBSYSTEM
@@ -8519,10 +8518,10 @@ F:      include/linux/fsnotify*.h
 FSVERITY: READ-ONLY FILE-BASED AUTHENTICITY PROTECTION
 M:     Eric Biggers <ebiggers@kernel.org>
 M:     Theodore Y. Ts'o <tytso@mit.edu>
-L:     linux-fscrypt@vger.kernel.org
+L:     fsverity@lists.linux.dev
 S:     Supported
-Q:     https://patchwork.kernel.org/project/linux-fscrypt/list/
-T:     git git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt.git fsverity
+Q:     https://patchwork.kernel.org/project/fsverity/list/
+T:     git https://git.kernel.org/pub/scm/fs/fsverity/linux.git
 F:     Documentation/filesystems/fsverity.rst
 F:     fs/verity/
 F:     include/linux/fsverity.h
@@ -9298,7 +9297,7 @@ F:        net/dsa/tag_hellcreek.c
 
 HISILICON DMA DRIVER
 M:     Zhou Wang <wangzhou1@hisilicon.com>
-M:     Jie Hai <haijie1@hisilicon.com>
+M:     Jie Hai <haijie1@huawei.com>
 L:     dmaengine@vger.kernel.org
 S:     Maintained
 F:     drivers/dma/hisi_dma.c
@@ -11355,9 +11354,9 @@ F:      virt/kvm/*
 KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
 M:     Marc Zyngier <maz@kernel.org>
 R:     James Morse <james.morse@arm.com>
-R:     Alexandru Elisei <alexandru.elisei@arm.com>
 R:     Suzuki K Poulose <suzuki.poulose@arm.com>
 R:     Oliver Upton <oliver.upton@linux.dev>
+R:     Zenghui Yu <yuzenghui@huawei.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     kvmarm@lists.linux.dev
 L:     kvmarm@lists.cs.columbia.edu (deprecated, moderated for non-subscribers)
@@ -14633,7 +14632,6 @@ F:      net/netfilter/xt_SECMARK.c
 F:     net/netlabel/
 
 NETWORKING [MPTCP]
-M:     Mat Martineau <mathew.j.martineau@linux.intel.com>
 M:     Matthieu Baerts <matthieu.baerts@tessares.net>
 L:     netdev@vger.kernel.org
 L:     mptcp@lists.linux.dev
@@ -14918,7 +14916,8 @@ T:      git://git.infradead.org/nvme.git
 F:     Documentation/nvme/
 F:     drivers/nvme/host/
 F:     drivers/nvme/common/
-F:     include/linux/nvme*
+F:     include/linux/nvme.h
+F:     include/linux/nvme-*.h
 F:     include/uapi/linux/nvme_ioctl.h
 
 NVM EXPRESS FABRICS AUTHENTICATION
@@ -15748,6 +15747,12 @@ S:     Maintained
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/p54
 F:     drivers/net/wireless/intersil/p54/
 
+PACKET SOCKETS
+M:     Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+S:     Maintained
+F:     include/uapi/linux/if_packet.h
+F:     net/packet/af_packet.c
+
 PACKING
 M:     Vladimir Oltean <olteanv@gmail.com>
 L:     netdev@vger.kernel.org
@@ -17236,7 +17241,7 @@ F:      Documentation/devicetree/bindings/net/qcom,bam-dmux.yaml
 F:     drivers/net/wwan/qcom_bam_dmux.c
 
 QUALCOMM CAMERA SUBSYSTEM DRIVER
-M:     Robert Foss <robert.foss@linaro.org>
+M:     Robert Foss <rfoss@kernel.org>
 M:     Todor Tomov <todor.too@gmail.com>
 L:     linux-media@vger.kernel.org
 S:     Maintained
@@ -17316,7 +17321,7 @@ F:      drivers/dma/qcom/hidma*
 
 QUALCOMM I2C CCI DRIVER
 M:     Loic Poulain <loic.poulain@linaro.org>
-M:     Robert Foss <robert.foss@linaro.org>
+M:     Robert Foss <rfoss@kernel.org>
 L:     linux-i2c@vger.kernel.org
 L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
@@ -19324,6 +19329,13 @@ L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Orphan
 F:     sound/soc/uniphier/
 
+SOCKET TIMESTAMPING
+M:     Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+S:     Maintained
+F:     Documentation/networking/timestamping.rst
+F:     include/uapi/linux/net_tstamp.h
+F:     tools/testing/selftests/net/so_txtime.c
+
 SOEKRIS NET48XX LED SUPPORT
 M:     Chris Boot <bootc@bootc.net>
 S:     Maintained
@@ -21744,6 +21756,13 @@ T:     git git://linuxtv.org/media_tree.git
 F:     Documentation/admin-guide/media/zr364xx*
 F:     drivers/staging/media/deprecated/zr364xx/
 
+USER DATAGRAM PROTOCOL (UDP)
+M:     Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+S:     Maintained
+F:     include/linux/udp.h
+F:     net/ipv4/udp.c
+F:     net/ipv6/udp.c
+
 USER-MODE LINUX (UML)
 M:     Richard Weinberger <richard@nod.at>
 M:     Anton Ivanov <anton.ivanov@cambridgegreys.com>
index 4607163..c1ead4c 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
@@ -549,7 +549,7 @@ LDFLAGS_MODULE  =
 CFLAGS_KERNEL  =
 RUSTFLAGS_KERNEL =
 AFLAGS_KERNEL  =
-export LDFLAGS_vmlinux =
+LDFLAGS_vmlinux =
 
 # Use USERINCLUDE when you must reference the UAPI directories only.
 USERINCLUDE    := \
@@ -1248,6 +1248,18 @@ vmlinux.o modules.builtin.modinfo modules.builtin: vmlinux_o
        @:
 
 PHONY += vmlinux
+# LDFLAGS_vmlinux in the top Makefile defines linker flags for the top vmlinux,
+# not for decompressors. LDFLAGS_vmlinux in arch/*/boot/compressed/Makefile is
+# unrelated; the decompressors just happen to have the same base name,
+# arch/*/boot/compressed/vmlinux.
+# Export LDFLAGS_vmlinux only to scripts/Makefile.vmlinux.
+#
+# _LDFLAGS_vmlinux is a workaround for the 'private export' bug:
+#   https://savannah.gnu.org/bugs/?61463
+# For Make > 4.4, the following simple code will work:
+#  vmlinux: private export LDFLAGS_vmlinux := $(LDFLAGS_vmlinux)
+vmlinux: private _LDFLAGS_vmlinux := $(LDFLAGS_vmlinux)
+vmlinux: export LDFLAGS_vmlinux = $(_LDFLAGS_vmlinux)
 vmlinux: vmlinux.o $(KBUILD_LDS) modpost
        $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.vmlinux
 
@@ -1533,6 +1545,7 @@ endif
 # *.ko are usually independent of vmlinux, but CONFIG_DEBUG_INFOBTF_MODULES
 # is an exception.
 ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+KBUILD_BUILTIN := 1
 modules: vmlinux
 endif
 
index 12933ef..446861b 100644 (file)
                        };
 
                        gpio0: gpio@18100 {
-                               compatible = "marvell,armadaxp-gpio",
+                               compatible = "marvell,armada-370-gpio",
                                             "marvell,orion-gpio";
                                reg = <0x18100 0x40>, <0x181c0 0x08>;
                                reg-names = "gpio", "pwm";
                        };
 
                        gpio1: gpio@18140 {
-                               compatible = "marvell,armadaxp-gpio",
+                               compatible = "marvell,armada-370-gpio",
                                             "marvell,orion-gpio";
                                reg = <0x18140 0x40>, <0x181c8 0x08>;
                                reg-names = "gpio", "pwm";
index 1e05208..9d1cac4 100644 (file)
                        };
 
                        gpio0: gpio@18100 {
-                               compatible = "marvell,armadaxp-gpio", "marvell,orion-gpio";
+                               compatible = "marvell,orion-gpio";
                                reg = <0x18100 0x40>;
                                ngpios = <32>;
                                gpio-controller;
                        };
 
                        gpio1: gpio@18140 {
-                               compatible = "marvell,armadaxp-gpio", "marvell,orion-gpio";
+                               compatible = "marvell,orion-gpio";
                                reg = <0x18140 0x40>;
                                ngpios = <28>;
                                gpio-controller;
index 37d0cff..70c4a48 100644 (file)
        scl-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
        status = "okay";
 
-       i2c-switch@70 {
+       i2c-mux@70 {
                compatible = "nxp,pca9547";
                #address-cells = <1>;
                #size-cells = <0>;
index 4bc4371..4b81a97 100644 (file)
 &uart1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_uart1>;
-       uart-has-rtscts;
        rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
index 162dc25..5a74c7f 100644 (file)
@@ -32,7 +32,7 @@
 };
 
 &i2c2 {
-       clock_frequency = <100000>;
+       clock-frequency = <100000>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_i2c2>;
        status = "okay";
index 5162fe2..fdc1056 100644 (file)
@@ -32,7 +32,7 @@
 };
 
 &i2c1 {
-       clock_frequency = <100000>;
+       clock-frequency = <100000>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_i2c1>;
        status = "okay";
@@ -52,7 +52,7 @@
 };
 
 &i2c4 {
-       clock_frequency = <100000>;
+       clock-frequency = <100000>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_i2c1>;
        status = "okay";
index 104a852..5afb167 100644 (file)
@@ -43,7 +43,7 @@
 };
 
 &i2c1 {
-       clock_frequency = <100000>;
+       clock-frequency = <100000>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_i2c1>;
        status = "okay";
@@ -64,7 +64,7 @@
 };
 
 &i2c2 {
-       clock_frequency = <100000>;
+       clock-frequency = <100000>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_i2c2>;
        status = "okay";
index 44cd72f..116e59a 100644 (file)
                serial@f995e000 {
                        status = "okay";
                };
+       };
+};
 
-               sdhci@f9824900 {
-                       bus-width = <8>;
-                       non-removable;
-                       status = "okay";
-               };
+&sdhc_1 {
+       bus-width = <8>;
+       non-removable;
+       status = "okay";
+};
 
-               sdhci@f98a4900 {
-                       cd-gpios = <&tlmm 122 GPIO_ACTIVE_LOW>;
-                       bus-width = <4>;
-               };
-       };
+&sdhc_2 {
+       cd-gpios = <&tlmm 122 GPIO_ACTIVE_LOW>;
+       bus-width = <4>;
 };
index fe30abf..4b0d2b4 100644 (file)
                        status = "disabled";
                };
 
-               mmc@f9824900 {
+               sdhc_1: mmc@f9824900 {
                        compatible = "qcom,apq8084-sdhci", "qcom,sdhci-msm-v4";
                        reg = <0xf9824900 0x11c>, <0xf9824000 0x800>;
                        reg-names = "hc", "core";
                        status = "disabled";
                };
 
-               mmc@f98a4900 {
+               sdhc_2: mmc@f98a4900 {
                        compatible = "qcom,apq8084-sdhci", "qcom,sdhci-msm-v4";
                        reg = <0xf98a4900 0x11c>, <0xf98a4000 0x800>;
                        reg-names = "hc", "core";
index 8f5477e..37a5d96 100644 (file)
                        mpddrc: mpddrc@ffffe800 {
                                compatible = "microchip,sam9x60-ddramc", "atmel,sama5d3-ddramc";
                                reg = <0xffffe800 0x200>;
-                               clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_CORE PMC_MCK>;
+                               clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_PERIPHERAL 49>;
                                clock-names = "ddrck", "mpddr";
                        };
 
index d865ab5..dd23de8 100644 (file)
 
 &qspi {
        pinctrl-names = "default", "sleep";
-       pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
-       pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
+       pinctrl-0 = <&qspi_clk_pins_a
+                    &qspi_bk1_pins_a
+                    &qspi_cs1_pins_a>;
+       pinctrl-1 = <&qspi_clk_sleep_pins_a
+                    &qspi_bk1_sleep_pins_a
+                    &qspi_cs1_sleep_pins_a>;
        reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
        #address-cells = <1>;
        #size-cells = <0>;
index aef02e6..7d11c50 100644 (file)
 
 &qspi {
        pinctrl-names = "default", "sleep";
-       pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
-       pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
+       pinctrl-0 = <&qspi_clk_pins_a
+                    &qspi_bk1_pins_a
+                    &qspi_cs1_pins_a>;
+       pinctrl-1 = <&qspi_clk_sleep_pins_a
+                    &qspi_bk1_sleep_pins_a
+                    &qspi_cs1_sleep_pins_a>;
        reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
        #address-cells = <1>;
        #size-cells = <0>;
index 002f221..c06edd2 100644 (file)
 
 &qspi {
        pinctrl-names = "default", "sleep";
-       pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
-       pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
+       pinctrl-0 = <&qspi_clk_pins_a
+                    &qspi_bk1_pins_a
+                    &qspi_cs1_pins_a>;
+       pinctrl-1 = <&qspi_clk_sleep_pins_a
+                    &qspi_bk1_sleep_pins_a
+                    &qspi_cs1_sleep_pins_a>;
        reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
        #address-cells = <1>;
        #size-cells = <0>;
index 134a798..bb40fb4 100644 (file)
 
 &qspi {
        pinctrl-names = "default", "sleep";
-       pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
-       pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
+       pinctrl-0 = <&qspi_clk_pins_a
+                    &qspi_bk1_pins_a
+                    &qspi_cs1_pins_a>;
+       pinctrl-1 = <&qspi_clk_sleep_pins_a
+                    &qspi_bk1_sleep_pins_a
+                    &qspi_cs1_sleep_pins_a>;
        reg = <0x58003000 0x1000>, <0x70000000 0x200000>;
        #address-cells = <1>;
        #size-cells = <0>;
index 42ed4a0..6280c5e 100644 (file)
 };
 
 &i2c2 {
-       tca9548@70 {
+       i2c-mux@70 {
                compatible = "nxp,pca9548";
                pinctrl-0 = <&pinctrl_i2c_mux_reset>;
                pinctrl-names = "default";
index f892977..c00d395 100644 (file)
 };
 
 &i2c2 {
-       tca9548@70 {
+       i2c-mux@70 {
                compatible = "nxp,pca9548";
                pinctrl-0 = <&pinctrl_i2c_mux_reset>;
                pinctrl-names = "default";
index b8f741a..237b828 100644 (file)
@@ -20,7 +20,6 @@
 
 #include <linux/init.h>
 #include <linux/mc146818rtc.h>
-#include <linux/bcd.h>
 #include <linux/io.h>
 
 #include "common.h"
index 3e63445..cc86977 100644 (file)
@@ -23,6 +23,7 @@ static int mx25_read_cpu_rev(void)
 
        np = of_find_compatible_node(NULL, NULL, "fsl,imx25-iim");
        iim_base = of_iomap(np, 0);
+       of_node_put(np);
        BUG_ON(!iim_base);
        rev = readl(iim_base + MXC_IIMSREV);
        iounmap(iim_base);
index bf70e13..1d28939 100644 (file)
@@ -28,6 +28,7 @@ static int mx27_read_cpu_rev(void)
 
        np = of_find_compatible_node(NULL, NULL, "fsl,imx27-ccm");
        ccm_base = of_iomap(np, 0);
+       of_node_put(np);
        BUG_ON(!ccm_base);
        /*
         * now we have access to the IO registers. As we need
index b9c24b8..35c5449 100644 (file)
@@ -39,6 +39,7 @@ static int mx31_read_cpu_rev(void)
 
        np = of_find_compatible_node(NULL, NULL, "fsl,imx31-iim");
        iim_base = of_iomap(np, 0);
+       of_node_put(np);
        BUG_ON(!iim_base);
 
        /* read SREV register from IIM module */
index 80e7d8a..1fe75b3 100644 (file)
@@ -21,6 +21,7 @@ static int mx35_read_cpu_rev(void)
 
        np = of_find_compatible_node(NULL, NULL, "fsl,imx35-iim");
        iim_base = of_iomap(np, 0);
+       of_node_put(np);
        BUG_ON(!iim_base);
 
        rev = imx_readl(iim_base + MXC_IIMSREV);
index ad56263..a67c89b 100644 (file)
@@ -28,6 +28,7 @@ static u32 imx5_read_srev_reg(const char *compat)
 
        np = of_find_compatible_node(NULL, NULL, compat);
        iim_base = of_iomap(np, 0);
+       of_node_put(np);
        WARN_ON(!iim_base);
 
        srev = readl(iim_base + IIM_SREV) & 0xff;
index 538a960..7ec7ada 100644 (file)
@@ -4,6 +4,7 @@ menuconfig ARCH_OMAP1
        depends on ARCH_MULTI_V4T || ARCH_MULTI_V5
        depends on CPU_LITTLE_ENDIAN
        depends on ATAGS
+       select ARCH_OMAP
        select ARCH_HAS_HOLES_MEMORYMODEL
        select ARCH_OMAP
        select CLKSRC_MMIO
@@ -45,10 +46,6 @@ config ARCH_OMAP16XX
        select CPU_ARM926T
        select OMAP_DM_TIMER
 
-config ARCH_OMAP1_ANY
-       select ARCH_OMAP
-       def_bool ARCH_OMAP730 || ARCH_OMAP850 || ARCH_OMAP15XX || ARCH_OMAP16XX
-
 config ARCH_OMAP
        bool
 
index 506074b..0615cb0 100644 (file)
@@ -3,8 +3,6 @@
 # Makefile for the linux kernel.
 #
 
-ifdef CONFIG_ARCH_OMAP1_ANY
-
 # Common support
 obj-y := io.o id.o sram-init.o sram.o time.o irq.o mux.o flash.o \
         serial.o devices.o dma.o omap-dma.o fb.o
@@ -59,5 +57,3 @@ obj-$(CONFIG_ARCH_OMAP730)            += gpio7xx.o
 obj-$(CONFIG_ARCH_OMAP850)             += gpio7xx.o
 obj-$(CONFIG_ARCH_OMAP15XX)            += gpio15xx.o
 obj-$(CONFIG_ARCH_OMAP16XX)            += gpio16xx.o
-
-endif
index c675f11..61fa26e 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/gpio.h>
 #include <linux/platform_data/gpio-omap.h>
 #include <linux/soc/ti/omap1-soc.h>
+#include <asm/irq.h>
 
 #include "irqs.h"
 
index d2db9b8..0074b01 100644 (file)
  * The machine specific code may provide the extra mapping besides the
  * default mapping provided here.
  */
-static struct map_desc omap_io_desc[] __initdata = {
+#if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
+static struct map_desc omap7xx_io_desc[] __initdata = {
        {
                .virtual        = OMAP1_IO_VIRT,
                .pfn            = __phys_to_pfn(OMAP1_IO_PHYS),
                .length         = OMAP1_IO_SIZE,
                .type           = MT_DEVICE
-       }
-};
-
-#if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
-static struct map_desc omap7xx_io_desc[] __initdata = {
+       },
        {
                .virtual        = OMAP7XX_DSP_BASE,
                .pfn            = __phys_to_pfn(OMAP7XX_DSP_START),
@@ -50,6 +47,12 @@ static struct map_desc omap7xx_io_desc[] __initdata = {
 #ifdef CONFIG_ARCH_OMAP15XX
 static struct map_desc omap1510_io_desc[] __initdata = {
        {
+               .virtual        = OMAP1_IO_VIRT,
+               .pfn            = __phys_to_pfn(OMAP1_IO_PHYS),
+               .length         = OMAP1_IO_SIZE,
+               .type           = MT_DEVICE
+       },
+       {
                .virtual        = OMAP1510_DSP_BASE,
                .pfn            = __phys_to_pfn(OMAP1510_DSP_START),
                .length         = OMAP1510_DSP_SIZE,
@@ -66,6 +69,12 @@ static struct map_desc omap1510_io_desc[] __initdata = {
 #if defined(CONFIG_ARCH_OMAP16XX)
 static struct map_desc omap16xx_io_desc[] __initdata = {
        {
+               .virtual        = OMAP1_IO_VIRT,
+               .pfn            = __phys_to_pfn(OMAP1_IO_PHYS),
+               .length         = OMAP1_IO_SIZE,
+               .type           = MT_DEVICE
+       },
+       {
                .virtual        = OMAP16XX_DSP_BASE,
                .pfn            = __phys_to_pfn(OMAP16XX_DSP_START),
                .length         = OMAP16XX_DSP_SIZE,
@@ -79,18 +88,9 @@ static struct map_desc omap16xx_io_desc[] __initdata = {
 };
 #endif
 
-/*
- * Maps common IO regions for omap1
- */
-static void __init omap1_map_common_io(void)
-{
-       iotable_init(omap_io_desc, ARRAY_SIZE(omap_io_desc));
-}
-
 #if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
 void __init omap7xx_map_io(void)
 {
-       omap1_map_common_io();
        iotable_init(omap7xx_io_desc, ARRAY_SIZE(omap7xx_io_desc));
 }
 #endif
@@ -98,7 +98,6 @@ void __init omap7xx_map_io(void)
 #ifdef CONFIG_ARCH_OMAP15XX
 void __init omap15xx_map_io(void)
 {
-       omap1_map_common_io();
        iotable_init(omap1510_io_desc, ARRAY_SIZE(omap1510_io_desc));
 }
 #endif
@@ -106,7 +105,6 @@ void __init omap15xx_map_io(void)
 #if defined(CONFIG_ARCH_OMAP16XX)
 void __init omap16xx_map_io(void)
 {
-       omap1_map_common_io();
        iotable_init(omap16xx_io_desc, ARRAY_SIZE(omap16xx_io_desc));
 }
 #endif
index 05c25c4..b1632cb 100644 (file)
@@ -89,7 +89,6 @@ static struct omap_mcbsp_ops omap1_mcbsp_ops = {
 #define OMAP1610_MCBSP2_BASE   0xfffb1000
 #define OMAP1610_MCBSP3_BASE   0xe1017000
 
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
 struct resource omap7xx_mcbsp_res[][6] = {
        {
                {
@@ -159,14 +158,7 @@ static struct omap_mcbsp_platform_data omap7xx_mcbsp_pdata[] = {
 };
 #define OMAP7XX_MCBSP_RES_SZ           ARRAY_SIZE(omap7xx_mcbsp_res[1])
 #define OMAP7XX_MCBSP_COUNT            ARRAY_SIZE(omap7xx_mcbsp_res)
-#else
-#define omap7xx_mcbsp_res_0            NULL
-#define omap7xx_mcbsp_pdata            NULL
-#define OMAP7XX_MCBSP_RES_SZ           0
-#define OMAP7XX_MCBSP_COUNT            0
-#endif
 
-#ifdef CONFIG_ARCH_OMAP15XX
 struct resource omap15xx_mcbsp_res[][6] = {
        {
                {
@@ -266,14 +258,7 @@ static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = {
 };
 #define OMAP15XX_MCBSP_RES_SZ          ARRAY_SIZE(omap15xx_mcbsp_res[1])
 #define OMAP15XX_MCBSP_COUNT           ARRAY_SIZE(omap15xx_mcbsp_res)
-#else
-#define omap15xx_mcbsp_res_0           NULL
-#define omap15xx_mcbsp_pdata           NULL
-#define OMAP15XX_MCBSP_RES_SZ          0
-#define OMAP15XX_MCBSP_COUNT           0
-#endif
 
-#ifdef CONFIG_ARCH_OMAP16XX
 struct resource omap16xx_mcbsp_res[][6] = {
        {
                {
@@ -373,12 +358,6 @@ static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = {
 };
 #define OMAP16XX_MCBSP_RES_SZ          ARRAY_SIZE(omap16xx_mcbsp_res[1])
 #define OMAP16XX_MCBSP_COUNT           ARRAY_SIZE(omap16xx_mcbsp_res)
-#else
-#define omap16xx_mcbsp_res_0           NULL
-#define omap16xx_mcbsp_pdata           NULL
-#define OMAP16XX_MCBSP_RES_SZ          0
-#define OMAP16XX_MCBSP_COUNT           0
-#endif
 
 static void omap_mcbsp_register_board_cfg(struct resource *res, int res_count,
                        struct omap_mcbsp_platform_data *config, int size)
index d916570..0d1f092 100644 (file)
 #define OMAP7XX_IDLECT3                0xfffece24
 #define OMAP7XX_IDLE_LOOP_REQUEST      0x0C00
 
-#if     !defined(CONFIG_ARCH_OMAP730) && \
-       !defined(CONFIG_ARCH_OMAP850) && \
-       !defined(CONFIG_ARCH_OMAP15XX) && \
-       !defined(CONFIG_ARCH_OMAP16XX)
-#warning "Power management for this processor not implemented yet"
-#endif
-
 #ifndef __ASSEMBLER__
 
 #include <linux/clk.h>
index b90d98b..03e25af 100644 (file)
@@ -45,6 +45,8 @@ config MACH_PXA27X_DT
 config MACH_PXA3XX_DT
        bool "Support PXA3xx platforms from device tree"
        select CPU_PXA300
+       select CPU_PXA310
+       select CPU_PXA320
        select PINCTRL
        select POWER_SUPPLY
        select PXA3xx
index 0393480..c5ccca2 100644 (file)
@@ -184,8 +184,6 @@ config ARM64
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_CONTIGUOUS
        select HAVE_DYNAMIC_FTRACE
-       select HAVE_DYNAMIC_FTRACE_WITH_ARGS \
-               if $(cc-option,-fpatchable-function-entry=2)
        select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
                if DYNAMIC_FTRACE_WITH_ARGS
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -972,6 +970,22 @@ config ARM64_ERRATUM_2457168
 
          If unsure, say Y.
 
+config ARM64_ERRATUM_2645198
+       bool "Cortex-A715: 2645198: Workaround possible [ESR|FAR]_ELx corruption"
+       default y
+       help
+         This option adds the workaround for ARM Cortex-A715 erratum 2645198.
+
+         If a Cortex-A715 cpu sees a page mapping permissions change from executable
+         to non-executable, it may corrupt the ESR_ELx and FAR_ELx registers on the
+         next instruction abort caused by permission fault.
+
+         Only user-space does executable to non-executable permission transition via
+         mprotect() system call. Workaround the problem by doing a break-before-make
+         TLB invalidation, for all changes to executable user space mappings.
+
+         If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
        bool "Cavium erratum 22375, 24313"
        default y
index e3486f6..a1f0c38 100644 (file)
 };
 
 &usb {
-       phys = <&usb2_phy1>;
-       phy-names = "usb2-phy1";
-};
-
-&usb2_phy0 {
-       status = "disabled";
+       phys = <&usb2_phy0>, <&usb2_phy1>;
+       phy-names = "usb2-phy0", "usb2-phy1";
 };
index 5a8d85a..bbdf989 100644 (file)
 &i2c0 {
        status = "okay";
 
-       pca9547@77 {
+       i2c-mux@77 {
                compatible = "nxp,pca9547";
                reg = <0x77>;
                #address-cells = <1>;
index 9b726c2..dda27ed 100644 (file)
@@ -89,7 +89,7 @@
 &i2c0 {
        status = "okay";
 
-       pca9547@77 {
+       i2c-mux@77 {
                compatible = "nxp,pca9547";
                reg = <0x77>;
                #address-cells = <1>;
index b2fcbba..3b0ed93 100644 (file)
@@ -88,7 +88,7 @@
 &i2c0 {
        status = "okay";
 
-       pca9547@77 {
+       i2c-mux@77 {
                compatible = "nxp,pca9547";
                reg = <0x77>;
                #address-cells = <1>;
index 41d8b15..aa52ff7 100644 (file)
@@ -53,7 +53,7 @@
 &i2c0 {
        status = "okay";
 
-       i2c-switch@77 {
+       i2c-mux@77 {
                compatible = "nxp,pca9547";
                reg = <0x77>;
                #address-cells = <1>;
index 1bfbce6..ee8e932 100644 (file)
 &i2c0 {
        status = "okay";
 
-       i2c-switch@77 {
+       i2c-mux@77 {
                compatible = "nxp,pca9547";
                reg = <0x77>;
                #address-cells = <1>;
index ef6c896..d4867d6 100644 (file)
 &i2c3 {
        status = "okay";
 
-       i2c-switch@70 {
+       i2c-mux@70 {
                compatible = "nxp,pca9540";
                #address-cells = <1>;
                #size-cells = <0>;
index f598669..52c5a43 100644 (file)
 
 &i2c0 {
        status = "okay";
-       pca9547@77 {
+       i2c-mux@77 {
                compatible = "nxp,pca9547";
                reg = <0x77>;
                #address-cells = <1>;
index 3d9647b..537cecb 100644 (file)
@@ -44,7 +44,7 @@
 
 &i2c0 {
        status = "okay";
-       pca9547@75 {
+       i2c-mux@75 {
                compatible = "nxp,pca9547";
                reg = <0x75>;
                #address-cells = <1>;
index afb4552..d32a52a 100644 (file)
@@ -54,7 +54,7 @@
 &i2c0 {
        status = "okay";
 
-       i2c-switch@77 {
+       i2c-mux@77 {
                compatible = "nxp,pca9547";
                #address-cells = <1>;
                #size-cells = <0>;
index 03266bd..169f047 100644 (file)
 &ecspi2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_espi2>;
-       cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
+       cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>;
        status = "okay";
 
        eeprom@0 {
                        MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK            0x82
                        MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI            0x82
                        MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO            0x82
-                       MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9               0x41
+                       MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13              0x41
                >;
        };
 
index 24f61db..752f409 100644 (file)
                compatible = "rohm,bd71847";
                reg = <0x4b>;
                #clock-cells = <0>;
-               clocks = <&clk_xtal32k 0>;
+               clocks = <&clk_xtal32k>;
                clock-output-names = "clk-32k-out";
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_pmic>;
index 74c0989..6357078 100644 (file)
        pinctrl-0 = <&pinctrl_i2c3>;
        status = "okay";
 
-       i2cmux@70 {
+       i2c-mux@70 {
                compatible = "nxp,pca9540";
                reg = <0x70>;
                #address-cells = <1>;
index 750a1f0..6433c20 100644 (file)
 &usbotg2 {
        dr_mode = "host";
        vbus-supply = <&reg_usb2_vbus>;
+       over-current-active-low;
        status = "okay";
 };
 
index c2a5c2f..7c3f5c5 100644 (file)
@@ -9,6 +9,7 @@
                simple-audio-card,bitclock-master = <&dailink_master>;
                simple-audio-card,format = "i2s";
                simple-audio-card,frame-master = <&dailink_master>;
+               simple-audio-card,mclk-fs = <256>;
                simple-audio-card,name = "imx8mm-wm8904";
                simple-audio-card,routing =
                        "Headphone Jack", "HPOUTL",
index 73cc3fa..b2bcd22 100644 (file)
@@ -11,6 +11,7 @@
                simple-audio-card,bitclock-master = <&dailink_master>;
                simple-audio-card,format = "i2s";
                simple-audio-card,frame-master = <&dailink_master>;
+               simple-audio-card,mclk-fs = <256>;
                simple-audio-card,name = "imx8mm-nau8822";
                simple-audio-card,routing =
                        "Headphones", "LHP",
index d4c7ca1..f2d9343 100644 (file)
@@ -36,8 +36,8 @@
 
        pcie0_refclk: pcie0-refclk {
                compatible = "fixed-clock";
-                       #clock-cells = <0>;
-                       clock-frequency = <100000000>;
+               #clock-cells = <0>;
+               clock-frequency = <100000000>;
        };
 
        reg_can1_stby: regulator-can1-stby {
index 79b290a..ecc4bce 100644 (file)
@@ -99,7 +99,6 @@
 
                regulators {
                        buck1: BUCK1 {
-                               regulator-compatible = "BUCK1";
                                regulator-min-microvolt = <600000>;
                                regulator-max-microvolt = <2187500>;
                                regulator-boot-on;
                        };
 
                        buck2: BUCK2 {
-                               regulator-compatible = "BUCK2";
                                regulator-min-microvolt = <600000>;
                                regulator-max-microvolt = <2187500>;
                                regulator-boot-on;
                        };
 
                        buck4: BUCK4 {
-                               regulator-compatible = "BUCK4";
                                regulator-min-microvolt = <600000>;
                                regulator-max-microvolt = <3400000>;
                                regulator-boot-on;
                        };
 
                        buck5: BUCK5 {
-                               regulator-compatible = "BUCK5";
                                regulator-min-microvolt = <600000>;
                                regulator-max-microvolt = <3400000>;
                                regulator-boot-on;
                        };
 
                        buck6: BUCK6 {
-                               regulator-compatible = "BUCK6";
                                regulator-min-microvolt = <600000>;
                                regulator-max-microvolt = <3400000>;
                                regulator-boot-on;
                        };
 
                        ldo1: LDO1 {
-                               regulator-compatible = "LDO1";
                                regulator-min-microvolt = <1600000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-boot-on;
                        };
 
                        ldo2: LDO2 {
-                               regulator-compatible = "LDO2";
                                regulator-min-microvolt = <800000>;
                                regulator-max-microvolt = <1150000>;
                                regulator-boot-on;
                        };
 
                        ldo3: LDO3 {
-                               regulator-compatible = "LDO3";
                                regulator-min-microvolt = <800000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-boot-on;
                        };
 
                        ldo4: LDO4 {
-                               regulator-compatible = "LDO4";
                                regulator-min-microvolt = <800000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo5: LDO5 {
-                               regulator-compatible = "LDO5";
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-boot-on;
index 7a6e622..03034b4 100644 (file)
                                compatible = "fsl,imx8mp-gpc";
                                reg = <0x303a0000 0x1000>;
                                interrupt-parent = <&gic>;
+                               interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
                                interrupt-controller;
                                #interrupt-cells = <3>;
 
                                                reg = <IMX8MP_POWER_DOMAIN_MIPI_PHY2>;
                                        };
 
-                                       pgc_hsiomix: power-domains@17 {
+                                       pgc_hsiomix: power-domain@17 {
                                                #power-domain-cells = <0>;
                                                reg = <IMX8MP_POWER_DOMAIN_HSIOMIX>;
                                                clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
                        reg = <0x32f10100 0x8>,
                              <0x381f0000 0x20>;
                        clocks = <&clk IMX8MP_CLK_HSIO_ROOT>,
-                                <&clk IMX8MP_CLK_USB_ROOT>;
+                                <&clk IMX8MP_CLK_USB_SUSP>;
                        clock-names = "hsio", "suspend";
                        interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
                        power-domains = <&hsio_blk_ctrl IMX8MP_HSIOBLK_PD_USB>;
                        usb_dwc3_0: usb@38100000 {
                                compatible = "snps,dwc3";
                                reg = <0x38100000 0x10000>;
-                               clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
+                               clocks = <&clk IMX8MP_CLK_USB_ROOT>,
                                         <&clk IMX8MP_CLK_USB_CORE_REF>,
-                                        <&clk IMX8MP_CLK_USB_ROOT>;
+                                        <&clk IMX8MP_CLK_USB_SUSP>;
                                clock-names = "bus_early", "ref", "suspend";
                                interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
                                phys = <&usb3_phy0>, <&usb3_phy0>;
                        reg = <0x32f10108 0x8>,
                              <0x382f0000 0x20>;
                        clocks = <&clk IMX8MP_CLK_HSIO_ROOT>,
-                                <&clk IMX8MP_CLK_USB_ROOT>;
+                                <&clk IMX8MP_CLK_USB_SUSP>;
                        clock-names = "hsio", "suspend";
                        interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                        power-domains = <&hsio_blk_ctrl IMX8MP_HSIOBLK_PD_USB>;
                        usb_dwc3_1: usb@38200000 {
                                compatible = "snps,dwc3";
                                reg = <0x38200000 0x10000>;
-                               clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
+                               clocks = <&clk IMX8MP_CLK_USB_ROOT>,
                                         <&clk IMX8MP_CLK_USB_CORE_REF>,
-                                        <&clk IMX8MP_CLK_USB_ROOT>;
+                                        <&clk IMX8MP_CLK_USB_SUSP>;
                                clock-names = "bus_early", "ref", "suspend";
                                interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
                                phys = <&usb3_phy1>, <&usb3_phy1>;
index 9dda2a1..8614c18 100644 (file)
        pinctrl-0 = <&pinctrl_i2c1>;
        status = "okay";
 
-       i2cmux@70 {
+       i2c-mux@70 {
                compatible = "nxp,pca9546";
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_i2c1_pca9546>;
        pinctrl-0 = <&pinctrl_i2c4>;
        status = "okay";
 
-       pca9546: i2cmux@70 {
+       pca9546: i2c-mux@70 {
                compatible = "nxp,pca9546";
                reg = <0x70>;
                #address-cells = <1>;
index 5d5aa65..6e61827 100644 (file)
        bus-width = <4>;
        non-removable;
        no-sd;
-       no-emmc;
+       no-mmc;
        status = "okay";
 
        brcmf: wifi@1 {
        cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
        bus-width = <4>;
        no-sdio;
-       no-emmc;
+       no-mmc;
        disable-wp;
        status = "okay";
 };
index 07d8dd8..afa8833 100644 (file)
@@ -61,7 +61,7 @@
        pinctrl-0 = <&pinctrl_lpi2c1 &pinctrl_ioexp_rst>;
        status = "okay";
 
-       i2c-switch@71 {
+       i2c-mux@71 {
                compatible = "nxp,pca9646", "nxp,pca9546";
                #address-cells = <1>;
                #size-cells = <0>;
index 69786c3..27f9a9f 100644 (file)
@@ -74,7 +74,7 @@
 
        pinctrl_usdhc1: usdhc1grp {
                fsl,pins = <
-                       MX93_PAD_SD1_CLK__USDHC1_CLK            0x17fe
+                       MX93_PAD_SD1_CLK__USDHC1_CLK            0x15fe
                        MX93_PAD_SD1_CMD__USDHC1_CMD            0x13fe
                        MX93_PAD_SD1_DATA0__USDHC1_DATA0        0x13fe
                        MX93_PAD_SD1_DATA1__USDHC1_DATA1        0x13fe
@@ -84,7 +84,7 @@
                        MX93_PAD_SD1_DATA5__USDHC1_DATA5        0x13fe
                        MX93_PAD_SD1_DATA6__USDHC1_DATA6        0x13fe
                        MX93_PAD_SD1_DATA7__USDHC1_DATA7        0x13fe
-                       MX93_PAD_SD1_STROBE__USDHC1_STROBE      0x17fe
+                       MX93_PAD_SD1_STROBE__USDHC1_STROBE      0x15fe
                >;
        };
 
 
        pinctrl_usdhc2: usdhc2grp {
                fsl,pins = <
-                       MX93_PAD_SD2_CLK__USDHC2_CLK            0x17fe
+                       MX93_PAD_SD2_CLK__USDHC2_CLK            0x15fe
                        MX93_PAD_SD2_CMD__USDHC2_CMD            0x13fe
                        MX93_PAD_SD2_DATA0__USDHC2_DATA0        0x13fe
                        MX93_PAD_SD2_DATA1__USDHC2_DATA1        0x13fe
index 7308f7b..8bce640 100644 (file)
@@ -98,7 +98,7 @@
 
                        uart1: serial@12100 {
                                compatible = "snps,dw-apb-uart";
-                               reg = <0x11000 0x100>;
+                               reg = <0x12100 0x100>;
                                reg-shift = <2>;
                                interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
                                reg-io-width = <1>;
index 87c90e9..79de9cc 100644 (file)
@@ -3,6 +3,7 @@
  * Copyright (c) 2015, LGE Inc. All rights reserved.
  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
  * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
+ * Copyright (c) 2022, Dominik Kobinski <dominikkobinski314@gmail.com>
  */
 
 /dts-v1/;
                        reg = <0 0x03400000 0 0x1200000>;
                        no-map;
                };
+
+               removed_region: reserved@5000000 {
+                       reg = <0 0x05000000 0 0x2200000>;
+                       no-map;
+               };
        };
 };
 
index b242c27..fcca1ba 100644 (file)
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/gpio-keys.h>
 
+/delete-node/ &adsp_mem;
+/delete-node/ &audio_mem;
+/delete-node/ &mpss_mem;
+/delete-node/ &peripheral_region;
+/delete-node/ &rmtfs_mem;
+
 / {
        model = "Xiaomi Mi 4C";
        compatible = "xiaomi,libra", "qcom,msm8992";
                #size-cells = <2>;
                ranges;
 
-               /* This is for getting crash logs using Android downstream kernels */
-               ramoops@dfc00000 {
-                       compatible = "ramoops";
-                       reg = <0x0 0xdfc00000 0x0 0x40000>;
-                       console-size = <0x10000>;
-                       record-size = <0x10000>;
-                       ftrace-size = <0x10000>;
-                       pmsg-size = <0x20000>;
+               memory_hole: hole@6400000 {
+                       reg = <0 0x06400000 0 0x600000>;
+                       no-map;
+               };
+
+               memory_hole2: hole2@6c00000 {
+                       reg = <0 0x06c00000 0 0x2400000>;
+                       no-map;
+               };
+
+               mpss_mem: mpss@9000000 {
+                       reg = <0 0x09000000 0 0x5a00000>;
+                       no-map;
+               };
+
+               tzapp: tzapp@ea00000 {
+                       reg = <0 0x0ea00000 0 0x1900000>;
+                       no-map;
+               };
+
+               mdm_rfsa_mem: mdm-rfsa@ca0b0000 {
+                       reg = <0 0xca0b0000 0 0x10000>;
+                       no-map;
+               };
+
+               rmtfs_mem: rmtfs@ca100000 {
+                       compatible = "qcom,rmtfs-mem";
+                       reg = <0 0xca100000 0 0x180000>;
+                       no-map;
+
+                       qcom,client-id = <1>;
                };
 
-               modem_region: modem_region@9000000 {
-                       reg = <0x0 0x9000000 0x0 0x5a00000>;
+               audio_mem: audio@cb400000 {
+                       reg = <0 0xcb000000 0 0x400000>;
+                       no-mem;
+               };
+
+               qseecom_mem: qseecom@cb400000 {
+                       reg = <0 0xcb400000 0 0x1c00000>;
+                       no-mem;
+               };
+
+               adsp_rfsa_mem: adsp-rfsa@cd000000 {
+                       reg = <0 0xcd000000 0 0x10000>;
                        no-map;
                };
 
-               tzapp: modem_region@ea00000 {
-                       reg = <0x0 0xea00000 0x0 0x1900000>;
+               sensor_rfsa_mem: sensor-rfsa@cd010000 {
+                       reg = <0 0xcd010000 0 0x10000>;
                        no-map;
                };
+
+               ramoops@dfc00000 {
+                       compatible = "ramoops";
+                       reg = <0 0xdfc00000 0 0x40000>;
+                       console-size = <0x10000>;
+                       record-size = <0x10000>;
+                       ftrace-size = <0x10000>;
+                       pmsg-size = <0x20000>;
+               };
        };
 };
 
        status = "okay";
 };
 
-&peripheral_region {
-       reg = <0x0 0x7400000 0x0 0x1c00000>;
-       no-map;
-};
-
 &pm8994_spmi_regulators {
        VDD_APC0: s8 {
                regulator-min-microvolt = <680000>;
index 10adb49..02fc379 100644 (file)
        compatible = "qcom,rpmcc-msm8992", "qcom,rpmcc";
 };
 
-&tcsr_mutex {
-       compatible = "qcom,sfpb-mutex";
-};
-
 &timer {
        interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
index 85abff0..7b0f621 100644 (file)
@@ -9,9 +9,6 @@
 
 #include "msm8994.dtsi"
 
-/* Angler's firmware does not report where the memory is allocated */
-/delete-node/ &cont_splash_mem;
-
 / {
        model = "Huawei Nexus 6P";
        compatible = "huawei,angler", "qcom,msm8994";
        chosen {
                stdout-path = "serial0:115200n8";
        };
+
+       reserved-memory {
+               #address-cells = <2>;
+               #size-cells = <2>;
+               ranges;
+
+               tzapp_mem: tzapp@4800000 {
+                       reg = <0 0x04800000 0 0x1900000>;
+                       no-map;
+               };
+
+               removed_region: reserved@6300000 {
+                       reg = <0 0x06300000 0 0xD00000>;
+                       no-map;
+               };
+       };
 };
 
 &blsp1_uart2 {
index 109c9d2..71cf81a 100644 (file)
@@ -10,6 +10,7 @@
 #include <dt-bindings/interconnect/qcom,sc8280xp.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/mailbox/qcom-ipcc.h>
+#include <dt-bindings/phy/phy-qcom-qmp.h>
 #include <dt-bindings/power/qcom-rpmpd.h>
 #include <dt-bindings/soc/qcom,rpmh-rsc.h>
 #include <dt-bindings/thermal/thermal.h>
                                 <0>,
                                 <0>,
                                 <0>,
-                                <&usb_0_ssphy>,
+                                <&usb_0_qmpphy QMP_USB43DP_USB3_PIPE_CLK>,
                                 <0>,
                                 <0>,
                                 <0>,
                                 <0>,
                                 <0>,
                                 <0>,
-                                <&usb_1_ssphy>,
+                                <&usb_1_qmpphy QMP_USB43DP_USB3_PIPE_CLK>,
                                 <0>,
                                 <0>,
                                 <0>,
                        };
                };
 
-               usb_0_qmpphy: phy-wrapper@88ec000 {
+               usb_0_qmpphy: phy@88eb000 {
                        compatible = "qcom,sc8280xp-qmp-usb43dp-phy";
-                       reg = <0 0x088ec000 0 0x1e4>,
-                             <0 0x088eb000 0 0x40>,
-                             <0 0x088ed000 0 0x1c8>;
-                       #address-cells = <2>;
-                       #size-cells = <2>;
-                       ranges;
+                       reg = <0 0x088eb000 0 0x4000>;
 
                        clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
-                                <&rpmhcc RPMH_CXO_CLK>,
                                 <&gcc GCC_USB4_EUD_CLKREF_CLK>,
-                                <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>;
-                       clock-names = "aux", "ref_clk_src", "ref", "com_aux";
+                                <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
+                                <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+                       clock-names = "aux", "ref", "com_aux", "usb3_pipe";
+
+                       power-domains = <&gcc USB30_PRIM_GDSC>;
 
                        resets = <&gcc GCC_USB3_PHY_PRIM_BCR>,
-                                <&gcc GCC_USB3_DP_PHY_PRIM_BCR>;
+                                <&gcc GCC_USB4_DP_PHY_PRIM_BCR>;
                        reset-names = "phy", "common";
 
-                       power-domains = <&gcc USB30_PRIM_GDSC>;
+                       #clock-cells = <1>;
+                       #phy-cells = <1>;
 
                        status = "disabled";
-
-                       usb_0_ssphy: usb3-phy@88eb400 {
-                               reg = <0 0x088eb400 0 0x100>,
-                                     <0 0x088eb600 0 0x3ec>,
-                                     <0 0x088ec400 0 0x364>,
-                                     <0 0x088eba00 0 0x100>,
-                                     <0 0x088ebc00 0 0x3ec>,
-                                     <0 0x088ec200 0 0x18>;
-                               #phy-cells = <0>;
-                               #clock-cells = <0>;
-                               clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
-                               clock-names = "pipe0";
-                               clock-output-names = "usb0_phy_pipe_clk_src";
-                       };
                };
 
                usb_1_hsphy: phy@8902000 {
                        status = "disabled";
                };
 
-               usb_1_qmpphy: phy-wrapper@8904000 {
+               usb_1_qmpphy: phy@8903000 {
                        compatible = "qcom,sc8280xp-qmp-usb43dp-phy";
-                       reg = <0 0x08904000 0 0x1e4>,
-                             <0 0x08903000 0 0x40>,
-                             <0 0x08905000 0 0x1c8>;
-                       #address-cells = <2>;
-                       #size-cells = <2>;
-                       ranges;
+                       reg = <0 0x08903000 0 0x4000>;
 
                        clocks = <&gcc GCC_USB3_SEC_PHY_AUX_CLK>,
-                                <&rpmhcc RPMH_CXO_CLK>,
                                 <&gcc GCC_USB4_CLKREF_CLK>,
-                                <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>;
-                       clock-names = "aux", "ref_clk_src", "ref", "com_aux";
+                                <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>,
+                                <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
+                       clock-names = "aux", "ref", "com_aux", "usb3_pipe";
+
+                       power-domains = <&gcc USB30_SEC_GDSC>;
 
                        resets = <&gcc GCC_USB3_PHY_SEC_BCR>,
                                 <&gcc GCC_USB4_1_DP_PHY_PRIM_BCR>;
                        reset-names = "phy", "common";
 
-                       power-domains = <&gcc USB30_SEC_GDSC>;
+                       #clock-cells = <1>;
+                       #phy-cells = <1>;
 
                        status = "disabled";
-
-                       usb_1_ssphy: usb3-phy@8903400 {
-                               reg = <0 0x08903400 0 0x100>,
-                                     <0 0x08903600 0 0x3ec>,
-                                     <0 0x08904400 0 0x364>,
-                                     <0 0x08903a00 0 0x100>,
-                                     <0 0x08903c00 0 0x3ec>,
-                                     <0 0x08904200 0 0x18>;
-                               #phy-cells = <0>;
-                               #clock-cells = <0>;
-                               clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
-                               clock-names = "pipe0";
-                               clock-output-names = "usb1_phy_pipe_clk_src";
-                       };
                };
 
                pmu@9091000 {
                                reg = <0 0x0a600000 0 0xcd00>;
                                interrupts = <GIC_SPI 803 IRQ_TYPE_LEVEL_HIGH>;
                                iommus = <&apps_smmu 0x820 0x0>;
-                               phys = <&usb_0_hsphy>, <&usb_0_ssphy>;
+                               phys = <&usb_0_hsphy>, <&usb_0_qmpphy QMP_USB43DP_USB3_PHY>;
                                phy-names = "usb2-phy", "usb3-phy";
                        };
                };
                                reg = <0 0x0a800000 0 0xcd00>;
                                interrupts = <GIC_SPI 810 IRQ_TYPE_LEVEL_HIGH>;
                                iommus = <&apps_smmu 0x860 0x0>;
-                               phys = <&usb_1_hsphy>, <&usb_1_ssphy>;
+                               phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
                                phy-names = "usb2-phy", "usb3-phy";
                        };
                };
index dab5579..9270328 100644 (file)
                                exit-latency-us = <6562>;
                                min-residency-us = <9987>;
                                local-timer-stop;
-                               status = "disabled";
                        };
                };
        };
index 245dce2..fb3cd20 100644 (file)
                                 <&rpmhcc RPMH_CXO_CLK>;
                        clock-names = "iface", "core", "xo";
                        resets = <&gcc GCC_SDCC2_BCR>;
-                       interconnects = <&aggre2_noc MASTER_SDCC_2 0 &mc_virt SLAVE_EBI1 0>,
-                                       <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_SDCC_2 0>;
+                       interconnects = <&aggre2_noc MASTER_SDCC_2 &mc_virt SLAVE_EBI1>,
+                                       <&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_SDCC_2>;
                        interconnect-names = "sdhc-ddr","cpu-sdhc";
                        iommus = <&apps_smmu 0x4a0 0x0>;
                        power-domains = <&rpmhpd SM8350_CX>;
index 0890e4f..cbb3d96 100644 (file)
@@ -315,7 +315,7 @@ __ll_sc__cmpxchg_double##name(unsigned long old1,                   \
        "       cbnz    %w0, 1b\n"                                      \
        "       " #mb "\n"                                              \
        "2:"                                                            \
-       : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)        \
+       : "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr)          \
        : "r" (old1), "r" (old2), "r" (new1), "r" (new2)                \
        : cl);                                                          \
                                                                        \
index 52075e9..a94d6da 100644 (file)
@@ -311,7 +311,7 @@ __lse__cmpxchg_double##name(unsigned long old1,                             \
        "       eor     %[old2], %[old2], %[oldval2]\n"                 \
        "       orr     %[old1], %[old1], %[old2]"                      \
        : [old1] "+&r" (x0), [old2] "+&r" (x1),                         \
-         [v] "+Q" (*(unsigned long *)ptr)                              \
+         [v] "+Q" (*(__uint128_t *)ptr)                                \
        : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),             \
          [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)              \
        : cl);                                                          \
index 4e8b66c..683ca3a 100644 (file)
 #define APPLE_CPU_PART_M1_FIRESTORM_PRO        0x025
 #define APPLE_CPU_PART_M1_ICESTORM_MAX 0x028
 #define APPLE_CPU_PART_M1_FIRESTORM_MAX        0x029
+#define APPLE_CPU_PART_M2_BLIZZARD     0x032
+#define APPLE_CPU_PART_M2_AVALANCHE    0x033
 
 #define AMPERE_CPU_PART_AMPERE1                0xAC3
 
 #define MIDR_APPLE_M1_FIRESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_PRO)
 #define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX)
 #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
+#define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD)
+#define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE)
 #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
 
 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
index 31d13a6..de4ff90 100644 (file)
@@ -48,8 +48,17 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
 })
 
 extern spinlock_t efi_rt_lock;
+extern u64 *efi_rt_stack_top;
 efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
 
+/*
+ * efi_rt_stack_top[-1] contains the value the stack pointer had before
+ * switching to the EFI runtime stack.
+ */
+#define current_in_efi()                                               \
+       (!preemptible() && efi_rt_stack_top != NULL &&                  \
+        on_task_stack(current, READ_ONCE(efi_rt_stack_top[-1]), 1))
+
 #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
 
 /*
index 15b34fb..206de10 100644 (file)
 #define ESR_ELx_FSC_ACCESS     (0x08)
 #define ESR_ELx_FSC_FAULT      (0x04)
 #define ESR_ELx_FSC_PERM       (0x0C)
+#define ESR_ELx_FSC_SEA_TTW0   (0x14)
+#define ESR_ELx_FSC_SEA_TTW1   (0x15)
+#define ESR_ELx_FSC_SEA_TTW2   (0x16)
+#define ESR_ELx_FSC_SEA_TTW3   (0x17)
+#define ESR_ELx_FSC_SECC       (0x18)
+#define ESR_ELx_FSC_SECC_TTW0  (0x1c)
+#define ESR_ELx_FSC_SECC_TTW1  (0x1d)
+#define ESR_ELx_FSC_SECC_TTW2  (0x1e)
+#define ESR_ELx_FSC_SECC_TTW3  (0x1f)
 
 /* ISS field definitions for Data Aborts */
 #define ESR_ELx_ISV_SHIFT      (24)
index d20f5da..6a4a1ab 100644 (file)
@@ -49,6 +49,15 @@ extern pte_t huge_ptep_get(pte_t *ptep);
 
 void __init arm64_hugetlb_cma_reserve(void);
 
+#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
+extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+                                        unsigned long addr, pte_t *ptep);
+
+#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
+extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+                                        unsigned long addr, pte_t *ptep,
+                                        pte_t old_pte, pte_t new_pte);
+
 #include <asm-generic/hugetlb.h>
 
 #endif /* __ASM_HUGETLB_H */
index 0df3fc3..26b0c97 100644 (file)
                                 BIT(18) |              \
                                 GENMASK(16, 15))
 
-/* For compatibility with fault code shared with 32-bit */
-#define FSC_FAULT      ESR_ELx_FSC_FAULT
-#define FSC_ACCESS     ESR_ELx_FSC_ACCESS
-#define FSC_PERM       ESR_ELx_FSC_PERM
-#define FSC_SEA                ESR_ELx_FSC_EXTABT
-#define FSC_SEA_TTW0   (0x14)
-#define FSC_SEA_TTW1   (0x15)
-#define FSC_SEA_TTW2   (0x16)
-#define FSC_SEA_TTW3   (0x17)
-#define FSC_SECC       (0x18)
-#define FSC_SECC_TTW0  (0x1c)
-#define FSC_SECC_TTW1  (0x1d)
-#define FSC_SECC_TTW2  (0x1e)
-#define FSC_SECC_TTW3  (0x1f)
-
 /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
 #define HPFAR_MASK     (~UL(0xf))
 /*
index 9bdba47..193583d 100644 (file)
@@ -349,16 +349,16 @@ static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *v
 static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
 {
        switch (kvm_vcpu_trap_get_fault(vcpu)) {
-       case FSC_SEA:
-       case FSC_SEA_TTW0:
-       case FSC_SEA_TTW1:
-       case FSC_SEA_TTW2:
-       case FSC_SEA_TTW3:
-       case FSC_SECC:
-       case FSC_SECC_TTW0:
-       case FSC_SECC_TTW1:
-       case FSC_SECC_TTW2:
-       case FSC_SECC_TTW3:
+       case ESR_ELx_FSC_EXTABT:
+       case ESR_ELx_FSC_SEA_TTW0:
+       case ESR_ELx_FSC_SEA_TTW1:
+       case ESR_ELx_FSC_SEA_TTW2:
+       case ESR_ELx_FSC_SEA_TTW3:
+       case ESR_ELx_FSC_SECC:
+       case ESR_ELx_FSC_SECC_TTW0:
+       case ESR_ELx_FSC_SECC_TTW1:
+       case ESR_ELx_FSC_SECC_TTW2:
+       case ESR_ELx_FSC_SECC_TTW3:
                return true;
        default:
                return false;
@@ -373,8 +373,26 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 
 static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 {
-       if (kvm_vcpu_abt_iss1tw(vcpu))
-               return true;
+       if (kvm_vcpu_abt_iss1tw(vcpu)) {
+               /*
+                * Only a permission fault on a S1PTW should be
+                * considered as a write. Otherwise, page tables baked
+                * in a read-only memslot will result in an exception
+                * being delivered in the guest.
+                *
+                * The drawback is that we end-up faulting twice if the
+                * guest is using any of HW AF/DB: a translation fault
+                * to map the page containing the PT (read only at
+                * first), then a permission fault to allow the flags
+                * to be set.
+                */
+               switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
+               case ESR_ELx_FSC_PERM:
+                       return true;
+               default:
+                       return false;
+               }
+       }
 
        if (kvm_vcpu_trap_is_iabt(vcpu))
                return false;
index b4bbeed..65e7899 100644 (file)
@@ -681,7 +681,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 #define pud_leaf(pud)          (pud_present(pud) && !pud_table(pud))
 #define pud_valid(pud)         pte_valid(pud_pte(pud))
 #define pud_user(pud)          pte_user(pud_pte(pud))
-
+#define pud_user_exec(pud)     pte_user_exec(pud_pte(pud))
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
@@ -730,6 +730,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
 #else
 
 #define pud_page_paddr(pud)    ({ BUILD_BUG(); 0; })
+#define pud_user_exec(pud)     pud_user(pud) /* Always 0 with folding */
 
 /* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
 #define pmd_set_fixmap(addr)           NULL
@@ -862,12 +863,12 @@ static inline bool pte_user_accessible_page(pte_t pte)
 
 static inline bool pmd_user_accessible_page(pmd_t pmd)
 {
-       return pmd_leaf(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
+       return pmd_leaf(pmd) && !pmd_present_invalid(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
 }
 
 static inline bool pud_user_accessible_page(pud_t pud)
 {
-       return pud_leaf(pud) && pud_user(pud);
+       return pud_leaf(pud) && (pud_user(pud) || pud_user_exec(pud));
 }
 #endif
 
@@ -1093,6 +1094,15 @@ static inline bool pud_sect_supported(void)
 }
 
 
+#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+#define ptep_modify_prot_start ptep_modify_prot_start
+extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
+                                   unsigned long addr, pte_t *ptep);
+
+#define ptep_modify_prot_commit ptep_modify_prot_commit
+extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
+                                   unsigned long addr, pte_t *ptep,
+                                   pte_t old_pte, pte_t new_pte);
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */
index 4e5354b..66ec8ca 100644 (file)
@@ -106,4 +106,19 @@ static inline struct stack_info stackinfo_get_sdei_critical(void)
 #define stackinfo_get_sdei_critical()  stackinfo_get_unknown()
 #endif
 
+#ifdef CONFIG_EFI
+extern u64 *efi_rt_stack_top;
+
+static inline struct stack_info stackinfo_get_efi(void)
+{
+       unsigned long high = (u64)efi_rt_stack_top;
+       unsigned long low = high - THREAD_SIZE;
+
+       return (struct stack_info) {
+               .low = low,
+               .high = high,
+       };
+}
+#endif
+
 #endif /* __ASM_STACKTRACE_H */
index ba4bff5..2b09495 100644 (file)
@@ -16,7 +16,7 @@
 #define UPROBE_SWBP_INSN_SIZE  AARCH64_INSN_SIZE
 #define UPROBE_XOL_SLOT_BYTES  MAX_UINSN_BYTES
 
-typedef u32 uprobe_opcode_t;
+typedef __le32 uprobe_opcode_t;
 
 struct arch_uprobe_task {
 };
index 89ac000..307faa2 100644 (file)
@@ -661,6 +661,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
        },
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_2645198
+       {
+               .desc = "ARM erratum 2645198",
+               .capability = ARM64_WORKAROUND_2645198,
+               ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
+       },
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_2077057
        {
                .desc = "ARM erratum 2077057",
index a008864..e8ae803 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 SYM_FUNC_START(__efi_rt_asm_wrapper)
        stp     x29, x30, [sp, #-112]!
@@ -45,7 +46,10 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
        mov     x4, x6
        blr     x8
 
+       mov     x16, sp
        mov     sp, x29
+       str     xzr, [x16, #8]                  // clear recorded task SP value
+
        ldp     x1, x2, [sp, #16]
        cmp     x2, x18
        ldp     x29, x30, [sp], #112
@@ -70,6 +74,9 @@ SYM_FUNC_END(__efi_rt_asm_wrapper)
 SYM_CODE_START(__efi_rt_asm_recover)
        mov     sp, x30
 
+       ldr_l   x16, efi_rt_stack_top           // clear recorded task SP value
+       str     xzr, [x16, #-8]
+
        ldp     x19, x20, [sp, #32]
        ldp     x21, x22, [sp, #48]
        ldp     x23, x24, [sp, #64]
index fab05de..b273900 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/init.h>
 
 #include <asm/efi.h>
+#include <asm/stacktrace.h>
 
 static bool region_is_misaligned(const efi_memory_desc_t *md)
 {
@@ -154,7 +155,7 @@ asmlinkage efi_status_t __efi_rt_asm_recover(void);
 bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
 {
         /* Check whether the exception occurred while running the firmware */
-       if (current_work() != &efi_rts_work.work || regs->pc >= TASK_SIZE_64)
+       if (!current_in_efi() || regs->pc >= TASK_SIZE_64)
                return false;
 
        pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg);
index 353009d..2e94d20 100644 (file)
@@ -8,28 +8,27 @@
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
 
-#define for_each_mte_vma(vmi, vma)                                     \
+#define for_each_mte_vma(cprm, i, m)                                   \
        if (system_supports_mte())                                      \
-               for_each_vma(vmi, vma)                                  \
-                       if (vma->vm_flags & VM_MTE)
+               for (i = 0, m = cprm->vma_meta;                         \
+                    i < cprm->vma_count;                               \
+                    i++, m = cprm->vma_meta + i)                       \
+                       if (m->flags & VM_MTE)
 
-static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
+static unsigned long mte_vma_tag_dump_size(struct core_vma_metadata *m)
 {
-       if (vma->vm_flags & VM_DONTDUMP)
-               return 0;
-
-       return vma_pages(vma) * MTE_PAGE_TAG_STORAGE;
+       return (m->dump_size >> PAGE_SHIFT) * MTE_PAGE_TAG_STORAGE;
 }
 
 /* Derived from dump_user_range(); start/end must be page-aligned */
 static int mte_dump_tag_range(struct coredump_params *cprm,
-                             unsigned long start, unsigned long end)
+                             unsigned long start, unsigned long len)
 {
        int ret = 1;
        unsigned long addr;
        void *tags = NULL;
 
-       for (addr = start; addr < end; addr += PAGE_SIZE) {
+       for (addr = start; addr < start + len; addr += PAGE_SIZE) {
                struct page *page = get_dump_page(addr);
 
                /*
@@ -65,7 +64,6 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
                mte_save_page_tags(page_address(page), tags);
                put_page(page);
                if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {
-                       mte_free_tag_storage(tags);
                        ret = 0;
                        break;
                }
@@ -77,13 +75,13 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
        return ret;
 }
 
-Elf_Half elf_core_extra_phdrs(void)
+Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
-       struct vm_area_struct *vma;
+       int i;
+       struct core_vma_metadata *m;
        int vma_count = 0;
-       VMA_ITERATOR(vmi, current->mm, 0);
 
-       for_each_mte_vma(vmi, vma)
+       for_each_mte_vma(cprm, i, m)
                vma_count++;
 
        return vma_count;
@@ -91,18 +89,18 @@ Elf_Half elf_core_extra_phdrs(void)
 
 int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 {
-       struct vm_area_struct *vma;
-       VMA_ITERATOR(vmi, current->mm, 0);
+       int i;
+       struct core_vma_metadata *m;
 
-       for_each_mte_vma(vmi, vma) {
+       for_each_mte_vma(cprm, i, m) {
                struct elf_phdr phdr;
 
                phdr.p_type = PT_AARCH64_MEMTAG_MTE;
                phdr.p_offset = offset;
-               phdr.p_vaddr = vma->vm_start;
+               phdr.p_vaddr = m->start;
                phdr.p_paddr = 0;
-               phdr.p_filesz = mte_vma_tag_dump_size(vma);
-               phdr.p_memsz = vma->vm_end - vma->vm_start;
+               phdr.p_filesz = mte_vma_tag_dump_size(m);
+               phdr.p_memsz = m->end - m->start;
                offset += phdr.p_filesz;
                phdr.p_flags = 0;
                phdr.p_align = 0;
@@ -114,28 +112,25 @@ int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
        return 1;
 }
 
-size_t elf_core_extra_data_size(void)
+size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
-       struct vm_area_struct *vma;
+       int i;
+       struct core_vma_metadata *m;
        size_t data_size = 0;
-       VMA_ITERATOR(vmi, current->mm, 0);
 
-       for_each_mte_vma(vmi, vma)
-               data_size += mte_vma_tag_dump_size(vma);
+       for_each_mte_vma(cprm, i, m)
+               data_size += mte_vma_tag_dump_size(m);
 
        return data_size;
 }
 
 int elf_core_write_extra_data(struct coredump_params *cprm)
 {
-       struct vm_area_struct *vma;
-       VMA_ITERATOR(vmi, current->mm, 0);
-
-       for_each_mte_vma(vmi, vma) {
-               if (vma->vm_flags & VM_DONTDUMP)
-                       continue;
+       int i;
+       struct core_vma_metadata *m;
 
-               if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end))
+       for_each_mte_vma(cprm, i, m) {
+               if (!mte_dump_tag_range(cprm, m->start, m->dump_size))
                        return 0;
        }
 
index dcc81e7..b6ef1af 100644 (file)
@@ -385,7 +385,7 @@ static void task_fpsimd_load(void)
        WARN_ON(!system_supports_fpsimd());
        WARN_ON(!have_cpu_fpsimd_context());
 
-       if (system_supports_sve()) {
+       if (system_supports_sve() || system_supports_sme()) {
                switch (current->thread.fp_type) {
                case FP_STATE_FPSIMD:
                        /* Stop tracking SVE for this task until next use. */
index 2686ab1..0c321ad 100644 (file)
@@ -1357,7 +1357,7 @@ enum aarch64_regset {
 #ifdef CONFIG_ARM64_SVE
        REGSET_SVE,
 #endif
-#ifdef CONFIG_ARM64_SVE
+#ifdef CONFIG_ARM64_SME
        REGSET_SSVE,
        REGSET_ZA,
 #endif
index e0d09bf..be279fd 100644 (file)
@@ -281,7 +281,12 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
 
                vl = task_get_sme_vl(current);
        } else {
-               if (!system_supports_sve())
+               /*
+                * A SME only system use SVE for streaming mode so can
+                * have a SVE formatted context with a zero VL and no
+                * payload data.
+                */
+               if (!system_supports_sve() && !system_supports_sme())
                        return -EINVAL;
 
                vl = task_get_sve_vl(current);
@@ -732,7 +737,7 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
                        return err;
        }
 
-       if (system_supports_sve()) {
+       if (system_supports_sve() || system_supports_sme()) {
                unsigned int vq = 0;
 
                if (add_all || test_thread_flag(TIF_SVE) ||
index 117e2c1..8315430 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright (C) 2012 ARM Ltd.
  */
 #include <linux/kernel.h>
+#include <linux/efi.h>
 #include <linux/export.h>
 #include <linux/ftrace.h>
 #include <linux/sched.h>
@@ -12,6 +13,7 @@
 #include <linux/sched/task_stack.h>
 #include <linux/stacktrace.h>
 
+#include <asm/efi.h>
 #include <asm/irq.h>
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
@@ -186,6 +188,13 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
                        : stackinfo_get_unknown();              \
        })
 
+#define STACKINFO_EFI                                          \
+       ({                                                      \
+               ((task == current) && current_in_efi())         \
+                       ? stackinfo_get_efi()                   \
+                       : stackinfo_get_unknown();              \
+       })
+
 noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
                              void *cookie, struct task_struct *task,
                              struct pt_regs *regs)
@@ -200,6 +209,9 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
                STACKINFO_SDEI(normal),
                STACKINFO_SDEI(critical),
 #endif
+#ifdef CONFIG_EFI
+               STACKINFO_EFI,
+#endif
        };
        struct unwind_state state = {
                .stacks = stacks,
index 5626ddb..cf4c495 100644 (file)
@@ -1079,7 +1079,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
 
                        /* uaccess failed, don't leave stale tags */
                        if (num_tags != MTE_GRANULES_PER_PAGE)
-                               mte_clear_page_tags(page);
+                               mte_clear_page_tags(maddr);
                        set_page_mte_tagged(page);
 
                        kvm_release_pfn_dirty(pfn);
index 1b8a2dc..9ddcfe2 100644 (file)
@@ -60,7 +60,7 @@ static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
         */
        if (!(esr & ESR_ELx_S1PTW) &&
            (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
-            (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+            (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM)) {
                if (!__translate_far_to_hpfar(far, &hpfar))
                        return false;
        } else {
index 3330d1b..07d37ff 100644 (file)
@@ -367,7 +367,7 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
        if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
                bool valid;
 
-               valid = kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
+               valid = kvm_vcpu_trap_get_fault_type(vcpu) == ESR_ELx_FSC_FAULT &&
                        kvm_vcpu_dabt_isvalid(vcpu) &&
                        !kvm_vcpu_abt_issea(vcpu) &&
                        !kvm_vcpu_abt_iss1tw(vcpu);
index 31d7fa4..a3ee3b6 100644 (file)
@@ -1212,7 +1212,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
        VM_BUG_ON(write_fault && exec_fault);
 
-       if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
+       if (fault_status == ESR_ELx_FSC_PERM && !write_fault && !exec_fault) {
                kvm_err("Unexpected L2 read permission error\n");
                return -EFAULT;
        }
@@ -1277,7 +1277,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * only exception to this is when dirty logging is enabled at runtime
         * and a write fault needs to collapse a block entry into a table.
         */
-       if (fault_status != FSC_PERM || (logging_active && write_fault)) {
+       if (fault_status != ESR_ELx_FSC_PERM ||
+           (logging_active && write_fault)) {
                ret = kvm_mmu_topup_memory_cache(memcache,
                                                 kvm_mmu_cache_min_pages(kvm));
                if (ret)
@@ -1342,7 +1343,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * backed by a THP and thus use block mapping if possible.
         */
        if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
-               if (fault_status == FSC_PERM && fault_granule > PAGE_SIZE)
+               if (fault_status ==  ESR_ELx_FSC_PERM &&
+                   fault_granule > PAGE_SIZE)
                        vma_pagesize = fault_granule;
                else
                        vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
@@ -1350,7 +1352,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                                                                   &fault_ipa);
        }
 
-       if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
+       if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) {
                /* Check the VMM hasn't introduced a new disallowed VMA */
                if (kvm_vma_mte_allowed(vma)) {
                        sanitise_mte_tags(kvm, pfn, vma_pagesize);
@@ -1376,7 +1378,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * permissions only if vma_pagesize equals fault_granule. Otherwise,
         * kvm_pgtable_stage2_map() should be called to change block size.
         */
-       if (fault_status == FSC_PERM && vma_pagesize == fault_granule)
+       if (fault_status == ESR_ELx_FSC_PERM && vma_pagesize == fault_granule)
                ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
        else
                ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
@@ -1441,7 +1443,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
        fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
        is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
 
-       if (fault_status == FSC_FAULT) {
+       if (fault_status == ESR_ELx_FSC_FAULT) {
                /* Beyond sanitised PARange (which is the IPA limit) */
                if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
                        kvm_inject_size_fault(vcpu);
@@ -1476,8 +1478,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
                              kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
        /* Check the stage-2 fault is trans. fault or write fault */
-       if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
-           fault_status != FSC_ACCESS) {
+       if (fault_status != ESR_ELx_FSC_FAULT &&
+           fault_status != ESR_ELx_FSC_PERM &&
+           fault_status != ESR_ELx_FSC_ACCESS) {
                kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
                        kvm_vcpu_trap_get_class(vcpu),
                        (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
@@ -1539,7 +1542,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
        /* Userspace should not be able to register out-of-bounds IPAs */
        VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
 
-       if (fault_status == FSC_ACCESS) {
+       if (fault_status == ESR_ELx_FSC_ACCESS) {
                handle_access_fault(vcpu, fault_ipa);
                ret = 1;
                goto out_unlock;
index d5ee52d..c6cbfe6 100644 (file)
@@ -646,7 +646,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
                return;
 
        /* Only preserve PMCR_EL0.N, and reset the rest to 0 */
-       pmcr = read_sysreg(pmcr_el0) & ARMV8_PMU_PMCR_N_MASK;
+       pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
        if (!kvm_supports_32bit_el0())
                pmcr |= ARMV8_PMU_PMCR_LC;
 
index 826ff6f..2624963 100644 (file)
@@ -350,26 +350,23 @@ retry:
  * The deactivation of the doorbell interrupt will trigger the
  * unmapping of the associated vPE.
  */
-static void unmap_all_vpes(struct vgic_dist *dist)
+static void unmap_all_vpes(struct kvm *kvm)
 {
-       struct irq_desc *desc;
+       struct vgic_dist *dist = &kvm->arch.vgic;
        int i;
 
-       for (i = 0; i < dist->its_vm.nr_vpes; i++) {
-               desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
-               irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
-       }
+       for (i = 0; i < dist->its_vm.nr_vpes; i++)
+               free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i));
 }
 
-static void map_all_vpes(struct vgic_dist *dist)
+static void map_all_vpes(struct kvm *kvm)
 {
-       struct irq_desc *desc;
+       struct vgic_dist *dist = &kvm->arch.vgic;
        int i;
 
-       for (i = 0; i < dist->its_vm.nr_vpes; i++) {
-               desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
-               irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
-       }
+       for (i = 0; i < dist->its_vm.nr_vpes; i++)
+               WARN_ON(vgic_v4_request_vpe_irq(kvm_get_vcpu(kvm, i),
+                                               dist->its_vm.vpes[i]->irq));
 }
 
 /**
@@ -394,7 +391,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
         * and enabling of the doorbells have already been done.
         */
        if (kvm_vgic_global_state.has_gicv4_1) {
-               unmap_all_vpes(dist);
+               unmap_all_vpes(kvm);
                vlpi_avail = true;
        }
 
@@ -444,7 +441,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
 
 out:
        if (vlpi_avail)
-               map_all_vpes(dist);
+               map_all_vpes(kvm);
 
        return ret;
 }
@@ -616,6 +613,8 @@ static const struct midr_range broken_seis[] = {
        MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
        MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
        MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
+       MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
+       MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
        {},
 };
 
index ad06ba6..a413718 100644 (file)
@@ -222,6 +222,11 @@ void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
        *val = !!(*ptr & mask);
 }
 
+int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
+{
+       return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu);
+}
+
 /**
  * vgic_v4_init - Initialize the GICv4 data structures
  * @kvm:       Pointer to the VM being initialized
@@ -283,8 +288,7 @@ int vgic_v4_init(struct kvm *kvm)
                        irq_flags &= ~IRQ_NOAUTOEN;
                irq_set_status_flags(irq, irq_flags);
 
-               ret = request_irq(irq, vgic_v4_doorbell_handler,
-                                 0, "vcpu", vcpu);
+               ret = vgic_v4_request_vpe_irq(vcpu, irq);
                if (ret) {
                        kvm_err("failed to allocate vcpu IRQ%d\n", irq);
                        /*
index 0c8da72..23e280f 100644 (file)
@@ -331,5 +331,6 @@ int vgic_v4_init(struct kvm *kvm);
 void vgic_v4_teardown(struct kvm *kvm);
 void vgic_v4_configure_vsgis(struct kvm *kvm);
 void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
+int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
 
 #endif
index 35e9a46..95364e8 100644 (file)
@@ -559,3 +559,24 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
 {
        return __hugetlb_valid_size(size);
 }
+
+pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+       if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
+           cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
+               /*
+                * Break-before-make (BBM) is required for all user space mappings
+                * when the permission changes from executable to non-executable
+                * in cases where cpu is affected with errata #2645198.
+                */
+               if (pte_user_exec(READ_ONCE(*ptep)))
+                       return huge_ptep_clear_flush(vma, addr, ptep);
+       }
+       return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+                                 pte_t old_pte, pte_t pte)
+{
+       set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+}
index 14c87e8..d77c9f5 100644 (file)
@@ -1630,3 +1630,24 @@ static int __init prevent_bootmem_remove_init(void)
 }
 early_initcall(prevent_bootmem_remove_init);
 #endif
+
+pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+       if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
+           cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
+               /*
+                * Break-before-make (BBM) is required for all user space mappings
+                * when the permission changes from executable to non-executable
+                * in cases where cpu is affected with errata #2645198.
+                */
+               if (pte_user_exec(READ_ONCE(*ptep)))
+                       return ptep_clear_flush(vma, addr, ptep);
+       }
+       return ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+                            pte_t old_pte, pte_t pte)
+{
+       set_pte_at(vma->vm_mm, addr, ptep, pte);
+}
index a86ee37..dfeb2c5 100644 (file)
@@ -71,6 +71,7 @@ WORKAROUND_2038923
 WORKAROUND_2064142
 WORKAROUND_2077057
 WORKAROUND_2457168
+WORKAROUND_2645198
 WORKAROUND_2658417
 WORKAROUND_TRBE_OVERWRITE_FILL_MODE
 WORKAROUND_TSB_FLUSH_FAILURE
index 9468052..8895df1 100644 (file)
@@ -7,7 +7,7 @@
 #include <asm/elf.h>
 
 
-Elf64_Half elf_core_extra_phdrs(void)
+Elf64_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
        return GATE_EHDR->e_phnum;
 }
@@ -60,7 +60,7 @@ int elf_core_write_extra_data(struct coredump_params *cprm)
        return 1;
 }
 
-size_t elf_core_extra_data_size(void)
+size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
        const struct elf_phdr *const gate_phdrs =
                (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
index 90f9d33..3418d32 100644 (file)
@@ -10,8 +10,6 @@
 #define FTRACE_REGS_PLT_IDX    1
 #define NR_FTRACE_PLTS         2
 
-#define GRAPH_FAKE_OFFSET (sizeof(struct pt_regs) - offsetof(struct pt_regs, regs[1]))
-
 #ifdef CONFIG_FUNCTION_TRACER
 
 #define MCOUNT_INSN_SIZE 4             /* sizeof mcount call */
index c00e151..7eedd83 100644 (file)
@@ -377,14 +377,6 @@ static inline bool unsigned_imm_check(unsigned long val, unsigned int bit)
        return val < (1UL << bit);
 }
 
-static inline unsigned long sign_extend(unsigned long val, unsigned int idx)
-{
-       if (!is_imm_negative(val, idx + 1))
-               return ((1UL << idx) - 1) & val;
-       else
-               return ~((1UL << idx) - 1) | val;
-}
-
 #define DEF_EMIT_REG0I26_FORMAT(NAME, OP)                              \
 static inline void emit_##NAME(union loongarch_instruction *insn,      \
                               int offset)                              \
@@ -401,6 +393,7 @@ static inline void emit_##NAME(union loongarch_instruction *insn,   \
 }
 
 DEF_EMIT_REG0I26_FORMAT(b, b_op)
+DEF_EMIT_REG0I26_FORMAT(bl, bl_op)
 
 #define DEF_EMIT_REG1I20_FORMAT(NAME, OP)                              \
 static inline void emit_##NAME(union loongarch_instruction *insn,      \
index f2b52b9..b9dce87 100644 (file)
@@ -8,7 +8,9 @@
 #define _ASM_UNWIND_H
 
 #include <linux/sched.h>
+#include <linux/ftrace.h>
 
+#include <asm/ptrace.h>
 #include <asm/stacktrace.h>
 
 enum unwinder_type {
@@ -20,11 +22,13 @@ struct unwind_state {
        char type; /* UNWINDER_XXX */
        struct stack_info stack_info;
        struct task_struct *task;
-       bool first, error, is_ftrace;
+       bool first, error, reset;
        int graph_idx;
        unsigned long sp, pc, ra;
 };
 
+bool default_next_frame(struct unwind_state *state);
+
 void unwind_start(struct unwind_state *state,
                  struct task_struct *task, struct pt_regs *regs);
 bool unwind_next_frame(struct unwind_state *state);
@@ -40,4 +44,39 @@ static inline bool unwind_error(struct unwind_state *state)
        return state->error;
 }
 
+#define GRAPH_FAKE_OFFSET (sizeof(struct pt_regs) - offsetof(struct pt_regs, regs[1]))
+
+static inline unsigned long unwind_graph_addr(struct unwind_state *state,
+                                       unsigned long pc, unsigned long cfa)
+{
+       return ftrace_graph_ret_addr(state->task, &state->graph_idx,
+                                    pc, (unsigned long *)(cfa - GRAPH_FAKE_OFFSET));
+}
+
+static __always_inline void __unwind_start(struct unwind_state *state,
+                                       struct task_struct *task, struct pt_regs *regs)
+{
+       memset(state, 0, sizeof(*state));
+       if (regs) {
+               state->sp = regs->regs[3];
+               state->pc = regs->csr_era;
+               state->ra = regs->regs[1];
+       } else if (task && task != current) {
+               state->sp = thread_saved_fp(task);
+               state->pc = thread_saved_ra(task);
+               state->ra = 0;
+       } else {
+               state->sp = (unsigned long)__builtin_frame_address(0);
+               state->pc = (unsigned long)__builtin_return_address(0);
+               state->ra = 0;
+       }
+       state->task = task;
+       get_stack_info(state->sp, state->task, &state->stack_info);
+       state->pc = unwind_graph_addr(state, state->pc, state->sp);
+}
+
+static __always_inline unsigned long __unwind_get_return_address(struct unwind_state *state)
+{
+       return unwind_done(state) ? 0 : state->pc;
+}
 #endif /* _ASM_UNWIND_H */
index fcaa024..c8cfbd5 100644 (file)
@@ -8,7 +8,7 @@ extra-y         := vmlinux.lds
 obj-y          += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
                   traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \
                   elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \
-                  alternative.o unaligned.o
+                  alternative.o unaligned.o unwind.o
 
 obj-$(CONFIG_ACPI)             += acpi.o
 obj-$(CONFIG_EFI)              += efi.o
index c5aebea..4ad1384 100644 (file)
@@ -74,7 +74,7 @@ static void __init_or_module recompute_jump(union loongarch_instruction *buf,
        switch (src->reg0i26_format.opcode) {
        case b_op:
        case bl_op:
-               jump_addr = cur_pc + sign_extend((si_h << 16 | si_l) << 2, 27);
+               jump_addr = cur_pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
                if (in_alt_jump(jump_addr, start, end))
                        return;
                offset = jump_addr - pc;
@@ -93,7 +93,7 @@ static void __init_or_module recompute_jump(union loongarch_instruction *buf,
                fallthrough;
        case beqz_op:
        case bnez_op:
-               jump_addr = cur_pc + sign_extend((si_h << 16 | si_l) << 2, 22);
+               jump_addr = cur_pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
                if (in_alt_jump(jump_addr, start, end))
                        return;
                offset = jump_addr - pc;
@@ -112,7 +112,7 @@ static void __init_or_module recompute_jump(union loongarch_instruction *buf,
        case bge_op:
        case bltu_op:
        case bgeu_op:
-               jump_addr = cur_pc + sign_extend(si << 2, 17);
+               jump_addr = cur_pc + sign_extend64(si << 2, 17);
                if (in_alt_jump(jump_addr, start, end))
                        return;
                offset = jump_addr - pc;
index 255a098..3a3fce2 100644 (file)
@@ -94,7 +94,7 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
        c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
                     LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
 
-       elf_hwcap |= HWCAP_LOONGARCH_CRC32;
+       elf_hwcap = HWCAP_LOONGARCH_CPUCFG | HWCAP_LOONGARCH_CRC32;
 
        config = read_cpucfg(LOONGARCH_CPUCFG1);
        if (config & CPUCFG1_UAL) {
index 75e5be8..7e5c293 100644 (file)
@@ -67,14 +67,17 @@ SYM_FUNC_END(except_vec_cex)
        .macro  BUILD_HANDLER exception handler prep
        .align  5
        SYM_FUNC_START(handle_\exception)
+       666:
        BACKUP_T0T1
        SAVE_ALL
        build_prep_\prep
        move    a0, sp
        la.abs  t0, do_\handler
        jirl    ra, t0, 0
+       668:
        RESTORE_ALL_AND_RET
        SYM_FUNC_END(handle_\exception)
+       SYM_DATA(unwind_hint_\exception, .word 668b - 666b)
        .endm
 
        BUILD_HANDLER ade ade badv
index 512579d..badc590 100644 (file)
@@ -58,7 +58,6 @@ u32 larch_insn_gen_nop(void)
 u32 larch_insn_gen_b(unsigned long pc, unsigned long dest)
 {
        long offset = dest - pc;
-       unsigned int immediate_l, immediate_h;
        union loongarch_instruction insn;
 
        if ((offset & 3) || offset < -SZ_128M || offset >= SZ_128M) {
@@ -66,15 +65,7 @@ u32 larch_insn_gen_b(unsigned long pc, unsigned long dest)
                return INSN_BREAK;
        }
 
-       offset >>= 2;
-
-       immediate_l = offset & 0xffff;
-       offset >>= 16;
-       immediate_h = offset & 0x3ff;
-
-       insn.reg0i26_format.opcode = b_op;
-       insn.reg0i26_format.immediate_l = immediate_l;
-       insn.reg0i26_format.immediate_h = immediate_h;
+       emit_b(&insn, offset >> 2);
 
        return insn.word;
 }
@@ -82,7 +73,6 @@ u32 larch_insn_gen_b(unsigned long pc, unsigned long dest)
 u32 larch_insn_gen_bl(unsigned long pc, unsigned long dest)
 {
        long offset = dest - pc;
-       unsigned int immediate_l, immediate_h;
        union loongarch_instruction insn;
 
        if ((offset & 3) || offset < -SZ_128M || offset >= SZ_128M) {
@@ -90,15 +80,7 @@ u32 larch_insn_gen_bl(unsigned long pc, unsigned long dest)
                return INSN_BREAK;
        }
 
-       offset >>= 2;
-
-       immediate_l = offset & 0xffff;
-       offset >>= 16;
-       immediate_h = offset & 0x3ff;
-
-       insn.reg0i26_format.opcode = bl_op;
-       insn.reg0i26_format.immediate_l = immediate_l;
-       insn.reg0i26_format.immediate_h = immediate_h;
+       emit_bl(&insn, offset >> 2);
 
        return insn.word;
 }
@@ -107,10 +89,7 @@ u32 larch_insn_gen_or(enum loongarch_gpr rd, enum loongarch_gpr rj, enum loongar
 {
        union loongarch_instruction insn;
 
-       insn.reg3_format.opcode = or_op;
-       insn.reg3_format.rd = rd;
-       insn.reg3_format.rj = rj;
-       insn.reg3_format.rk = rk;
+       emit_or(&insn, rd, rj, rk);
 
        return insn.word;
 }
@@ -124,9 +103,7 @@ u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm)
 {
        union loongarch_instruction insn;
 
-       insn.reg1i20_format.opcode = lu12iw_op;
-       insn.reg1i20_format.rd = rd;
-       insn.reg1i20_format.immediate = imm;
+       emit_lu12iw(&insn, rd, imm);
 
        return insn.word;
 }
@@ -135,9 +112,7 @@ u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm)
 {
        union loongarch_instruction insn;
 
-       insn.reg1i20_format.opcode = lu32id_op;
-       insn.reg1i20_format.rd = rd;
-       insn.reg1i20_format.immediate = imm;
+       emit_lu32id(&insn, rd, imm);
 
        return insn.word;
 }
@@ -146,10 +121,7 @@ u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
 {
        union loongarch_instruction insn;
 
-       insn.reg2i12_format.opcode = lu52id_op;
-       insn.reg2i12_format.rd = rd;
-       insn.reg2i12_format.rj = rj;
-       insn.reg2i12_format.immediate = imm;
+       emit_lu52id(&insn, rd, rj, imm);
 
        return insn.word;
 }
@@ -158,10 +130,7 @@ u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned l
 {
        union loongarch_instruction insn;
 
-       insn.reg2i16_format.opcode = jirl_op;
-       insn.reg2i16_format.rd = rd;
-       insn.reg2i16_format.rj = rj;
-       insn.reg2i16_format.immediate = (dest - pc) >> 2;
+       emit_jirl(&insn, rj, rd, (dest - pc) >> 2);
 
        return insn.word;
 }
index c583b1e..edfd220 100644 (file)
@@ -191,20 +191,14 @@ out:
 
 unsigned long __get_wchan(struct task_struct *task)
 {
-       unsigned long pc;
+       unsigned long pc = 0;
        struct unwind_state state;
 
        if (!try_get_task_stack(task))
                return 0;
 
-       unwind_start(&state, task, NULL);
-       state.sp = thread_saved_fp(task);
-       get_stack_info(state.sp, state.task, &state.stack_info);
-       state.pc = thread_saved_ra(task);
-#ifdef CONFIG_UNWINDER_PROLOGUE
-       state.type = UNWINDER_PROLOGUE;
-#endif
-       for (; !unwind_done(&state); unwind_next_frame(&state)) {
+       for (unwind_start(&state, task, NULL);
+            !unwind_done(&state); unwind_next_frame(&state)) {
                pc = unwind_get_return_address(&state);
                if (!pc)
                        break;
index 7ea62fa..c38a146 100644 (file)
@@ -72,9 +72,6 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
        if (!task)
                task = current;
 
-       if (user_mode(regs))
-               state.type = UNWINDER_GUESS;
-
        printk("%sCall Trace:", loglvl);
        for (unwind_start(&state, task, pregs);
              !unwind_done(&state); unwind_next_frame(&state)) {
diff --git a/arch/loongarch/kernel/unwind.c b/arch/loongarch/kernel/unwind.c
new file mode 100644 (file)
index 0000000..a463d69
--- /dev/null
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2023 Loongson Technology Corporation Limited
+ */
+#include <linux/kernel.h>
+#include <linux/ftrace.h>
+
+#include <asm/unwind.h>
+
+bool default_next_frame(struct unwind_state *state)
+{
+       struct stack_info *info = &state->stack_info;
+       unsigned long addr;
+
+       if (unwind_done(state))
+               return false;
+
+       do {
+               for (state->sp += sizeof(unsigned long);
+                    state->sp < info->end; state->sp += sizeof(unsigned long)) {
+                       addr = *(unsigned long *)(state->sp);
+                       state->pc = unwind_graph_addr(state, addr, state->sp + 8);
+                       if (__kernel_text_address(state->pc))
+                               return true;
+               }
+
+               state->sp = info->next_sp;
+
+       } while (!get_stack_info(state->sp, state->task, info));
+
+       return false;
+}
index e2d2e4f..98379b7 100644 (file)
@@ -2,37 +2,18 @@
 /*
  * Copyright (C) 2022 Loongson Technology Corporation Limited
  */
-#include <linux/kernel.h>
-#include <linux/ftrace.h>
-
 #include <asm/unwind.h>
 
 unsigned long unwind_get_return_address(struct unwind_state *state)
 {
-       if (unwind_done(state))
-               return 0;
-       else if (state->first)
-               return state->pc;
-
-       return *(unsigned long *)(state->sp);
+       return __unwind_get_return_address(state);
 }
 EXPORT_SYMBOL_GPL(unwind_get_return_address);
 
 void unwind_start(struct unwind_state *state, struct task_struct *task,
                    struct pt_regs *regs)
 {
-       memset(state, 0, sizeof(*state));
-
-       if (regs) {
-               state->sp = regs->regs[3];
-               state->pc = regs->csr_era;
-       }
-
-       state->task = task;
-       state->first = true;
-
-       get_stack_info(state->sp, state->task, &state->stack_info);
-
+       __unwind_start(state, task, regs);
        if (!unwind_done(state) && !__kernel_text_address(state->pc))
                unwind_next_frame(state);
 }
@@ -40,30 +21,6 @@ EXPORT_SYMBOL_GPL(unwind_start);
 
 bool unwind_next_frame(struct unwind_state *state)
 {
-       struct stack_info *info = &state->stack_info;
-       unsigned long addr;
-
-       if (unwind_done(state))
-               return false;
-
-       if (state->first)
-               state->first = false;
-
-       do {
-               for (state->sp += sizeof(unsigned long);
-                    state->sp < info->end;
-                    state->sp += sizeof(unsigned long)) {
-                       addr = *(unsigned long *)(state->sp);
-                       state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
-                                       addr, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET));
-                       if (__kernel_text_address(addr))
-                               return true;
-               }
-
-               state->sp = info->next_sp;
-
-       } while (!get_stack_info(state->sp, state->task, info));
-
-       return false;
+       return default_next_frame(state);
 }
 EXPORT_SYMBOL_GPL(unwind_next_frame);
index 0f8d145..9095fde 100644 (file)
 /*
  * Copyright (C) 2022 Loongson Technology Corporation Limited
  */
+#include <linux/cpumask.h>
 #include <linux/ftrace.h>
 #include <linux/kallsyms.h>
 
 #include <asm/inst.h>
+#include <asm/loongson.h>
 #include <asm/ptrace.h>
+#include <asm/setup.h>
 #include <asm/unwind.h>
 
-static inline void unwind_state_fixup(struct unwind_state *state)
-{
-#ifdef CONFIG_DYNAMIC_FTRACE
-       static unsigned long ftrace = (unsigned long)ftrace_call + 4;
-
-       if (state->pc == ftrace)
-               state->is_ftrace = true;
+extern const int unwind_hint_ade;
+extern const int unwind_hint_ale;
+extern const int unwind_hint_bp;
+extern const int unwind_hint_fpe;
+extern const int unwind_hint_fpu;
+extern const int unwind_hint_lsx;
+extern const int unwind_hint_lasx;
+extern const int unwind_hint_lbt;
+extern const int unwind_hint_ri;
+extern const int unwind_hint_watch;
+extern unsigned long eentry;
+#ifdef CONFIG_NUMA
+extern unsigned long pcpu_handlers[NR_CPUS];
 #endif
-}
 
-unsigned long unwind_get_return_address(struct unwind_state *state)
+static inline bool scan_handlers(unsigned long entry_offset)
 {
+       int idx, offset;
 
-       if (unwind_done(state))
-               return 0;
-       else if (state->type)
-               return state->pc;
-       else if (state->first)
-               return state->pc;
-
-       return *(unsigned long *)(state->sp);
+       if (entry_offset >= EXCCODE_INT_START * VECSIZE)
+               return false;
 
+       idx = entry_offset / VECSIZE;
+       offset = entry_offset % VECSIZE;
+       switch (idx) {
+       case EXCCODE_ADE:
+               return offset == unwind_hint_ade;
+       case EXCCODE_ALE:
+               return offset == unwind_hint_ale;
+       case EXCCODE_BP:
+               return offset == unwind_hint_bp;
+       case EXCCODE_FPE:
+               return offset == unwind_hint_fpe;
+       case EXCCODE_FPDIS:
+               return offset == unwind_hint_fpu;
+       case EXCCODE_LSXDIS:
+               return offset == unwind_hint_lsx;
+       case EXCCODE_LASXDIS:
+               return offset == unwind_hint_lasx;
+       case EXCCODE_BTDIS:
+               return offset == unwind_hint_lbt;
+       case EXCCODE_INE:
+               return offset == unwind_hint_ri;
+       case EXCCODE_WATCH:
+               return offset == unwind_hint_watch;
+       default:
+               return false;
+       }
 }
-EXPORT_SYMBOL_GPL(unwind_get_return_address);
 
-static bool unwind_by_guess(struct unwind_state *state)
+static inline bool fix_exception(unsigned long pc)
 {
-       struct stack_info *info = &state->stack_info;
-       unsigned long addr;
-
-       for (state->sp += sizeof(unsigned long);
-            state->sp < info->end;
-            state->sp += sizeof(unsigned long)) {
-               addr = *(unsigned long *)(state->sp);
-               state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
-                               addr, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET));
-               if (__kernel_text_address(addr))
+#ifdef CONFIG_NUMA
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               if (!pcpu_handlers[cpu])
+                       continue;
+               if (scan_handlers(pc - pcpu_handlers[cpu]))
                        return true;
        }
+#endif
+       return scan_handlers(pc - eentry);
+}
 
+/*
+ * As we meet ftrace_regs_entry, reset first flag like first doing
+ * tracing. Prologue analysis will stop soon because PC is at entry.
+ */
+static inline bool fix_ftrace(unsigned long pc)
+{
+#ifdef CONFIG_DYNAMIC_FTRACE
+       return pc == (unsigned long)ftrace_call + LOONGARCH_INSN_SIZE;
+#else
        return false;
+#endif
 }
 
+static inline bool unwind_state_fixup(struct unwind_state *state)
+{
+       if (!fix_exception(state->pc) && !fix_ftrace(state->pc))
+               return false;
+
+       state->reset = true;
+       return true;
+}
+
+/*
+ * A LoongArch function prologue is as follows:
+ *     [instructions not use stack var]
+ *     addi.d sp, sp, -imm
+ *     st.d   xx, sp, offset <- save callee saved regs and
+ *     st.d   yy, sp, offset    save ra if function is nest.
+ *     [others instructions]
+ */
 static bool unwind_by_prologue(struct unwind_state *state)
 {
        long frame_ra = -1;
        unsigned long frame_size = 0;
-       unsigned long size, offset, pc = state->pc;
+       unsigned long size, offset, pc;
        struct pt_regs *regs;
        struct stack_info *info = &state->stack_info;
        union loongarch_instruction *ip, *ip_end;
@@ -64,20 +119,21 @@ static bool unwind_by_prologue(struct unwind_state *state)
        if (state->sp >= info->end || state->sp < info->begin)
                return false;
 
-       if (state->is_ftrace) {
-               /*
-                * As we meet ftrace_regs_entry, reset first flag like first doing
-                * tracing. Prologue analysis will stop soon because PC is at entry.
-                */
+       if (state->reset) {
                regs = (struct pt_regs *)state->sp;
                state->first = true;
-               state->is_ftrace = false;
+               state->reset = false;
                state->pc = regs->csr_era;
                state->ra = regs->regs[1];
                state->sp = regs->regs[3];
                return true;
        }
 
+       /*
+        * When first is not set, the PC is a return address in the previous frame.
+        * We need to adjust its value in case it overflows to the next symbol.
+        */
+       pc = state->pc - (state->first ? 0 : LOONGARCH_INSN_SIZE);
        if (!kallsyms_lookup_size_offset(pc, &size, &offset))
                return false;
 
@@ -93,6 +149,10 @@ static bool unwind_by_prologue(struct unwind_state *state)
                ip++;
        }
 
+       /*
+        * Can't find a stack-allocation action; the PC may be in a leaf function.
+        * Only the case where "first" is set is reasonable; otherwise the
+        * prologue analysis is broken.
+        */
        if (!frame_size) {
                if (state->first)
                        goto first;
@@ -110,6 +170,7 @@ static bool unwind_by_prologue(struct unwind_state *state)
                ip++;
        }
 
+       /* Can't find save $ra action, PC may be in a leaf function, too. */
        if (frame_ra < 0) {
                if (state->first) {
                        state->sp = state->sp + frame_size;
@@ -118,88 +179,47 @@ static bool unwind_by_prologue(struct unwind_state *state)
                return false;
        }
 
-       if (state->first)
-               state->first = false;
-
        state->pc = *(unsigned long *)(state->sp + frame_ra);
        state->sp = state->sp + frame_size;
        goto out;
 
 first:
-       state->first = false;
-       if (state->pc == state->ra)
-               return false;
-
        state->pc = state->ra;
 
 out:
-       unwind_state_fixup(state);
-       return !!__kernel_text_address(state->pc);
-}
-
-void unwind_start(struct unwind_state *state, struct task_struct *task,
-                   struct pt_regs *regs)
-{
-       memset(state, 0, sizeof(*state));
-
-       if (regs &&  __kernel_text_address(regs->csr_era)) {
-               state->pc = regs->csr_era;
-               state->sp = regs->regs[3];
-               state->ra = regs->regs[1];
-               state->type = UNWINDER_PROLOGUE;
-       }
-
-       state->task = task;
-       state->first = true;
-
-       get_stack_info(state->sp, state->task, &state->stack_info);
-
-       if (!unwind_done(state) && !__kernel_text_address(state->pc))
-               unwind_next_frame(state);
+       state->first = false;
+       return unwind_state_fixup(state) || __kernel_text_address(state->pc);
 }
-EXPORT_SYMBOL_GPL(unwind_start);
 
-bool unwind_next_frame(struct unwind_state *state)
+static bool next_frame(struct unwind_state *state)
 {
-       struct stack_info *info = &state->stack_info;
-       struct pt_regs *regs;
        unsigned long pc;
+       struct pt_regs *regs;
+       struct stack_info *info = &state->stack_info;
 
        if (unwind_done(state))
                return false;
 
        do {
-               switch (state->type) {
-               case UNWINDER_GUESS:
-                       state->first = false;
-                       if (unwind_by_guess(state))
-                               return true;
-                       break;
+               if (unwind_by_prologue(state)) {
+                       state->pc = unwind_graph_addr(state, state->pc, state->sp);
+                       return true;
+               }
+
+               if (info->type == STACK_TYPE_IRQ && info->end == state->sp) {
+                       regs = (struct pt_regs *)info->next_sp;
+                       pc = regs->csr_era;
+
+                       if (user_mode(regs) || !__kernel_text_address(pc))
+                               return false;
+
+                       state->first = true;
+                       state->pc = pc;
+                       state->ra = regs->regs[1];
+                       state->sp = regs->regs[3];
+                       get_stack_info(state->sp, state->task, info);
 
-               case UNWINDER_PROLOGUE:
-                       if (unwind_by_prologue(state)) {
-                               state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
-                                               state->pc, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET));
-                               return true;
-                       }
-
-                       if (info->type == STACK_TYPE_IRQ &&
-                               info->end == state->sp) {
-                               regs = (struct pt_regs *)info->next_sp;
-                               pc = regs->csr_era;
-
-                               if (user_mode(regs) || !__kernel_text_address(pc))
-                                       return false;
-
-                               state->first = true;
-                               state->ra = regs->regs[1];
-                               state->sp = regs->regs[3];
-                               state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
-                                               pc, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET));
-                               get_stack_info(state->sp, state->task, info);
-
-                               return true;
-                       }
+                       return true;
                }
 
                state->sp = info->next_sp;
@@ -208,4 +228,36 @@ bool unwind_next_frame(struct unwind_state *state)
 
        return false;
 }
+
+unsigned long unwind_get_return_address(struct unwind_state *state)
+{
+       return __unwind_get_return_address(state);
+}
+EXPORT_SYMBOL_GPL(unwind_get_return_address);
+
+void unwind_start(struct unwind_state *state, struct task_struct *task,
+                   struct pt_regs *regs)
+{
+       __unwind_start(state, task, regs);
+       state->type = UNWINDER_PROLOGUE;
+       state->first = true;
+
+       /*
+        * The current PC is not kernel text address, we cannot find its
+        * relative symbol. Thus, prologue analysis will be broken. Luckily,
+        * we can use the default_next_frame().
+        */
+       if (!__kernel_text_address(state->pc)) {
+               state->type = UNWINDER_GUESS;
+               if (!unwind_done(state))
+                       unwind_next_frame(state);
+       }
+}
+EXPORT_SYMBOL_GPL(unwind_start);
+
+bool unwind_next_frame(struct unwind_state *state)
+{
+       return state->type == UNWINDER_PROLOGUE ?
+                       next_frame(state) : default_next_frame(state);
+}
 EXPORT_SYMBOL_GPL(unwind_next_frame);
index da3681f..8bad6b0 100644 (file)
@@ -251,7 +251,7 @@ static void output_pgtable_bits_defines(void)
 }
 
 #ifdef CONFIG_NUMA
-static unsigned long pcpu_handlers[NR_CPUS];
+unsigned long pcpu_handlers[NR_CPUS];
 #endif
 extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
 
index af04cea..352d7de 100755 (executable)
@@ -210,6 +210,10 @@ ld_version()
        gsub(".*version ", "");
        gsub("-.*", "");
        split($1,a, ".");
+       if( length(a[3]) == "8" )
+               # a[3] is probably a date of format yyyymmdd used for release snapshots. We
+               # can assume it to be zero as it does not signify a new version as such.
+               a[3] = 0;
        print a[1]*100000000 + a[2]*1000000 + a[3]*10000;
        exit
     }'
index 4f89799..699a885 100644 (file)
@@ -137,7 +137,7 @@ struct imc_pmu {
  * are inited.
  */
 struct imc_pmu_ref {
-       struct mutex lock;
+       spinlock_t lock;
        unsigned int id;
        int refc;
 };
index 80a148c..44a35ed 100644 (file)
@@ -1012,7 +1012,7 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,
 
 void hpt_clear_stress(void);
 static struct timer_list stress_hpt_timer;
-void stress_hpt_timer_fn(struct timer_list *timer)
+static void stress_hpt_timer_fn(struct timer_list *timer)
 {
        int next_cpu;
 
index d517aba..100e97d 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/cputhreads.h>
 #include <asm/smp.h>
 #include <linux/string.h>
+#include <linux/spinlock.h>
 
 /* Nest IMC data structures and variables */
 
@@ -21,7 +22,7 @@
  * Used to avoid races in counting the nest-pmu units during hotplug
  * register and unregister
  */
-static DEFINE_MUTEX(nest_init_lock);
+static DEFINE_SPINLOCK(nest_init_lock);
 static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
 static struct imc_pmu **per_nest_pmu_arr;
 static cpumask_t nest_imc_cpumask;
@@ -50,7 +51,7 @@ static int trace_imc_mem_size;
  * core and trace-imc
  */
 static struct imc_pmu_ref imc_global_refc = {
-       .lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
+       .lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
        .id = 0,
        .refc = 0,
 };
@@ -400,7 +401,7 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu)
                                       get_hard_smp_processor_id(cpu));
                /*
                 * If this is the last cpu in this chip then, skip the reference
-                * count mutex lock and make the reference count on this chip zero.
+                * count lock and make the reference count on this chip zero.
                 */
                ref = get_nest_pmu_ref(cpu);
                if (!ref)
@@ -462,15 +463,15 @@ static void nest_imc_counters_release(struct perf_event *event)
        /*
         * See if we need to disable the nest PMU.
         * If no events are currently in use, then we have to take a
-        * mutex to ensure that we don't race with another task doing
+        * lock to ensure that we don't race with another task doing
         * enable or disable the nest counters.
         */
        ref = get_nest_pmu_ref(event->cpu);
        if (!ref)
                return;
 
-       /* Take the mutex lock for this node and then decrement the reference count */
-       mutex_lock(&ref->lock);
+       /* Take the lock for this node and then decrement the reference count */
+       spin_lock(&ref->lock);
        if (ref->refc == 0) {
                /*
                 * The scenario where this is true is, when perf session is
@@ -482,7 +483,7 @@ static void nest_imc_counters_release(struct perf_event *event)
                 * an OPAL call to disable the engine in that node.
                 *
                 */
-               mutex_unlock(&ref->lock);
+               spin_unlock(&ref->lock);
                return;
        }
        ref->refc--;
@@ -490,7 +491,7 @@ static void nest_imc_counters_release(struct perf_event *event)
                rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
                                            get_hard_smp_processor_id(event->cpu));
                if (rc) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id);
                        return;
                }
@@ -498,7 +499,7 @@ static void nest_imc_counters_release(struct perf_event *event)
                WARN(1, "nest-imc: Invalid event reference count\n");
                ref->refc = 0;
        }
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
 }
 
 static int nest_imc_event_init(struct perf_event *event)
@@ -557,26 +558,25 @@ static int nest_imc_event_init(struct perf_event *event)
 
        /*
         * Get the imc_pmu_ref struct for this node.
-        * Take the mutex lock and then increment the count of nest pmu events
-        * inited.
+        * Take the lock and then increment the count of nest pmu events inited.
         */
        ref = get_nest_pmu_ref(event->cpu);
        if (!ref)
                return -EINVAL;
 
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        if (ref->refc == 0) {
                rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
                                             get_hard_smp_processor_id(event->cpu));
                if (rc) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("nest-imc: Unable to start the counters for node %d\n",
                                                                        node_id);
                        return rc;
                }
        }
        ++ref->refc;
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
 
        event->destroy = nest_imc_counters_release;
        return 0;
@@ -612,9 +612,8 @@ static int core_imc_mem_init(int cpu, int size)
                return -ENOMEM;
        mem_info->vbase = page_address(page);
 
-       /* Init the mutex */
        core_imc_refc[core_id].id = core_id;
-       mutex_init(&core_imc_refc[core_id].lock);
+       spin_lock_init(&core_imc_refc[core_id].lock);
 
        rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
                                __pa((void *)mem_info->vbase),
@@ -703,9 +702,8 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
                perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
        } else {
                /*
-                * If this is the last cpu in this core then, skip taking refernce
-                * count mutex lock for this core and directly zero "refc" for
-                * this core.
+                * If this is the last cpu in this core then skip taking reference
+                * count lock for this core and directly zero "refc" for this core.
                 */
                opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
                                       get_hard_smp_processor_id(cpu));
@@ -720,11 +718,11 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
                 * last cpu in this core and core-imc event running
                 * in this cpu.
                 */
-               mutex_lock(&imc_global_refc.lock);
+               spin_lock(&imc_global_refc.lock);
                if (imc_global_refc.id == IMC_DOMAIN_CORE)
                        imc_global_refc.refc--;
 
-               mutex_unlock(&imc_global_refc.lock);
+               spin_unlock(&imc_global_refc.lock);
        }
        return 0;
 }
@@ -739,7 +737,7 @@ static int core_imc_pmu_cpumask_init(void)
 
 static void reset_global_refc(struct perf_event *event)
 {
-               mutex_lock(&imc_global_refc.lock);
+               spin_lock(&imc_global_refc.lock);
                imc_global_refc.refc--;
 
                /*
@@ -751,7 +749,7 @@ static void reset_global_refc(struct perf_event *event)
                        imc_global_refc.refc = 0;
                        imc_global_refc.id = 0;
                }
-               mutex_unlock(&imc_global_refc.lock);
+               spin_unlock(&imc_global_refc.lock);
 }
 
 static void core_imc_counters_release(struct perf_event *event)
@@ -764,17 +762,17 @@ static void core_imc_counters_release(struct perf_event *event)
        /*
         * See if we need to disable the IMC PMU.
         * If no events are currently in use, then we have to take a
-        * mutex to ensure that we don't race with another task doing
+        * lock to ensure that we don't race with another task doing
         * enable or disable the core counters.
         */
        core_id = event->cpu / threads_per_core;
 
-       /* Take the mutex lock and decrement the refernce count for this core */
+       /* Take the lock and decrement the refernce count for this core */
        ref = &core_imc_refc[core_id];
        if (!ref)
                return;
 
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        if (ref->refc == 0) {
                /*
                 * The scenario where this is true is, when perf session is
@@ -786,7 +784,7 @@ static void core_imc_counters_release(struct perf_event *event)
                 * an OPAL call to disable the engine in that core.
                 *
                 */
-               mutex_unlock(&ref->lock);
+               spin_unlock(&ref->lock);
                return;
        }
        ref->refc--;
@@ -794,7 +792,7 @@ static void core_imc_counters_release(struct perf_event *event)
                rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
                                            get_hard_smp_processor_id(event->cpu));
                if (rc) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
                        return;
                }
@@ -802,7 +800,7 @@ static void core_imc_counters_release(struct perf_event *event)
                WARN(1, "core-imc: Invalid event reference count\n");
                ref->refc = 0;
        }
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
 
        reset_global_refc(event);
 }
@@ -840,7 +838,6 @@ static int core_imc_event_init(struct perf_event *event)
        if ((!pcmi->vbase))
                return -ENODEV;
 
-       /* Get the core_imc mutex for this core */
        ref = &core_imc_refc[core_id];
        if (!ref)
                return -EINVAL;
@@ -848,22 +845,22 @@ static int core_imc_event_init(struct perf_event *event)
        /*
         * Core pmu units are enabled only when it is used.
         * See if this is triggered for the first time.
-        * If yes, take the mutex lock and enable the core counters.
+        * If yes, take the lock and enable the core counters.
         * If not, just increment the count in core_imc_refc struct.
         */
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        if (ref->refc == 0) {
                rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
                                             get_hard_smp_processor_id(event->cpu));
                if (rc) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("core-imc: Unable to start the counters for core %d\n",
                                                                        core_id);
                        return rc;
                }
        }
        ++ref->refc;
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
 
        /*
         * Since the system can run either in accumulation or trace-mode
@@ -874,7 +871,7 @@ static int core_imc_event_init(struct perf_event *event)
         * to know whether any other trace/thread imc
         * events are running.
         */
-       mutex_lock(&imc_global_refc.lock);
+       spin_lock(&imc_global_refc.lock);
        if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
                /*
                 * No other trace/thread imc events are running in
@@ -883,10 +880,10 @@ static int core_imc_event_init(struct perf_event *event)
                imc_global_refc.id = IMC_DOMAIN_CORE;
                imc_global_refc.refc++;
        } else {
-               mutex_unlock(&imc_global_refc.lock);
+               spin_unlock(&imc_global_refc.lock);
                return -EBUSY;
        }
-       mutex_unlock(&imc_global_refc.lock);
+       spin_unlock(&imc_global_refc.lock);
 
        event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
        event->destroy = core_imc_counters_release;
@@ -958,10 +955,10 @@ static int ppc_thread_imc_cpu_offline(unsigned int cpu)
        mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
 
        /* Reduce the refc if thread-imc event running on this cpu */
-       mutex_lock(&imc_global_refc.lock);
+       spin_lock(&imc_global_refc.lock);
        if (imc_global_refc.id == IMC_DOMAIN_THREAD)
                imc_global_refc.refc--;
-       mutex_unlock(&imc_global_refc.lock);
+       spin_unlock(&imc_global_refc.lock);
 
        return 0;
 }
@@ -1001,7 +998,7 @@ static int thread_imc_event_init(struct perf_event *event)
        if (!target)
                return -EINVAL;
 
-       mutex_lock(&imc_global_refc.lock);
+       spin_lock(&imc_global_refc.lock);
        /*
         * Check if any other trace/core imc events are running in the
         * system, if not set the global id to thread-imc.
@@ -1010,10 +1007,10 @@ static int thread_imc_event_init(struct perf_event *event)
                imc_global_refc.id = IMC_DOMAIN_THREAD;
                imc_global_refc.refc++;
        } else {
-               mutex_unlock(&imc_global_refc.lock);
+               spin_unlock(&imc_global_refc.lock);
                return -EBUSY;
        }
-       mutex_unlock(&imc_global_refc.lock);
+       spin_unlock(&imc_global_refc.lock);
 
        event->pmu->task_ctx_nr = perf_sw_context;
        event->destroy = reset_global_refc;
@@ -1135,25 +1132,25 @@ static int thread_imc_event_add(struct perf_event *event, int flags)
        /*
         * imc pmus are enabled only when it is used.
         * See if this is triggered for the first time.
-        * If yes, take the mutex lock and enable the counters.
+        * If yes, take the lock and enable the counters.
         * If not, just increment the count in ref count struct.
         */
        ref = &core_imc_refc[core_id];
        if (!ref)
                return -EINVAL;
 
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        if (ref->refc == 0) {
                if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
                    get_hard_smp_processor_id(smp_processor_id()))) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("thread-imc: Unable to start the counter\
                                for core %d\n", core_id);
                        return -EINVAL;
                }
        }
        ++ref->refc;
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
        return 0;
 }
 
@@ -1170,12 +1167,12 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
                return;
        }
 
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        ref->refc--;
        if (ref->refc == 0) {
                if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
                    get_hard_smp_processor_id(smp_processor_id()))) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("thread-imc: Unable to stop the counters\
                                for core %d\n", core_id);
                        return;
@@ -1183,7 +1180,7 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
        } else if (ref->refc < 0) {
                ref->refc = 0;
        }
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
 
        /* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
        mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
@@ -1224,9 +1221,8 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
                }
        }
 
-       /* Init the mutex, if not already */
        trace_imc_refc[core_id].id = core_id;
-       mutex_init(&trace_imc_refc[core_id].lock);
+       spin_lock_init(&trace_imc_refc[core_id].lock);
 
        mtspr(SPRN_LDBAR, 0);
        return 0;
@@ -1246,10 +1242,10 @@ static int ppc_trace_imc_cpu_offline(unsigned int cpu)
         * Reduce the refc if any trace-imc event running
         * on this cpu.
         */
-       mutex_lock(&imc_global_refc.lock);
+       spin_lock(&imc_global_refc.lock);
        if (imc_global_refc.id == IMC_DOMAIN_TRACE)
                imc_global_refc.refc--;
-       mutex_unlock(&imc_global_refc.lock);
+       spin_unlock(&imc_global_refc.lock);
 
        return 0;
 }
@@ -1371,17 +1367,17 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
        }
 
        mtspr(SPRN_LDBAR, ldbar_value);
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        if (ref->refc == 0) {
                if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
                                get_hard_smp_processor_id(smp_processor_id()))) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
                        return -EINVAL;
                }
        }
        ++ref->refc;
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
        return 0;
 }
 
@@ -1414,19 +1410,19 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
                return;
        }
 
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        ref->refc--;
        if (ref->refc == 0) {
                if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
                                get_hard_smp_processor_id(smp_processor_id()))) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
                        return;
                }
        } else if (ref->refc < 0) {
                ref->refc = 0;
        }
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
 
        trace_imc_event_stop(event, flags);
 }
@@ -1448,7 +1444,7 @@ static int trace_imc_event_init(struct perf_event *event)
         * no other thread is running any core/thread imc
         * events
         */
-       mutex_lock(&imc_global_refc.lock);
+       spin_lock(&imc_global_refc.lock);
        if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
                /*
                 * No core/thread imc events are running in the
@@ -1457,10 +1453,10 @@ static int trace_imc_event_init(struct perf_event *event)
                imc_global_refc.id = IMC_DOMAIN_TRACE;
                imc_global_refc.refc++;
        } else {
-               mutex_unlock(&imc_global_refc.lock);
+               spin_unlock(&imc_global_refc.lock);
                return -EBUSY;
        }
-       mutex_unlock(&imc_global_refc.lock);
+       spin_unlock(&imc_global_refc.lock);
 
        event->hw.idx = -1;
 
@@ -1533,10 +1529,10 @@ static int init_nest_pmu_ref(void)
        i = 0;
        for_each_node(nid) {
                /*
-                * Mutex lock to avoid races while tracking the number of
+                * Take the lock to avoid races while tracking the number of
                 * sessions using the chip's nest pmu units.
                 */
-               mutex_init(&nest_imc_refc[i].lock);
+               spin_lock_init(&nest_imc_refc[i].lock);
 
                /*
                 * Loop to init the "id" with the node_id. Variable "i" initialized to
@@ -1633,7 +1629,7 @@ static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
 static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 {
        if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
-               mutex_lock(&nest_init_lock);
+               spin_lock(&nest_init_lock);
                if (nest_pmus == 1) {
                        cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
                        kfree(nest_imc_refc);
@@ -1643,7 +1639,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 
                if (nest_pmus > 0)
                        nest_pmus--;
-               mutex_unlock(&nest_init_lock);
+               spin_unlock(&nest_init_lock);
        }
 
        /* Free core_imc memory */
@@ -1800,11 +1796,11 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
                * rest. To handle the cpuhotplug callback unregister, we track
                * the number of nest pmus in "nest_pmus".
                */
-               mutex_lock(&nest_init_lock);
+               spin_lock(&nest_init_lock);
                if (nest_pmus == 0) {
                        ret = init_nest_pmu_ref();
                        if (ret) {
-                               mutex_unlock(&nest_init_lock);
+                               spin_unlock(&nest_init_lock);
                                kfree(per_nest_pmu_arr);
                                per_nest_pmu_arr = NULL;
                                goto err_free_mem;
@@ -1812,7 +1808,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
                        /* Register for cpu hotplug notification. */
                        ret = nest_pmu_cpumask_init();
                        if (ret) {
-                               mutex_unlock(&nest_init_lock);
+                               spin_unlock(&nest_init_lock);
                                kfree(nest_imc_refc);
                                kfree(per_nest_pmu_arr);
                                per_nest_pmu_arr = NULL;
@@ -1820,7 +1816,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
                        }
                }
                nest_pmus++;
-               mutex_unlock(&nest_init_lock);
+               spin_unlock(&nest_init_lock);
                break;
        case IMC_DOMAIN_CORE:
                ret = core_imc_pmu_cpumask_init();
index 43bed6c..5235fd1 100644 (file)
                        bus-range = <0x0 0xff>;
                        ranges = <0x81000000  0x0 0x60080000  0x0 0x60080000 0x0 0x10000>,      /* I/O */
                                 <0x82000000  0x0 0x60090000  0x0 0x60090000 0x0 0xff70000>,    /* mem */
-                                <0x82000000  0x0 0x70000000  0x0 0x70000000 0x0 0x1000000>,    /* mem */
+                                <0x82000000  0x0 0x70000000  0x0 0x70000000 0x0 0x10000000>,    /* mem */
                                 <0xc3000000 0x20 0x00000000 0x20 0x00000000 0x20 0x00000000>;  /* mem prefetchable */
                        num-lanes = <0x8>;
                        interrupts = <56>, <57>, <58>, <59>, <60>, <61>, <62>, <63>, <64>;
index 2b60913..696c9e0 100644 (file)
@@ -508,6 +508,7 @@ static void __init setup_lowcore_dat_on(void)
 {
        struct lowcore *abs_lc;
        unsigned long flags;
+       int i;
 
        __ctl_clear_bit(0, 28);
        S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
@@ -523,8 +524,8 @@ static void __init setup_lowcore_dat_on(void)
        abs_lc = get_abs_lowcore(&flags);
        abs_lc->restart_flags = RESTART_FLAG_CTLREGS;
        abs_lc->program_new_psw = S390_lowcore.program_new_psw;
-       memcpy(abs_lc->cregs_save_area, S390_lowcore.cregs_save_area,
-              sizeof(abs_lc->cregs_save_area));
+       for (i = 0; i < 16; i++)
+               abs_lc->cregs_save_area[i] = S390_lowcore.cregs_save_area[i];
        put_abs_lowcore(abs_lc, flags);
 }
 
index 5521ea1..aa9b964 100644 (file)
@@ -32,7 +32,7 @@ intcall:
        movw    %dx, %si
        movw    %sp, %di
        movw    $11, %cx
-       rep; movsd
+       rep; movsl
 
        /* Pop full state from the stack */
        popal
@@ -67,7 +67,7 @@ intcall:
        jz      4f
        movw    %sp, %si
        movw    $11, %cx
-       rep; movsd
+       rep; movsl
 4:     addw    $44, %sp
 
        /* Restore state and return */
index dfd2c12..bafdc2b 100644 (file)
@@ -6339,6 +6339,7 @@ __init int intel_pmu_init(void)
                break;
 
        case INTEL_FAM6_SAPPHIRERAPIDS_X:
+       case INTEL_FAM6_EMERALDRAPIDS_X:
                pmem = true;
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids));
index 3019fb1..551741e 100644 (file)
@@ -677,6 +677,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,           &icx_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,           &icx_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    &icx_cstates),
+       X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X,     &icx_cstates),
 
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,         &icl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,           &icl_cstates),
index f35f1ff..6aaae18 100644 (file)
@@ -1111,6 +1111,7 @@ struct msr_bitmap_range {
 
 /* Xen emulation context */
 struct kvm_xen {
+       struct mutex xen_lock;
        u32 xen_version;
        bool long_mode;
        bool runstate_update_flag;
index 1f60a2b..fdbb5f0 100644 (file)
@@ -330,7 +330,16 @@ static void __init bp_init_freq_invariance(void)
 
 static void disable_freq_invariance_workfn(struct work_struct *work)
 {
+       int cpu;
+
        static_branch_disable(&arch_scale_freq_key);
+
+       /*
+        * Set arch_freq_scale to a default value on all cpus
+        * This negates the effect of scaling
+        */
+       for_each_possible_cpu(cpu)
+               per_cpu(arch_freq_scale, cpu) = SCHED_CAPACITY_SCALE;
 }
 
 static DECLARE_WORK(disable_freq_invariance_work,
index efe0c30..77538ab 100644 (file)
@@ -146,6 +146,30 @@ static inline struct rmid_entry *__rmid_entry(u32 rmid)
        return entry;
 }
 
+static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
+{
+       u64 msr_val;
+
+       /*
+        * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
+        * with a valid event code for supported resource type and the bits
+        * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
+        * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
+        * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
+        * are error bits.
+        */
+       wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
+       rdmsrl(MSR_IA32_QM_CTR, msr_val);
+
+       if (msr_val & RMID_VAL_ERROR)
+               return -EIO;
+       if (msr_val & RMID_VAL_UNAVAIL)
+               return -EINVAL;
+
+       *val = msr_val;
+       return 0;
+}
+
 static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
                                                 u32 rmid,
                                                 enum resctrl_event_id eventid)
@@ -172,8 +196,12 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
        struct arch_mbm_state *am;
 
        am = get_arch_mbm_state(hw_dom, rmid, eventid);
-       if (am)
+       if (am) {
                memset(am, 0, sizeof(*am));
+
+               /* Record any initial, non-zero count value. */
+               __rmid_read(rmid, eventid, &am->prev_msr);
+       }
 }
 
 static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
@@ -191,25 +219,14 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
        struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
        struct arch_mbm_state *am;
        u64 msr_val, chunks;
+       int ret;
 
        if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
                return -EINVAL;
 
-       /*
-        * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
-        * with a valid event code for supported resource type and the bits
-        * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
-        * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
-        * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
-        * are error bits.
-        */
-       wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
-       rdmsrl(MSR_IA32_QM_CTR, msr_val);
-
-       if (msr_val & RMID_VAL_ERROR)
-               return -EIO;
-       if (msr_val & RMID_VAL_UNAVAIL)
-               return -EINVAL;
+       ret = __rmid_read(rmid, eventid, &msr_val);
+       if (ret)
+               return ret;
 
        am = get_arch_mbm_state(hw_dom, rmid, eventid);
        if (am) {
index e5a48f0..5993da2 100644 (file)
@@ -580,8 +580,10 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
        /*
         * Ensure the task's closid and rmid are written before determining if
         * the task is current that will decide if it will be interrupted.
+        * This pairs with the full barrier between the rq->curr update and
+        * resctrl_sched_in() during context switch.
         */
-       barrier();
+       smp_mb();
 
        /*
         * By now, the task's closid and rmid are set. If the task is current
@@ -2402,6 +2404,14 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
                        WRITE_ONCE(t->rmid, to->mon.rmid);
 
                        /*
+                        * Order the closid/rmid stores above before the loads
+                        * in task_curr(). This pairs with the full barrier
+                        * between the rq->curr update and resctrl_sched_in()
+                        * during context switch.
+                        */
+                       smp_mb();
+
+                       /*
                         * If the task is on a CPU, set the CPU in the mask.
                         * The detection is inaccurate as tasks might move or
                         * schedule before the smp function call takes place.
index b14653b..596061c 100644 (file)
@@ -770,16 +770,22 @@ struct kvm_cpuid_array {
        int nent;
 };
 
+static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array)
+{
+       if (array->nent >= array->maxnent)
+               return NULL;
+
+       return &array->entries[array->nent++];
+}
+
 static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
                                              u32 function, u32 index)
 {
-       struct kvm_cpuid_entry2 *entry;
+       struct kvm_cpuid_entry2 *entry = get_next_cpuid(array);
 
-       if (array->nent >= array->maxnent)
+       if (!entry)
                return NULL;
 
-       entry = &array->entries[array->nent++];
-
        memset(entry, 0, sizeof(*entry));
        entry->function = function;
        entry->index = index;
@@ -956,22 +962,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                entry->edx = edx.full;
                break;
        }
-       /*
-        * Per Intel's SDM, the 0x1f is a superset of 0xb,
-        * thus they can be handled by common code.
-        */
        case 0x1f:
        case 0xb:
                /*
-                * Populate entries until the level type (ECX[15:8]) of the
-                * previous entry is zero.  Note, CPUID EAX.{0x1f,0xb}.0 is
-                * the starting entry, filled by the primary do_host_cpuid().
+                * No topology; a valid topology is indicated by the presence
+                * of subleaf 1.
                 */
-               for (i = 1; entry->ecx & 0xff00; ++i) {
-                       entry = do_host_cpuid(array, function, i);
-                       if (!entry)
-                               goto out;
-               }
+               entry->eax = entry->ebx = entry->ecx = 0;
                break;
        case 0xd: {
                u64 permitted_xcr0 = kvm_caps.supported_xcr0 & xstate_get_guest_group_perm();
@@ -1202,6 +1199,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                entry->ebx = entry->ecx = entry->edx = 0;
                break;
        case 0x8000001e:
+               /* Do not return host topology information.  */
+               entry->eax = entry->ebx = entry->ecx = 0;
+               entry->edx = 0; /* reserved */
                break;
        case 0x8000001F:
                if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
index bc9cd70..add65dd 100644 (file)
@@ -138,15 +138,13 @@ void recalc_intercepts(struct vcpu_svm *svm)
                c->intercepts[i] = h->intercepts[i];
 
        if (g->int_ctl & V_INTR_MASKING_MASK) {
-               /* We only want the cr8 intercept bits of L1 */
-               vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
-               vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
-
                /*
-                * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
-                * affect any interrupt we may want to inject; therefore,
-                * interrupt window vmexits are irrelevant to L0.
+                * Once running L2 with HF_VINTR_MASK, EFLAGS.IF and CR8
+                * does not affect any interrupt we may want to inject;
+                * therefore, writes to CR8 are irrelevant to L0, as are
+                * interrupt window vmexits.
                 */
+               vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
                vmcb_clr_intercept(c, INTERCEPT_VINTR);
        }
 
index fc9008d..7eec022 100644 (file)
@@ -3440,18 +3440,15 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
 {
        u32 ar;
 
-       if (var->unusable || !var->present)
-               ar = 1 << 16;
-       else {
-               ar = var->type & 15;
-               ar |= (var->s & 1) << 4;
-               ar |= (var->dpl & 3) << 5;
-               ar |= (var->present & 1) << 7;
-               ar |= (var->avl & 1) << 12;
-               ar |= (var->l & 1) << 13;
-               ar |= (var->db & 1) << 14;
-               ar |= (var->g & 1) << 15;
-       }
+       ar = var->type & 15;
+       ar |= (var->s & 1) << 4;
+       ar |= (var->dpl & 3) << 5;
+       ar |= (var->present & 1) << 7;
+       ar |= (var->avl & 1) << 12;
+       ar |= (var->l & 1) << 13;
+       ar |= (var->db & 1) << 14;
+       ar |= (var->g & 1) << 15;
+       ar |= (var->unusable || !var->present) << 16;
 
        return ar;
 }
index 2e29bdc..8fd41f5 100644 (file)
@@ -271,7 +271,15 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
         * Attempt to obtain the GPC lock on *both* (if there are two)
         * gfn_to_pfn caches that cover the region.
         */
-       read_lock_irqsave(&gpc1->lock, flags);
+       if (atomic) {
+               local_irq_save(flags);
+               if (!read_trylock(&gpc1->lock)) {
+                       local_irq_restore(flags);
+                       return;
+               }
+       } else {
+               read_lock_irqsave(&gpc1->lock, flags);
+       }
        while (!kvm_gpc_check(gpc1, user_len1)) {
                read_unlock_irqrestore(&gpc1->lock, flags);
 
@@ -304,9 +312,18 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
                 * The guest's runstate_info is split across two pages and we
                 * need to hold and validate both GPCs simultaneously. We can
                 * declare a lock ordering GPC1 > GPC2 because nothing else
-                * takes them more than one at a time.
+                * takes them more than one at a time. Set a subclass on the
+                * gpc1 lock to make lockdep shut up about it.
                 */
-               read_lock(&gpc2->lock);
+               lock_set_subclass(&gpc1->lock.dep_map, 1, _THIS_IP_);
+               if (atomic) {
+                       if (!read_trylock(&gpc2->lock)) {
+                               read_unlock_irqrestore(&gpc1->lock, flags);
+                               return;
+                       }
+               } else {
+                       read_lock(&gpc2->lock);
+               }
 
                if (!kvm_gpc_check(gpc2, user_len2)) {
                        read_unlock(&gpc2->lock);
@@ -590,26 +607,26 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
                        r = -EINVAL;
                } else {
-                       mutex_lock(&kvm->lock);
+                       mutex_lock(&kvm->arch.xen.xen_lock);
                        kvm->arch.xen.long_mode = !!data->u.long_mode;
-                       mutex_unlock(&kvm->lock);
+                       mutex_unlock(&kvm->arch.xen.xen_lock);
                        r = 0;
                }
                break;
 
        case KVM_XEN_ATTR_TYPE_SHARED_INFO:
-               mutex_lock(&kvm->lock);
+               mutex_lock(&kvm->arch.xen.xen_lock);
                r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
-               mutex_unlock(&kvm->lock);
+               mutex_unlock(&kvm->arch.xen.xen_lock);
                break;
 
        case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
                if (data->u.vector && data->u.vector < 0x10)
                        r = -EINVAL;
                else {
-                       mutex_lock(&kvm->lock);
+                       mutex_lock(&kvm->arch.xen.xen_lock);
                        kvm->arch.xen.upcall_vector = data->u.vector;
-                       mutex_unlock(&kvm->lock);
+                       mutex_unlock(&kvm->arch.xen.xen_lock);
                        r = 0;
                }
                break;
@@ -619,9 +636,9 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                break;
 
        case KVM_XEN_ATTR_TYPE_XEN_VERSION:
-               mutex_lock(&kvm->lock);
+               mutex_lock(&kvm->arch.xen.xen_lock);
                kvm->arch.xen.xen_version = data->u.xen_version;
-               mutex_unlock(&kvm->lock);
+               mutex_unlock(&kvm->arch.xen.xen_lock);
                r = 0;
                break;
 
@@ -630,9 +647,9 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                        r = -EOPNOTSUPP;
                        break;
                }
-               mutex_lock(&kvm->lock);
+               mutex_lock(&kvm->arch.xen.xen_lock);
                kvm->arch.xen.runstate_update_flag = !!data->u.runstate_update_flag;
-               mutex_unlock(&kvm->lock);
+               mutex_unlock(&kvm->arch.xen.xen_lock);
                r = 0;
                break;
 
@@ -647,7 +664,7 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 {
        int r = -ENOENT;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
 
        switch (data->type) {
        case KVM_XEN_ATTR_TYPE_LONG_MODE:
@@ -686,7 +703,7 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                break;
        }
 
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
        return r;
 }
 
@@ -694,7 +711,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 {
        int idx, r = -ENOENT;
 
-       mutex_lock(&vcpu->kvm->lock);
+       mutex_lock(&vcpu->kvm->arch.xen.xen_lock);
        idx = srcu_read_lock(&vcpu->kvm->srcu);
 
        switch (data->type) {
@@ -922,7 +939,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
        }
 
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
-       mutex_unlock(&vcpu->kvm->lock);
+       mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
        return r;
 }
 
@@ -930,7 +947,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 {
        int r = -ENOENT;
 
-       mutex_lock(&vcpu->kvm->lock);
+       mutex_lock(&vcpu->kvm->arch.xen.xen_lock);
 
        switch (data->type) {
        case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
@@ -1013,7 +1030,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
                break;
        }
 
-       mutex_unlock(&vcpu->kvm->lock);
+       mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
        return r;
 }
 
@@ -1106,7 +1123,7 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
             xhc->blob_size_32 || xhc->blob_size_64))
                return -EINVAL;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
 
        if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
                static_branch_inc(&kvm_xen_enabled.key);
@@ -1115,7 +1132,7 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
 
        memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));
 
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
        return 0;
 }
 
@@ -1658,15 +1675,7 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
                mm_borrowed = true;
        }
 
-       /*
-        * For the irqfd workqueue, using the main kvm->lock mutex is
-        * fine since this function is invoked from kvm_set_irq() with
-        * no other lock held, no srcu. In future if it will be called
-        * directly from a vCPU thread (e.g. on hypercall for an IPI)
-        * then it may need to switch to using a leaf-node mutex for
-        * serializing the shared_info mapping.
-        */
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
 
        /*
         * It is theoretically possible for the page to be unmapped
@@ -1695,7 +1704,7 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
                srcu_read_unlock(&kvm->srcu, idx);
        } while(!rc);
 
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
 
        if (mm_borrowed)
                kthread_unuse_mm(kvm->mm);
@@ -1811,7 +1820,7 @@ static int kvm_xen_eventfd_update(struct kvm *kvm,
        int ret;
 
        /* Protect writes to evtchnfd as well as the idr lookup.  */
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
        evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);
 
        ret = -ENOENT;
@@ -1842,7 +1851,7 @@ static int kvm_xen_eventfd_update(struct kvm *kvm,
        }
        ret = 0;
 out_unlock:
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
        return ret;
 }
 
@@ -1905,10 +1914,10 @@ static int kvm_xen_eventfd_assign(struct kvm *kvm,
                evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
        }
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
        ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1,
                        GFP_KERNEL);
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
        if (ret >= 0)
                return 0;
 
@@ -1926,9 +1935,9 @@ static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
 {
        struct evtchnfd *evtchnfd;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
        evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
 
        if (!evtchnfd)
                return -ENOENT;
@@ -1946,7 +1955,7 @@ static int kvm_xen_eventfd_reset(struct kvm *kvm)
        int i;
        int n = 0;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
 
        /*
         * Because synchronize_srcu() cannot be called inside the
@@ -1958,7 +1967,7 @@ static int kvm_xen_eventfd_reset(struct kvm *kvm)
 
        all_evtchnfds = kmalloc_array(n, sizeof(struct evtchnfd *), GFP_KERNEL);
        if (!all_evtchnfds) {
-               mutex_unlock(&kvm->lock);
+               mutex_unlock(&kvm->arch.xen.xen_lock);
                return -ENOMEM;
        }
 
@@ -1967,7 +1976,7 @@ static int kvm_xen_eventfd_reset(struct kvm *kvm)
                all_evtchnfds[n++] = evtchnfd;
                idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
        }
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
 
        synchronize_srcu(&kvm->srcu);
 
@@ -2069,6 +2078,7 @@ void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
 
 void kvm_xen_init_vm(struct kvm *kvm)
 {
+       mutex_init(&kvm->arch.xen.xen_lock);
        idr_init(&kvm->arch.xen.evtchn_ports);
        kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
 }
index d398735..cb258f5 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/pti.h>
 #include <asm/text-patching.h>
 #include <asm/memtype.h>
+#include <asm/paravirt.h>
 
 /*
  * We need to define the tracepoints somewhere, and tlb.c
@@ -804,6 +805,9 @@ void __init poking_init(void)
        poking_mm = mm_alloc();
        BUG_ON(!poking_mm);
 
+       /* Xen PV guests need the PGD to be pinned. */
+       paravirt_arch_dup_mmap(NULL, poking_mm);
+
        /*
         * Randomize the poking address, but make sure that the following page
         * will be mapped at the same PMD. We need 2 pages, so find space for 3,
index 46de9cf..fb4b1b5 100644 (file)
@@ -387,7 +387,8 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end,
                u8 mtrr_type, uniform;
 
                mtrr_type = mtrr_type_lookup(start, end, &uniform);
-               if (mtrr_type != MTRR_TYPE_WRBACK)
+               if (mtrr_type != MTRR_TYPE_WRBACK &&
+                   mtrr_type != MTRR_TYPE_INVALID)
                        return _PAGE_CACHE_MODE_UC_MINUS;
 
                return _PAGE_CACHE_MODE_WB;
index 758cbfe..4b3efaa 100644 (file)
@@ -12,6 +12,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/efi.h>
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/bitmap.h>
@@ -442,17 +443,42 @@ static bool is_acpi_reserved(u64 start, u64 end, enum e820_type not_used)
        return mcfg_res.flags;
 }
 
+static bool is_efi_mmio(u64 start, u64 end, enum e820_type not_used)
+{
+#ifdef CONFIG_EFI
+       efi_memory_desc_t *md;
+       u64 size, mmio_start, mmio_end;
+
+       for_each_efi_memory_desc(md) {
+               if (md->type == EFI_MEMORY_MAPPED_IO) {
+                       size = md->num_pages << EFI_PAGE_SHIFT;
+                       mmio_start = md->phys_addr;
+                       mmio_end = mmio_start + size;
+
+                       /*
+                        * N.B. Caller supplies (start, start + size),
+                        * so to match, mmio_end is the first address
+                        * *past* the EFI_MEMORY_MAPPED_IO area.
+                        */
+                       if (mmio_start <= start && end <= mmio_end)
+                               return true;
+               }
+       }
+#endif
+
+       return false;
+}
+
 typedef bool (*check_reserved_t)(u64 start, u64 end, enum e820_type type);
 
 static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
                                     struct pci_mmcfg_region *cfg,
-                                    struct device *dev, int with_e820)
+                                    struct device *dev, const char *method)
 {
        u64 addr = cfg->res.start;
        u64 size = resource_size(&cfg->res);
        u64 old_size = size;
        int num_buses;
-       char *method = with_e820 ? "E820" : "ACPI motherboard resources";
 
        while (!is_reserved(addr, addr + size, E820_TYPE_RESERVED)) {
                size >>= 1;
@@ -464,10 +490,10 @@ static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
                return false;
 
        if (dev)
-               dev_info(dev, "MMCONFIG at %pR reserved in %s\n",
+               dev_info(dev, "MMCONFIG at %pR reserved as %s\n",
                         &cfg->res, method);
        else
-               pr_info(PREFIX "MMCONFIG at %pR reserved in %s\n",
+               pr_info(PREFIX "MMCONFIG at %pR reserved as %s\n",
                       &cfg->res, method);
 
        if (old_size != size) {
@@ -500,7 +526,8 @@ static bool __ref
 pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int early)
 {
        if (!early && !acpi_disabled) {
-               if (is_mmconf_reserved(is_acpi_reserved, cfg, dev, 0))
+               if (is_mmconf_reserved(is_acpi_reserved, cfg, dev,
+                                      "ACPI motherboard resource"))
                        return true;
 
                if (dev)
@@ -513,6 +540,10 @@ pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int e
                               "MMCONFIG at %pR not reserved in "
                               "ACPI motherboard resources\n",
                               &cfg->res);
+
+               if (is_mmconf_reserved(is_efi_mmio, cfg, dev,
+                                      "EfiMemoryMappedIO"))
+                       return true;
        }
 
        /*
@@ -527,7 +558,8 @@ pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int e
        /* Don't try to do this check unless configuration
           type 1 is available. how about type 2 ?*/
        if (raw_pci_ops)
-               return is_mmconf_reserved(e820__mapped_all, cfg, dev, 1);
+               return is_mmconf_reserved(e820__mapped_all, cfg, dev,
+                                         "E820 entry");
 
        return false;
 }
index 48a3eb0..650cdbb 100644 (file)
@@ -7,7 +7,7 @@
 #include <asm/elf.h>
 
 
-Elf32_Half elf_core_extra_phdrs(void)
+Elf32_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
        return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0;
 }
@@ -60,7 +60,7 @@ int elf_core_write_extra_data(struct coredump_params *cprm)
        return 1;
 }
 
-size_t elf_core_extra_data_size(void)
+size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
        if ( vsyscall_ehdr ) {
                const struct elfhdr *const ehdrp =
index 1b2829e..7d9b15f 100644 (file)
@@ -316,14 +316,12 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
 
 static void bfqg_get(struct bfq_group *bfqg)
 {
-       bfqg->ref++;
+       refcount_inc(&bfqg->ref);
 }
 
 static void bfqg_put(struct bfq_group *bfqg)
 {
-       bfqg->ref--;
-
-       if (bfqg->ref == 0)
+       if (refcount_dec_and_test(&bfqg->ref))
                kfree(bfqg);
 }
 
@@ -530,7 +528,7 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
        }
 
        /* see comments in bfq_bic_update_cgroup for why refcounting */
-       bfqg_get(bfqg);
+       refcount_set(&bfqg->ref, 1);
        return &bfqg->pd;
 }
 
index 41aa151..466e486 100644 (file)
@@ -928,7 +928,7 @@ struct bfq_group {
        char blkg_path[128];
 
        /* reference counter (see comments in bfq_bic_update_cgroup) */
-       int ref;
+       refcount_t ref;
        /* Is bfq_group still online? */
        bool online;
 
index ce6a2b7..4c94a65 100644 (file)
@@ -1455,6 +1455,10 @@ retry:
                list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
                        pol->pd_init_fn(blkg->pd[pol->plid]);
 
+       if (pol->pd_online_fn)
+               list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
+                       pol->pd_online_fn(blkg->pd[pol->plid]);
+
        __set_bit(pol->plid, q->blkcg_pols);
        ret = 0;
 
index 9321767..b509835 100644 (file)
@@ -283,12 +283,9 @@ static void blk_free_queue(struct request_queue *q)
  *
  * Decrements the refcount of the request_queue and free it when the refcount
  * reaches 0.
- *
- * Context: Can sleep.
  */
 void blk_put_queue(struct request_queue *q)
 {
-       might_sleep();
        if (refcount_dec_and_test(&q->refs))
                blk_free_queue(q);
 }
index 2c49b41..9d463f7 100644 (file)
@@ -2890,6 +2890,7 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
                struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
 {
        struct request *rq;
+       enum hctx_type type, hctx_type;
 
        if (!plug)
                return NULL;
@@ -2902,7 +2903,10 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
                return NULL;
        }
 
-       if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
+       type = blk_mq_get_hctx_type((*bio)->bi_opf);
+       hctx_type = rq->mq_hctx->type;
+       if (type != hctx_type &&
+           !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
                return NULL;
        if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
                return NULL;
index 08cf8a1..07373b3 100644 (file)
@@ -354,6 +354,9 @@ void spk_ttyio_release(struct spk_synth *in_synth)
 {
        struct tty_struct *tty = in_synth->dev;
 
+       if (tty == NULL)
+               return;
+
        tty_lock(tty);
 
        if (tty->ops->close)
index 204fe94..a194f30 100644 (file)
@@ -75,7 +75,8 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
 }
 
 #define FIND_CHILD_MIN_SCORE   1
-#define FIND_CHILD_MAX_SCORE   2
+#define FIND_CHILD_MID_SCORE   2
+#define FIND_CHILD_MAX_SCORE   3
 
 static int match_any(struct acpi_device *adev, void *not_used)
 {
@@ -96,8 +97,17 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
                return -ENODEV;
 
        status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
-       if (status == AE_NOT_FOUND)
+       if (status == AE_NOT_FOUND) {
+               /*
+                * Special case: backlight device objects without _STA are
+                * preferred to other objects with the same _ADR value, because
+                * it is more likely that they are actually useful.
+                */
+               if (adev->pnp.type.backlight)
+                       return FIND_CHILD_MID_SCORE;
+
                return FIND_CHILD_MIN_SCORE;
+       }
 
        if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
                return -ENODEV;
index 998101c..3d4c462 100644 (file)
@@ -236,6 +236,11 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
        efi_status_t status;
        struct prm_context_buffer context;
 
+       if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
+               pr_err_ratelimited("PRM: EFI runtime services no longer available\n");
+               return AE_NO_HANDLER;
+       }
+
        /*
         * The returned acpi_status will always be AE_OK. Error values will be
         * saved in the first byte of the PRM message buffer to be used by ASL.
@@ -325,6 +330,11 @@ void __init init_prmt(void)
 
        pr_info("PRM: found %u modules\n", mc);
 
+       if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
+               pr_err("PRM: EFI runtime services unavailable\n");
+               return;
+       }
+
        status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
                                                    ACPI_ADR_SPACE_PLATFORM_RT,
                                                    &acpi_platformrt_space_handler,
index 16dcd31..192d178 100644 (file)
@@ -433,6 +433,13 @@ static const struct dmi_system_id asus_laptop[] = {
                },
        },
        {
+               .ident = "Asus ExpertBook B2402CBA",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"),
+               },
+       },
+       {
                .ident = "Asus ExpertBook B2502",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
index 2743444..0c6f06a 100644 (file)
@@ -1370,9 +1370,12 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
                 * Some devices don't reliably have _HIDs & _CIDs, so add
                 * synthetic HIDs to make sure drivers can find them.
                 */
-               if (acpi_is_video_device(handle))
+               if (acpi_is_video_device(handle)) {
                        acpi_add_id(pnp, ACPI_VIDEO_HID);
-               else if (acpi_bay_match(handle))
+                       pnp->type.backlight = 1;
+                       break;
+               }
+               if (acpi_bay_match(handle))
                        acpi_add_id(pnp, ACPI_BAY_HID);
                else if (acpi_dock_match(handle))
                        acpi_add_id(pnp, ACPI_DOCK_HID);
index 1b78c74..65cec7b 100644 (file)
@@ -50,6 +50,10 @@ static void acpi_video_parse_cmdline(void)
                acpi_backlight_cmdline = acpi_backlight_video;
        if (!strcmp("native", acpi_video_backlight_string))
                acpi_backlight_cmdline = acpi_backlight_native;
+       if (!strcmp("nvidia_wmi_ec", acpi_video_backlight_string))
+               acpi_backlight_cmdline = acpi_backlight_nvidia_wmi_ec;
+       if (!strcmp("apple_gmux", acpi_video_backlight_string))
+               acpi_backlight_cmdline = acpi_backlight_apple_gmux;
        if (!strcmp("none", acpi_video_backlight_string))
                acpi_backlight_cmdline = acpi_backlight_none;
 }
@@ -513,6 +517,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
        },
        {
         .callback = video_detect_force_native,
+        /* Acer Aspire 4810T */
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 4810T"),
+               },
+       },
+       {
+        .callback = video_detect_force_native,
         /* Acer Aspire 5738z */
         .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
index eceaec3..9695c44 100644 (file)
@@ -640,6 +640,7 @@ config PATA_CS5530
 config PATA_CS5535
        tristate "CS5535 PATA support (Experimental)"
        depends on PCI && (X86_32 || (X86_64 && COMPILE_TEST))
+       depends on !UML
        help
          This option enables support for the NatSemi/AMD CS5535
          companion chip used with the Geode processor family.
index bbb3e49..083a957 100644 (file)
@@ -997,26 +997,32 @@ struct fwnode_handle *
 fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
                               struct fwnode_handle *prev)
 {
+       struct fwnode_handle *ep, *port_parent = NULL;
        const struct fwnode_handle *parent;
-       struct fwnode_handle *ep;
 
        /*
         * If this function is in a loop and the previous iteration returned
         * an endpoint from fwnode->secondary, then we need to use the secondary
         * as parent rather than @fwnode.
         */
-       if (prev)
-               parent = fwnode_graph_get_port_parent(prev);
-       else
+       if (prev) {
+               port_parent = fwnode_graph_get_port_parent(prev);
+               parent = port_parent;
+       } else {
                parent = fwnode;
+       }
        if (IS_ERR_OR_NULL(parent))
                return NULL;
 
        ep = fwnode_call_ptr_op(parent, graph_get_next_endpoint, prev);
        if (ep)
-               return ep;
+               goto out_put_port_parent;
+
+       ep = fwnode_graph_get_next_endpoint(parent->secondary, NULL);
 
-       return fwnode_graph_get_next_endpoint(parent->secondary, NULL);
+out_put_port_parent:
+       fwnode_handle_put(port_parent);
+       return ep;
 }
 EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
 
index 4d1976c..929410d 100644 (file)
@@ -145,7 +145,7 @@ static int __init test_async_probe_init(void)
        calltime = ktime_get();
        for_each_online_cpu(cpu) {
                nid = cpu_to_node(cpu);
-               pdev = &sync_dev[sync_id];
+               pdev = &async_dev[async_id];
 
                *pdev = test_platform_device_register_node("test_async_driver",
                                                           async_id,
index 4cea3b0..2f1a925 100644 (file)
@@ -2400,6 +2400,8 @@ static void pkt_submit_bio(struct bio *bio)
        struct bio *split;
 
        bio = bio_split_to_limits(bio);
+       if (!bio)
+               return;
 
        pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
                (unsigned long long)bio->bi_iter.bi_sector,
index 78334da..5eb8c78 100644 (file)
@@ -1440,7 +1440,7 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
                goto out_alloc;
        }
 
-       ret = ida_alloc_max(&index_ida, 1 << (MINORBITS - RNBD_PART_BITS),
+       ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
                            GFP_KERNEL);
        if (ret < 0) {
                pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
index fcfc2e2..27f3890 100644 (file)
@@ -58,7 +58,7 @@
 #define PCI1760_CMD_CLR_IMB2           0x00    /* Clears IMB2 */
 #define PCI1760_CMD_SET_DO             0x01    /* Set output state */
 #define PCI1760_CMD_GET_DO             0x02    /* Read output status */
-#define PCI1760_CMD_GET_STATUS         0x03    /* Read current status */
+#define PCI1760_CMD_GET_STATUS         0x07    /* Read current status */
 #define PCI1760_CMD_GET_FW_VER         0x0e    /* Read firmware version */
 #define PCI1760_CMD_GET_HW_VER         0x0f    /* Read hardware version */
 #define PCI1760_CMD_SET_PWM_HI(x)      (0x10 + (x) * 2) /* Set "hi" period */
index 204e390..c17bd84 100644 (file)
@@ -307,6 +307,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
                max_perf = min_perf;
 
        amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
+       cpufreq_cpu_put(policy);
 }
 
 static int amd_get_min_freq(struct amd_cpudata *cpudata)
index d180128..c11d22f 100644 (file)
@@ -280,6 +280,7 @@ static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
        policy->cpuinfo.transition_latency = transition_latency;
        policy->dvfs_possible_from_any_cpu = true;
        policy->fast_switch_possible = true;
+       policy->suspend_freq = freq_table[0].frequency;
 
        if (policy_has_boost_freq(policy)) {
                ret = cpufreq_enable_boost_support();
@@ -321,7 +322,6 @@ static struct cpufreq_driver apple_soc_cpufreq_driver = {
        .flags          = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
                          CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_IS_COOLING_DEV,
        .verify         = cpufreq_generic_frequency_table_verify,
-       .attr           = cpufreq_generic_attr,
        .get            = apple_soc_cpufreq_get_rate,
        .init           = apple_soc_cpufreq_init,
        .exit           = apple_soc_cpufreq_exit,
@@ -329,6 +329,7 @@ static struct cpufreq_driver apple_soc_cpufreq_driver = {
        .fast_switch    = apple_soc_cpufreq_fast_switch,
        .register_em    = cpufreq_register_em_with_opp,
        .attr           = apple_soc_cpufreq_hw_attr,
+       .suspend        = cpufreq_generic_suspend,
 };
 
 static int __init apple_soc_cpufreq_module_init(void)
index c10fc33..b74289a 100644 (file)
@@ -445,7 +445,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
                return -ENODEV;
        }
 
-       clk = clk_get(cpu_dev, 0);
+       clk = clk_get(cpu_dev, NULL);
        if (IS_ERR(clk)) {
                dev_err(cpu_dev, "Cannot get clock for CPU0\n");
                return PTR_ERR(clk);
index 432dfb4..022e355 100644 (file)
@@ -487,7 +487,8 @@ static unsigned int get_perf_level_count(struct cpufreq_policy *policy)
        cpu_data = policy->driver_data;
        perf_caps = &cpu_data->perf_caps;
        max_cap = arch_scale_cpu_capacity(cpu);
-       min_cap = div_u64(max_cap * perf_caps->lowest_perf, perf_caps->highest_perf);
+       min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
+                         perf_caps->highest_perf);
        if ((min_cap == 0) || (max_cap < min_cap))
                return 0;
        return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP;
@@ -519,10 +520,10 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
        cpu_data = policy->driver_data;
        perf_caps = &cpu_data->perf_caps;
        max_cap = arch_scale_cpu_capacity(cpu_dev->id);
-       min_cap = div_u64(max_cap * perf_caps->lowest_perf,
-                       perf_caps->highest_perf);
-
-       perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
+       min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
+                         perf_caps->highest_perf);
+       perf_step = div_u64((u64)CPPC_EM_CAP_STEP * perf_caps->highest_perf,
+                           max_cap);
        min_step = min_cap / CPPC_EM_CAP_STEP;
        max_step = max_cap / CPPC_EM_CAP_STEP;
 
index 8ab6728..e857036 100644 (file)
@@ -137,6 +137,7 @@ static const struct of_device_id blocklist[] __initconst = {
        { .compatible = "nvidia,tegra30", },
        { .compatible = "nvidia,tegra124", },
        { .compatible = "nvidia,tegra210", },
+       { .compatible = "nvidia,tegra234", },
 
        { .compatible = "qcom,apq8096", },
        { .compatible = "qcom,msm8996", },
@@ -150,6 +151,7 @@ static const struct of_device_id blocklist[] __initconst = {
        { .compatible = "qcom,sdm845", },
        { .compatible = "qcom,sm6115", },
        { .compatible = "qcom,sm6350", },
+       { .compatible = "qcom,sm6375", },
        { .compatible = "qcom,sm8150", },
        { .compatible = "qcom,sm8250", },
        { .compatible = "qcom,sm8350", },
index 340fed3..9505a81 100644 (file)
@@ -649,9 +649,10 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
 {
        struct clk_hw_onecell_data *clk_data;
        struct device *dev = &pdev->dev;
+       struct device_node *soc_node;
        struct device *cpu_dev;
        struct clk *clk;
-       int ret, i, num_domains;
+       int ret, i, num_domains, reg_sz;
 
        clk = clk_get(dev, "xo");
        if (IS_ERR(clk))
@@ -679,7 +680,21 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
                return ret;
 
        /* Allocate qcom_cpufreq_data based on the available frequency domains in DT */
-       num_domains = of_property_count_elems_of_size(dev->of_node, "reg", sizeof(u32) * 4);
+       soc_node = of_get_parent(dev->of_node);
+       if (!soc_node)
+               return -EINVAL;
+
+       ret = of_property_read_u32(soc_node, "#address-cells", &reg_sz);
+       if (ret)
+               goto of_exit;
+
+       ret = of_property_read_u32(soc_node, "#size-cells", &i);
+       if (ret)
+               goto of_exit;
+
+       reg_sz += i;
+
+       num_domains = of_property_count_elems_of_size(dev->of_node, "reg", sizeof(u32) * reg_sz);
        if (num_domains <= 0)
                return num_domains;
 
@@ -743,6 +758,9 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
        else
                dev_dbg(dev, "QCOM CPUFreq HW driver initialized\n");
 
+of_exit:
+       of_node_put(soc_node);
+
        return ret;
 }
 
index c741b64..8a6e6b6 100644 (file)
@@ -451,7 +451,8 @@ static int dma_chan_get(struct dma_chan *chan)
        /* The channel is already in use, update client count */
        if (chan->client_count) {
                __module_get(owner);
-               goto out;
+               chan->client_count++;
+               return 0;
        }
 
        if (!try_module_get(owner))
@@ -470,11 +471,11 @@ static int dma_chan_get(struct dma_chan *chan)
                        goto err_out;
        }
 
+       chan->client_count++;
+
        if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
                balance_ref_count(chan);
 
-out:
-       chan->client_count++;
        return 0;
 
 err_out:
index a183d93..bf85aa0 100644 (file)
@@ -1018,6 +1018,11 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
 
        /* The bad descriptor currently is in the head of vc list */
        vd = vchan_next_desc(&chan->vc);
+       if (!vd) {
+               dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
+                       axi_chan_name(chan));
+               goto out;
+       }
        /* Remove the completed descriptor from issued list */
        list_del(&vd->node);
 
@@ -1032,6 +1037,7 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
        /* Try to restart the controller */
        axi_chan_start_first_queued(chan);
 
+out:
        spin_unlock_irqrestore(&chan->vc.lock, flags);
 }
 
index 06f5d37..29dbb0f 100644 (file)
@@ -1172,8 +1172,19 @@ static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
        spin_unlock(&ie->list_lock);
 
        list_for_each_entry_safe(desc, itr, &flist, list) {
+               struct dma_async_tx_descriptor *tx;
+
                list_del(&desc->list);
                ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
+               /*
+                * wq is being disabled. Any remaining descriptors are
+                * likely to be stuck and can be dropped. callback could
+                * point to code that is no longer accessible, for example
+                * if dmatest module has been unloaded.
+                */
+               tx = &desc->txd;
+               tx->callback = NULL;
+               tx->callback_result = NULL;
                idxd_dma_complete_txd(desc, ctype, true);
        }
 }
@@ -1390,8 +1401,7 @@ err_res_alloc:
 err_irq:
        idxd_wq_unmap_portal(wq);
 err_map_portal:
-       rc = idxd_wq_disable(wq, false);
-       if (rc < 0)
+       if (idxd_wq_disable(wq, false))
                dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
 err:
        return rc;
@@ -1408,11 +1418,11 @@ void drv_disable_wq(struct idxd_wq *wq)
                dev_warn(dev, "Clients has claim on wq %d: %d\n",
                         wq->id, idxd_wq_refcount(wq));
 
-       idxd_wq_free_resources(wq);
        idxd_wq_unmap_portal(wq);
        idxd_wq_drain(wq);
        idxd_wq_free_irq(wq);
        idxd_wq_reset(wq);
+       idxd_wq_free_resources(wq);
        percpu_ref_exit(&wq->wq_active);
        wq->type = IDXD_WQT_NONE;
        wq->client_count = 0;
index fbea5f6..b926abe 100644 (file)
@@ -1521,10 +1521,12 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
                sdma_config_ownership(sdmac, false, true, false);
 
        if (sdma_load_context(sdmac))
-               goto err_desc_out;
+               goto err_bd_out;
 
        return desc;
 
+err_bd_out:
+       sdma_free_bd(desc);
 err_desc_out:
        kfree(desc);
 err_out:
index 9b9184f..1709d15 100644 (file)
@@ -914,7 +914,7 @@ static void ldma_dev_init(struct ldma_dev *d)
        }
 }
 
-static int ldma_cfg_init(struct ldma_dev *d)
+static int ldma_parse_dt(struct ldma_dev *d)
 {
        struct fwnode_handle *fwnode = dev_fwnode(d->dev);
        struct ldma_port *p;
@@ -1661,10 +1661,6 @@ static int intel_ldma_probe(struct platform_device *pdev)
                p->ldev = d;
        }
 
-       ret = ldma_cfg_init(d);
-       if (ret)
-               return ret;
-
        dma_dev->dev = &pdev->dev;
 
        ch_mask = (unsigned long)d->channels_mask;
@@ -1675,6 +1671,10 @@ static int intel_ldma_probe(struct platform_device *pdev)
                        ldma_dma_init_v3X(j, d);
        }
 
+       ret = ldma_parse_dt(d);
+       if (ret)
+               return ret;
+
        dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources;
        dma_dev->device_free_chan_resources = ldma_free_chan_resources;
        dma_dev->device_terminate_all = ldma_terminate_all;
index 377da23..a2bf13f 100644 (file)
@@ -71,12 +71,13 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
        bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
        u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
        u32 tail;
+       unsigned long flags;
 
        if (soc) {
                desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
                desc->dw0 &= ~DWORD0_SOC;
        }
-       mutex_lock(&cmd_q->q_mutex);
+       spin_lock_irqsave(&cmd_q->q_lock, flags);
 
        /* Copy 32-byte command descriptor to hw queue. */
        memcpy(q_desc, desc, 32);
@@ -91,7 +92,7 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
 
        /* Turn the queue back on using our cached control register */
        pt_start_queue(cmd_q);
-       mutex_unlock(&cmd_q->q_mutex);
+       spin_unlock_irqrestore(&cmd_q->q_lock, flags);
 
        return 0;
 }
@@ -199,7 +200,7 @@ int pt_core_init(struct pt_device *pt)
 
        cmd_q->pt = pt;
        cmd_q->dma_pool = dma_pool;
-       mutex_init(&cmd_q->q_mutex);
+       spin_lock_init(&cmd_q->q_lock);
 
        /* Page alignment satisfies our needs for N <= 128 */
        cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
index d093c43..21b4bf8 100644 (file)
@@ -196,7 +196,7 @@ struct pt_cmd_queue {
        struct ptdma_desc *qbase;
 
        /* Aligned queue start address (per requirement) */
-       struct mutex q_mutex ____cacheline_aligned;
+       spinlock_t q_lock ____cacheline_aligned;
        unsigned int qidx;
 
        unsigned int qsize;
index 061add8..59a36cb 100644 (file)
@@ -1756,6 +1756,7 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
                tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
                if (spi->cmd == SPI_RX) {
                        tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
+                       tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
                } else if (spi->cmd == SPI_TX) {
                        tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
                } else { /* SPI_DUPLEX */
index 1d1180d..8f67f45 100644 (file)
@@ -711,6 +711,7 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
                        return err;
                }
 
+               vchan_terminate_vdesc(&tdc->dma_desc->vd);
                tegra_dma_disable(tdc);
                tdc->dma_desc = NULL;
        }
index ae39b52..79da93c 100644 (file)
@@ -221,7 +221,7 @@ static int tegra_adma_init(struct tegra_adma *tdma)
        int ret;
 
        /* Clear any interrupts */
-       tdma_write(tdma, tdma->cdata->global_int_clear, 0x1);
+       tdma_write(tdma, tdma->cdata->ch_base_offset + tdma->cdata->global_int_clear, 0x1);
 
        /* Assert soft reset */
        tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);
index ce8b80b..4c62274 100644 (file)
@@ -762,11 +762,12 @@ static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
        if (uc->desc->dir == DMA_DEV_TO_MEM) {
                udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
                udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
-               udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+               if (uc->config.ep_type != PSIL_EP_NATIVE)
+                       udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
        } else {
                udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
                udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
-               if (!uc->bchan)
+               if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE)
                        udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
        }
 }
index a8d23cd..ac09f0e 100644 (file)
@@ -3143,8 +3143,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
        /* Initialize the channels */
        for_each_child_of_node(node, child) {
                err = xilinx_dma_child_probe(xdev, child);
-               if (err < 0)
+               if (err < 0) {
+                       of_node_put(child);
                        goto error;
+               }
        }
 
        if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
index 19522c5..0689e15 100644 (file)
@@ -34,6 +34,9 @@
 static DEFINE_MUTEX(device_ctls_mutex);
 static LIST_HEAD(edac_device_list);
 
+/* Default workqueue processing interval on this instance, in msecs */
+#define DEFAULT_POLL_INTERVAL 1000
+
 #ifdef CONFIG_EDAC_DEBUG
 static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
 {
@@ -336,7 +339,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
         * whole one second to save timers firing all over the period
         * between integral seconds
         */
-       if (edac_dev->poll_msec == 1000)
+       if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
                edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
        else
                edac_queue_work(&edac_dev->work, edac_dev->delay);
@@ -366,7 +369,7 @@ static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
         * timers firing on sub-second basis, while they are happy
         * to fire together on the 1 second exactly
         */
-       if (edac_dev->poll_msec == 1000)
+       if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
                edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
        else
                edac_queue_work(&edac_dev->work, edac_dev->delay);
@@ -394,17 +397,16 @@ static void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
  *     Then restart the workq on the new delay
  */
 void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
-                                       unsigned long value)
+                                   unsigned long msec)
 {
-       unsigned long jiffs = msecs_to_jiffies(value);
-
-       if (value == 1000)
-               jiffs = round_jiffies_relative(value);
-
-       edac_dev->poll_msec = value;
-       edac_dev->delay     = jiffs;
+       edac_dev->poll_msec = msec;
+       edac_dev->delay     = msecs_to_jiffies(msec);
 
-       edac_mod_work(&edac_dev->work, jiffs);
+       /* See comment in edac_device_workq_setup() above */
+       if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+               edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+       else
+               edac_mod_work(&edac_dev->work, edac_dev->delay);
 }
 
 int edac_device_alloc_index(void)
@@ -443,11 +445,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
                /* This instance is NOW RUNNING */
                edac_dev->op_state = OP_RUNNING_POLL;
 
-               /*
-                * enable workq processing on this instance,
-                * default = 1000 msec
-                */
-               edac_device_workq_setup(edac_dev, 1000);
+               edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
        } else {
                edac_dev->op_state = OP_RUNNING_INTERRUPT;
        }
index 763c076..47593af 100644 (file)
@@ -53,7 +53,7 @@ bool edac_stop_work(struct delayed_work *work);
 bool edac_mod_work(struct delayed_work *work, unsigned long delay);
 
 extern void edac_device_reset_delay_period(struct edac_device_ctl_info
-                                          *edac_dev, unsigned long value);
+                                          *edac_dev, unsigned long msec);
 extern void edac_mc_reset_delay_period(unsigned long value);
 
 /*
index 61b76ec..19fba25 100644 (file)
@@ -174,8 +174,10 @@ static int highbank_mc_probe(struct platform_device *pdev)
        drvdata = mci->pvt_info;
        platform_set_drvdata(pdev, mci);
 
-       if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
-               return -ENOMEM;
+       if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+               res = -ENOMEM;
+               goto free;
+       }
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
@@ -243,6 +245,7 @@ err2:
        edac_mc_del_mc(&pdev->dev);
 err:
        devres_release_group(&pdev->dev, NULL);
+free:
        edac_mc_free(mci);
        return res;
 }
index 97a27e4..c45519f 100644 (file)
@@ -252,7 +252,7 @@ clear:
 static int
 dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
 {
-       struct llcc_drv_data *drv = edev_ctl->pvt_info;
+       struct llcc_drv_data *drv = edev_ctl->dev->platform_data;
        int ret;
 
        ret = dump_syn_reg_values(drv, bank, err_type);
@@ -289,7 +289,7 @@ static irqreturn_t
 llcc_ecc_irq_handler(int irq, void *edev_ctl)
 {
        struct edac_device_ctl_info *edac_dev_ctl = edev_ctl;
-       struct llcc_drv_data *drv = edac_dev_ctl->pvt_info;
+       struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data;
        irqreturn_t irq_rc = IRQ_NONE;
        u32 drp_error, trp_error, i;
        int ret;
@@ -358,7 +358,6 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
        edev_ctl->dev_name = dev_name(dev);
        edev_ctl->ctl_name = "llcc";
        edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
-       edev_ctl->pvt_info = llcc_driv_data;
 
        rc = edac_device_add_device(edev_ctl);
        if (rc)
index f818d00..ffdad59 100644 (file)
@@ -910,6 +910,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
                              xfer->hdr.protocol_id, xfer->hdr.seq,
                              xfer->hdr.poll_completion);
 
+       /* Clear any stale status */
+       xfer->hdr.status = SCMI_SUCCESS;
        xfer->state = SCMI_XFER_SENT_OK;
        /*
         * Even though spinlocking is not needed here since no race is possible
index 1dfe534..87b4f4d 100644 (file)
@@ -81,10 +81,11 @@ u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
 void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
                          struct scmi_xfer *xfer)
 {
+       size_t len = ioread32(&shmem->length);
+
        xfer->hdr.status = ioread32(shmem->msg_payload);
        /* Skip the length of header and status in shmem area i.e 8 bytes */
-       xfer->rx.len = min_t(size_t, xfer->rx.len,
-                            ioread32(&shmem->length) - 8);
+       xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0);
 
        /* Take a copy to the rx buffer.. */
        memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
@@ -93,8 +94,10 @@ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
 void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
                              size_t max_len, struct scmi_xfer *xfer)
 {
+       size_t len = ioread32(&shmem->length);
+
        /* Skip only the length of header in shmem area i.e 4 bytes */
-       xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4);
+       xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0);
 
        /* Take a copy to the rx buffer.. */
        memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
index 33c9b81..1db975c 100644 (file)
@@ -160,7 +160,6 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
        }
 
        vioch->shutdown_done = &vioch_shutdown_done;
-       virtio_break_device(vioch->vqueue->vdev);
        if (!vioch->is_rx && vioch->deferred_tx_wq)
                /* Cannot be kicked anymore after this...*/
                vioch->deferred_tx_wq = NULL;
@@ -482,6 +481,12 @@ static int virtio_chan_free(int id, void *p, void *data)
        struct scmi_chan_info *cinfo = p;
        struct scmi_vio_channel *vioch = cinfo->transport_info;
 
+       /*
+        * Break device to inhibit further traffic flowing while shutting down
+        * the channels: doing it later holding vioch->lock creates unsafe
+        * locking dependency chains as reported by LOCKDEP.
+        */
+       virtio_break_device(vioch->vqueue->vdev);
        scmi_vio_channel_cleanup_sync(vioch);
 
        scmi_free_channel(cinfo, data, id);
index 09716ee..a2b0cbc 100644 (file)
@@ -394,8 +394,8 @@ static int __init efisubsys_init(void)
        efi_kobj = kobject_create_and_add("efi", firmware_kobj);
        if (!efi_kobj) {
                pr_err("efi: Firmware registration failed.\n");
-               destroy_workqueue(efi_rts_wq);
-               return -ENOMEM;
+               error = -ENOMEM;
+               goto err_destroy_wq;
        }
 
        if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
@@ -443,7 +443,10 @@ err_unregister:
 err_put:
        kobject_put(efi_kobj);
        efi_kobj = NULL;
-       destroy_workqueue(efi_rts_wq);
+err_destroy_wq:
+       if (efi_rts_wq)
+               destroy_workqueue(efi_rts_wq);
+
        return error;
 }
 
index 7feee3d..1fba4e0 100644 (file)
@@ -62,6 +62,7 @@ struct efi_runtime_work efi_rts_work;
                                                                        \
        if (!efi_enabled(EFI_RUNTIME_SERVICES)) {                       \
                pr_warn_once("EFI Runtime Services are disabled!\n");   \
+               efi_rts_work.status = EFI_DEVICE_ERROR;                 \
                goto exit;                                              \
        }                                                               \
                                                                        \
index 2652c39..33ae947 100644 (file)
@@ -93,14 +93,19 @@ static int coreboot_table_populate(struct device *dev, void *ptr)
        for (i = 0; i < header->table_entries; i++) {
                entry = ptr_entry;
 
-               device = kzalloc(sizeof(struct device) + entry->size, GFP_KERNEL);
+               if (entry->size < sizeof(*entry)) {
+                       dev_warn(dev, "coreboot table entry too small!\n");
+                       return -EINVAL;
+               }
+
+               device = kzalloc(sizeof(device->dev) + entry->size, GFP_KERNEL);
                if (!device)
                        return -ENOMEM;
 
                device->dev.parent = dev;
                device->dev.bus = &coreboot_bus_type;
                device->dev.release = coreboot_device_release;
-               memcpy(&device->entry, ptr_entry, entry->size);
+               memcpy(device->raw, ptr_entry, entry->size);
 
                switch (device->entry.tag) {
                case LB_TAG_CBMEM_ENTRY:
index 37f4d33..d814dca 100644 (file)
@@ -79,6 +79,7 @@ struct coreboot_device {
                struct lb_cbmem_ref cbmem_ref;
                struct lb_cbmem_entry cbmem_entry;
                struct lb_framebuffer framebuffer;
+               DECLARE_FLEX_ARRAY(u8, raw);
        };
 };
 
index 4e2575d..871bedf 100644 (file)
@@ -361,9 +361,10 @@ static efi_status_t gsmi_get_variable(efi_char16_t *name,
                memcpy(data, gsmi_dev.data_buf->start, *data_size);
 
                /* All variables are have the following attributes */
-               *attr = EFI_VARIABLE_NON_VOLATILE |
-                       EFI_VARIABLE_BOOTSERVICE_ACCESS |
-                       EFI_VARIABLE_RUNTIME_ACCESS;
+               if (attr)
+                       *attr = EFI_VARIABLE_NON_VOLATILE |
+                               EFI_VARIABLE_BOOTSERVICE_ACCESS |
+                               EFI_VARIABLE_RUNTIME_ACCESS;
        }
 
        spin_unlock_irqrestore(&gsmi_dev.lock, flags);
index e7bcfca..447ee4e 100644 (file)
@@ -440,6 +440,9 @@ static const struct file_operations psci_debugfs_ops = {
 
 static int __init psci_debugfs_init(void)
 {
+       if (!invoke_psci_fn || !psci_ops.get_version)
+               return 0;
+
        return PTR_ERR_OR_ZERO(debugfs_create_file("psci", 0444, NULL, NULL,
                                                   &psci_debugfs_ops));
 }
index d5626c5..6f673b2 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 #include <linux/syscore_ops.h>
 #include <linux/gpio/driver.h>
 #include <linux/of.h>
@@ -159,6 +160,7 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
 {
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct mxc_gpio_port *port = gc->private;
+       unsigned long flags;
        u32 bit, val;
        u32 gpio_idx = d->hwirq;
        int edge;
@@ -197,6 +199,8 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
                return -EINVAL;
        }
 
+       raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
+
        if (GPIO_EDGE_SEL >= 0) {
                val = readl(port->base + GPIO_EDGE_SEL);
                if (edge == GPIO_INT_BOTH_EDGES)
@@ -217,15 +221,20 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
        writel(1 << gpio_idx, port->base + GPIO_ISR);
        port->pad_type[gpio_idx] = type;
 
-       return 0;
+       raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
+
+       return port->gc.direction_input(&port->gc, gpio_idx);
 }
 
 static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
 {
        void __iomem *reg = port->base;
+       unsigned long flags;
        u32 bit, val;
        int edge;
 
+       raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
+
        reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
        bit = gpio & 0xf;
        val = readl(reg);
@@ -243,6 +252,8 @@ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
                return;
        }
        writel(val | (edge << (bit << 1)), reg);
+
+       raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
 }
 
 /* handle 32 interrupts in one status register */
index bed0380..9ef0f56 100644 (file)
@@ -385,7 +385,7 @@ err:
 }
 
 static bool acpi_gpio_irq_is_wake(struct device *parent,
-                                 struct acpi_resource_gpio *agpio)
+                                 const struct acpi_resource_gpio *agpio)
 {
        unsigned int pin = agpio->pin_table[0];
 
@@ -778,7 +778,7 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
                lookup->info.pin_config = agpio->pin_config;
                lookup->info.debounce = agpio->debounce_timeout;
                lookup->info.gpioint = gpioint;
-               lookup->info.wake_capable = agpio->wake_capable == ACPI_WAKE_CAPABLE;
+               lookup->info.wake_capable = acpi_gpio_irq_is_wake(&lookup->info.adev->dev, agpio);
 
                /*
                 * Polarity and triggering are only specified for GpioInt
@@ -1623,6 +1623,19 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
                        .ignore_interrupt = "AMDI0030:00@18",
                },
        },
+       {
+               /*
+                * Spurious wakeups from TP_ATTN# pin
+                * Found in BIOS 1.7.8
+                * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
+                */
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+               },
+               .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+                       .ignore_wake = "ELAN0415:00@9",
+               },
+       },
        {} /* Terminating entry */
 };
 
index b15091d..3b5c537 100644 (file)
@@ -2099,7 +2099,7 @@ int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_b
        }
 
        amdgpu_amdkfd_remove_eviction_fence(
-               bo, bo->kfd_bo->process_info->eviction_fence);
+               bo, bo->vm_bo->vm->process_info->eviction_fence);
 
        amdgpu_bo_unreserve(bo);
 
index 8516c81..7b5ce00 100644 (file)
@@ -61,6 +61,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
                amdgpu_ctx_put(p->ctx);
                return -ECANCELED;
        }
+
+       amdgpu_sync_create(&p->sync);
        return 0;
 }
 
@@ -452,18 +454,6 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
        }
 
        r = amdgpu_sync_fence(&p->sync, fence);
-       if (r)
-               goto error;
-
-       /*
-        * When we have an explicit dependency it might be necessary to insert a
-        * pipeline sync to make sure that all caches etc are flushed and the
-        * next job actually sees the results from the previous one.
-        */
-       if (fence->context == p->gang_leader->base.entity->fence_context)
-               r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
-
-error:
        dma_fence_put(fence);
        return r;
 }
@@ -1188,10 +1178,19 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 {
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+       struct drm_gpu_scheduler *sched;
        struct amdgpu_bo_list_entry *e;
+       struct dma_fence *fence;
        unsigned int i;
        int r;
 
+       r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
+       if (r) {
+               if (r != -ERESTARTSYS)
+                       DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
+               return r;
+       }
+
        list_for_each_entry(e, &p->validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
                struct dma_resv *resv = bo->tbo.base.resv;
@@ -1211,10 +1210,24 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
                        return r;
        }
 
-       r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
-       if (r && r != -ERESTARTSYS)
-               DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
-       return r;
+       sched = p->gang_leader->base.entity->rq->sched;
+       while ((fence = amdgpu_sync_get_fence(&p->sync))) {
+               struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
+
+               /*
+                * When we have an dependency it might be necessary to insert a
+                * pipeline sync to make sure that all caches etc are flushed and the
+                * next job actually sees the results from the previous one
+                * before we start executing on the same scheduler ring.
+                */
+               if (!s_fence || s_fence->sched != sched)
+                       continue;
+
+               r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+               if (r)
+                       return r;
+       }
+       return 0;
 }
 
 static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
@@ -1254,9 +1267,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                        continue;
 
                fence = &p->jobs[i]->base.s_fence->scheduled;
+               dma_fence_get(fence);
                r = drm_sched_job_add_dependency(&leader->base, fence);
-               if (r)
+               if (r) {
+                       dma_fence_put(fence);
                        goto error_cleanup;
+               }
        }
 
        if (p->gang_size > 1) {
@@ -1344,6 +1360,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
 {
        unsigned i;
 
+       amdgpu_sync_free(&parser->sync);
        for (i = 0; i < parser->num_post_deps; i++) {
                drm_syncobj_put(parser->post_deps[i].syncobj);
                kfree(parser->post_deps[i].chain);
index afe6af9..2f28a8c 100644 (file)
@@ -36,6 +36,7 @@
 #include <generated/utsrelease.h>
 #include <linux/pci-p2pdma.h>
 
+#include <drm/drm_aperture.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_probe_helper.h>
@@ -90,6 +91,8 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
 #define AMDGPU_MAX_RETRY_LIMIT         2
 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
 
+static const struct drm_driver amdgpu_kms_driver;
+
 const char *amdgpu_asic_name[] = {
        "TAHITI",
        "PITCAIRN",
@@ -3687,6 +3690,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        if (r)
                return r;
 
+       /* Get rid of things like offb */
+       r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
+       if (r)
+               return r;
+
        /* Enable TMZ based on IP_VERSION */
        amdgpu_gmc_tmz_set(adev);
 
index 1353ffd..cd4caaa 100644 (file)
@@ -23,7 +23,6 @@
  */
 
 #include <drm/amdgpu_drm.h>
-#include <drm/drm_aperture.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_fbdev_generic.h>
 #include <drm/drm_gem.h>
@@ -2122,11 +2121,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
        }
 #endif
 
-       /* Get rid of things like offb */
-       ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver);
-       if (ret)
-               return ret;
-
        adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
        if (IS_ERR(adev))
                return PTR_ERR(adev);
index 23692e5..3380daf 100644 (file)
@@ -156,6 +156,9 @@ static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
                return amdgpu_compute_multipipe == 1;
        }
 
+       if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
+               return true;
+
        /* FIXME: spreading the queues across pipes causes perf regressions
         * on POLARIS11 compute workloads */
        if (adev->asic_type == CHIP_POLARIS11)
index fcb711a..3f07b1a 100644 (file)
@@ -497,6 +497,7 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
            !--id_mgr->reserved_use_count) {
                /* give the reserved ID back to normal round robin */
                list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
+               id_mgr->reserved = NULL;
        }
        vm->reserved_vmid[vmhub] = false;
        mutex_unlock(&id_mgr->lock);
index 9e54992..c3d9d75 100644 (file)
@@ -161,8 +161,14 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
        struct dma_fence *f;
        unsigned i;
 
-       /* use sched fence if available */
-       f = job->base.s_fence ? &job->base.s_fence->finished :  &job->hw_fence;
+       /* Check if any fences where initialized */
+       if (job->base.s_fence && job->base.s_fence->finished.ops)
+               f = &job->base.s_fence->finished;
+       else if (job->hw_fence.ops)
+               f = &job->hw_fence;
+       else
+               f = NULL;
+
        for (i = 0; i < job->num_ibs; ++i)
                amdgpu_ib_free(ring->adev, &job->ibs[i], f);
 }
index 4e684c2..25a68d8 100644 (file)
@@ -470,8 +470,9 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
        return true;
 
 fail:
-       DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
-                 man->size);
+       if (man)
+               DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
+                         man->size);
        return false;
 }
 
index bac7976..dcd8c06 100644 (file)
@@ -391,8 +391,10 @@ int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
 
                dma_fence_get(f);
                r = drm_sched_job_add_dependency(&job->base, f);
-               if (r)
+               if (r) {
+                       dma_fence_put(f);
                        return r;
+               }
        }
        return 0;
 }
index faa1214..9fa1d81 100644 (file)
@@ -882,7 +882,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
                kfree(rsv);
 
        list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
-               drm_buddy_free_list(&mgr->mm, &rsv->blocks);
+               drm_buddy_free_list(&mgr->mm, &rsv->allocated);
                kfree(rsv);
        }
        drm_buddy_fini(&mgr->mm);
index a56c6e1..b9b57a6 100644 (file)
@@ -1287,10 +1287,8 @@ static int gfx_v11_0_sw_init(void *handle)
 
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(11, 0, 0):
-       case IP_VERSION(11, 0, 1):
        case IP_VERSION(11, 0, 2):
        case IP_VERSION(11, 0, 3):
-       case IP_VERSION(11, 0, 4):
                adev->gfx.me.num_me = 1;
                adev->gfx.me.num_pipe_per_me = 1;
                adev->gfx.me.num_queue_per_pipe = 1;
@@ -1298,6 +1296,15 @@ static int gfx_v11_0_sw_init(void *handle)
                adev->gfx.mec.num_pipe_per_mec = 4;
                adev->gfx.mec.num_queue_per_pipe = 4;
                break;
+       case IP_VERSION(11, 0, 1):
+       case IP_VERSION(11, 0, 4):
+               adev->gfx.me.num_me = 1;
+               adev->gfx.me.num_pipe_per_me = 1;
+               adev->gfx.me.num_queue_per_pipe = 1;
+               adev->gfx.mec.num_mec = 1;
+               adev->gfx.mec.num_pipe_per_mec = 4;
+               adev->gfx.mec.num_queue_per_pipe = 4;
+               break;
        default:
                adev->gfx.me.num_me = 1;
                adev->gfx.me.num_pipe_per_me = 1;
index ecb4c3a..c06ada0 100644 (file)
@@ -200,7 +200,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
        queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
 
        if (q->wptr_bo) {
-               wptr_addr_off = (uint64_t)q->properties.write_ptr - (uint64_t)q->wptr_bo->kfd_bo->va;
+               wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
                queue_input.wptr_mc_addr = ((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
        }
 
index 814f998..b94d2c1 100644 (file)
@@ -570,6 +570,15 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
                goto reserve_bo_failed;
        }
 
+       if (clear) {
+               r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
+               if (r) {
+                       pr_debug("failed %d to sync bo\n", r);
+                       amdgpu_bo_unreserve(bo);
+                       goto reserve_bo_failed;
+               }
+       }
+
        r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
        if (r) {
                pr_debug("failed %d to reserve bo\n", r);
index 1b7f20a..4d42033 100644 (file)
@@ -1503,8 +1503,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                case IP_VERSION(3, 0, 1):
                case IP_VERSION(3, 1, 2):
                case IP_VERSION(3, 1, 3):
-               case IP_VERSION(3, 1, 4):
-               case IP_VERSION(3, 1, 5):
                case IP_VERSION(3, 1, 6):
                        init_data.flags.gpu_vm_support = true;
                        break;
@@ -1730,10 +1728,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
                adev->dm.vblank_control_workqueue = NULL;
        }
 
-       for (i = 0; i < adev->dm.display_indexes_num; i++) {
-               drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
-       }
-
        amdgpu_dm_destroy_drm_device(&adev->dm);
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
@@ -5311,8 +5305,6 @@ static void fill_stream_properties_from_drm_display_mode(
 
        timing_out->aspect_ratio = get_aspect_ratio(mode_in);
 
-       stream->output_color_space = get_output_color_space(timing_out);
-
        stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
        stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
        if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
@@ -5323,6 +5315,8 @@ static void fill_stream_properties_from_drm_display_mode(
                        adjust_colour_depth_from_display_info(timing_out, info);
                }
        }
+
+       stream->output_color_space = get_output_color_space(timing_out);
 }
 
 static void fill_audio_info(struct audio_info *audio_info,
@@ -9530,8 +9524,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        goto fail;
                }
 
-               if (dm_old_con_state->abm_level !=
-                   dm_new_con_state->abm_level)
+               if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
+                   dm_old_con_state->scaling != dm_new_con_state->scaling)
                        new_crtc_state->connectors_changed = true;
        }
 
index 1edf738..d7a044e 100644 (file)
@@ -468,7 +468,6 @@ static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs
 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
 {
        drm_encoder_cleanup(encoder);
-       kfree(encoder);
 }
 
 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
index 471078f..652270a 100644 (file)
@@ -90,8 +90,8 @@ static const struct out_csc_color_matrix_type output_csc_matrix[] = {
                { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
                                0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
        { COLOR_SPACE_YCBCR2020_TYPE,
-               { 0x1000, 0xF149, 0xFEB7, 0x0000, 0x0868, 0x15B2,
-                               0x01E6, 0x0000, 0xFB88, 0xF478, 0x1000, 0x0000} },
+               { 0x1000, 0xF149, 0xFEB7, 0x1004, 0x0868, 0x15B2,
+                               0x01E6, 0x201, 0xFB88, 0xF478, 0x1000, 0x1004} },
        { COLOR_SPACE_YCBCR709_BLACK_TYPE,
                { 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000,
                                0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} },
index 85e2221..5cdc071 100644 (file)
@@ -1171,6 +1171,7 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
        int ret = 0;
        uint32_t apu_percent = 0;
        uint32_t dgpu_percent = 0;
+       struct amdgpu_device *adev = smu->adev;
 
 
        ret = smu_cmn_get_metrics_table(smu,
@@ -1196,7 +1197,11 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
                *value = metrics->AverageUvdActivity / 100;
                break;
        case METRICS_AVERAGE_SOCKETPOWER:
-               *value = (metrics->CurrentSocketPower << 8) / 1000;
+               if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) && (adev->pm.fw_version >= 0x40000f)) ||
+               ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0)) && (adev->pm.fw_version >= 0x373200)))
+                       *value = metrics->CurrentSocketPower << 8;
+               else
+                       *value = (metrics->CurrentSocketPower << 8) / 1000;
                break;
        case METRICS_TEMPERATURE_EDGE:
                *value = (metrics->GfxTemperature / 100) *
index e54b760..b4373b6 100644 (file)
@@ -1261,7 +1261,8 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
                                uint32_t speed)
 {
        struct amdgpu_device *adev = smu->adev;
-       uint32_t tach_period, crystal_clock_freq;
+       uint32_t crystal_clock_freq = 2500;
+       uint32_t tach_period;
        int ret;
 
        if (!speed)
@@ -1271,7 +1272,6 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
        if (ret)
                return ret;
 
-       crystal_clock_freq = amdgpu_asic_get_xclk(adev);
        tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
        WREG32_SOC15(THM, 0, regCG_TACH_CTRL,
                     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_TACH_CTRL),
@@ -2298,6 +2298,10 @@ bool smu_v13_0_baco_is_support(struct smu_context *smu)
            !smu_baco->platform_support)
                return false;
 
+       /* return true if ASIC is in BACO state already */
+       if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
+               return true;
+
        if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
            !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
                return false;
index 9643b21..4c20d17 100644 (file)
@@ -213,6 +213,7 @@ static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] =
        FEA_MAP(SOC_PCC),
        [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
        [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+       [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
 };
 
 static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
index 5c6c6ad..e87db7e 100644 (file)
@@ -192,6 +192,7 @@ static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] =
        FEA_MAP(SOC_PCC),
        [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
        [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+       [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
 };
 
 static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
index 11bb593..3d1f50f 100644 (file)
@@ -38,6 +38,25 @@ static void drm_block_free(struct drm_buddy *mm,
        kmem_cache_free(slab_blocks, block);
 }
 
+static void list_insert_sorted(struct drm_buddy *mm,
+                              struct drm_buddy_block *block)
+{
+       struct drm_buddy_block *node;
+       struct list_head *head;
+
+       head = &mm->free_list[drm_buddy_block_order(block)];
+       if (list_empty(head)) {
+               list_add(&block->link, head);
+               return;
+       }
+
+       list_for_each_entry(node, head, link)
+               if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
+                       break;
+
+       __list_add(&block->link, node->link.prev, &node->link);
+}
+
 static void mark_allocated(struct drm_buddy_block *block)
 {
        block->header &= ~DRM_BUDDY_HEADER_STATE;
@@ -52,8 +71,7 @@ static void mark_free(struct drm_buddy *mm,
        block->header &= ~DRM_BUDDY_HEADER_STATE;
        block->header |= DRM_BUDDY_FREE;
 
-       list_add(&block->link,
-                &mm->free_list[drm_buddy_block_order(block)]);
+       list_insert_sorted(mm, block);
 }
 
 static void mark_split(struct drm_buddy_block *block)
@@ -387,20 +405,26 @@ err_undo:
 }
 
 static struct drm_buddy_block *
-get_maxblock(struct list_head *head)
+get_maxblock(struct drm_buddy *mm, unsigned int order)
 {
        struct drm_buddy_block *max_block = NULL, *node;
+       unsigned int i;
 
-       max_block = list_first_entry_or_null(head,
-                                            struct drm_buddy_block,
-                                            link);
-       if (!max_block)
-               return NULL;
+       for (i = order; i <= mm->max_order; ++i) {
+               if (!list_empty(&mm->free_list[i])) {
+                       node = list_last_entry(&mm->free_list[i],
+                                              struct drm_buddy_block,
+                                              link);
+                       if (!max_block) {
+                               max_block = node;
+                               continue;
+                       }
 
-       list_for_each_entry(node, head, link) {
-               if (drm_buddy_block_offset(node) >
-                   drm_buddy_block_offset(max_block))
-                       max_block = node;
+                       if (drm_buddy_block_offset(node) >
+                           drm_buddy_block_offset(max_block)) {
+                               max_block = node;
+                       }
+               }
        }
 
        return max_block;
@@ -412,20 +436,23 @@ alloc_from_freelist(struct drm_buddy *mm,
                    unsigned long flags)
 {
        struct drm_buddy_block *block = NULL;
-       unsigned int i;
+       unsigned int tmp;
        int err;
 
-       for (i = order; i <= mm->max_order; ++i) {
-               if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
-                       block = get_maxblock(&mm->free_list[i]);
-                       if (block)
-                               break;
-               } else {
-                       block = list_first_entry_or_null(&mm->free_list[i],
-                                                        struct drm_buddy_block,
-                                                        link);
-                       if (block)
-                               break;
+       if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
+               block = get_maxblock(mm, order);
+               if (block)
+                       /* Store the obtained block order */
+                       tmp = drm_buddy_block_order(block);
+       } else {
+               for (tmp = order; tmp <= mm->max_order; ++tmp) {
+                       if (!list_empty(&mm->free_list[tmp])) {
+                               block = list_last_entry(&mm->free_list[tmp],
+                                                       struct drm_buddy_block,
+                                                       link);
+                               if (block)
+                                       break;
+                       }
                }
        }
 
@@ -434,18 +461,18 @@ alloc_from_freelist(struct drm_buddy *mm,
 
        BUG_ON(!drm_buddy_block_is_free(block));
 
-       while (i != order) {
+       while (tmp != order) {
                err = split_block(mm, block);
                if (unlikely(err))
                        goto err_undo;
 
                block = block->right;
-               i--;
+               tmp--;
        }
        return block;
 
 err_undo:
-       if (i != order)
+       if (tmp != order)
                __drm_buddy_free(mm, block);
        return ERR_PTR(err);
 }
index b3a731b..0d0c26e 100644 (file)
@@ -30,7 +30,9 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/console.h>
+#include <linux/pci.h>
 #include <linux/sysrq.h>
+#include <linux/vga_switcheroo.h>
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_drv.h>
@@ -1909,6 +1911,11 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
                return ret;
 
        strcpy(fb_helper->fb->comm, "[fbcon]");
+
+       /* Set the fb info for vgaswitcheroo clients. Does nothing otherwise. */
+       if (dev_is_pci(dev->dev))
+               vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), fb_helper->info);
+
        return 0;
 }
 
index 52d8800..3659f04 100644 (file)
@@ -304,6 +304,12 @@ static const struct dmi_system_id orientation_data[] = {
                  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
                },
                .driver_data = (void *)&lcd1200x1920_rightside_up,
+       }, {    /* Lenovo Ideapad D330-10IGL (HD) */
+               .matches = {
+                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"),
+               },
+               .driver_data = (void *)&lcd800x1280_rightside_up,
        }, {    /* Lenovo Yoga Book X90F / X91F / X91L */
                .matches = {
                  /* Non exact match to match all versions */
index 76490cc..7d07fa3 100644 (file)
@@ -1627,7 +1627,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
        u32 offset;
        int ret;
 
-       if (w > max_width || w < min_width || h > max_height) {
+       if (w > max_width || w < min_width || h > max_height || h < 1) {
                drm_dbg_kms(&dev_priv->drm,
                            "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
                            w, h, min_width, max_width, max_height);
index 7f2831e..6250de9 100644 (file)
@@ -1688,6 +1688,10 @@ void i915_gem_init__contexts(struct drm_i915_private *i915)
        init_contexts(&i915->gem.contexts);
 }
 
+/*
+ * Note that this implicitly consumes the ctx reference, by placing
+ * the ctx in the context_xa.
+ */
 static void gem_context_register(struct i915_gem_context *ctx,
                                 struct drm_i915_file_private *fpriv,
                                 u32 id)
@@ -1703,10 +1707,6 @@ static void gem_context_register(struct i915_gem_context *ctx,
        snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
                 current->comm, pid_nr(ctx->pid));
 
-       /* And finally expose ourselves to userspace via the idr */
-       old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
-       WARN_ON(old);
-
        spin_lock(&ctx->client->ctx_lock);
        list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list);
        spin_unlock(&ctx->client->ctx_lock);
@@ -1714,6 +1714,10 @@ static void gem_context_register(struct i915_gem_context *ctx,
        spin_lock(&i915->gem.contexts.lock);
        list_add_tail(&ctx->link, &i915->gem.contexts.list);
        spin_unlock(&i915->gem.contexts.lock);
+
+       /* And finally expose ourselves to userspace via the idr */
+       old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
+       WARN_ON(old);
 }
 
 int i915_gem_context_open(struct drm_i915_private *i915,
@@ -2199,14 +2203,22 @@ finalize_create_context_locked(struct drm_i915_file_private *file_priv,
        if (IS_ERR(ctx))
                return ctx;
 
+       /*
+        * One for the xarray and one for the caller.  We need to grab
+        * the reference *prior* to making the ctx visble to userspace
+        * in gem_context_register(), as at any point after that
+        * userspace can try to race us with another thread destroying
+        * the context under our feet.
+        */
+       i915_gem_context_get(ctx);
+
        gem_context_register(ctx, file_priv, id);
 
        old = xa_erase(&file_priv->proto_context_xa, id);
        GEM_BUG_ON(old != pc);
        proto_context_close(file_priv->dev_priv, pc);
 
-       /* One for the xarray and one for the caller */
-       return i915_gem_context_get(ctx);
+       return ctx;
 }
 
 struct i915_gem_context *
index beaf27e..977dead 100644 (file)
@@ -1847,7 +1847,7 @@ static int igt_shrink_thp(void *arg)
                        I915_SHRINK_ACTIVE);
        i915_vma_unpin(vma);
        if (err)
-               goto out_put;
+               goto out_wf;
 
        /*
         * Now that the pages are *unpinned* shrinking should invoke
@@ -1863,19 +1863,19 @@ static int igt_shrink_thp(void *arg)
                pr_err("unexpected pages mismatch, should_swap=%s\n",
                       str_yes_no(should_swap));
                err = -EINVAL;
-               goto out_put;
+               goto out_wf;
        }
 
        if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) {
                pr_err("unexpected residual page-size bits, should_swap=%s\n",
                       str_yes_no(should_swap));
                err = -EINVAL;
-               goto out_put;
+               goto out_wf;
        }
 
        err = i915_vma_pin(vma, 0, 0, flags);
        if (err)
-               goto out_put;
+               goto out_wf;
 
        while (n--) {
                err = cpu_check(obj, n, 0xdeadbeaf);
index c3cd926..a5454af 100644 (file)
 #define GEN9_WM_CHICKEN3                       _MMIO(0x5588)
 #define   GEN9_FACTOR_IN_CLR_VAL_HIZ           (1 << 9)
 
-#define CHICKEN_RASTER_1                       _MMIO(0x6204)
+#define CHICKEN_RASTER_1                       MCR_REG(0x6204)
 #define   DIS_SF_ROUND_NEAREST_EVEN            REG_BIT(8)
 
-#define CHICKEN_RASTER_2                       _MMIO(0x6208)
+#define CHICKEN_RASTER_2                       MCR_REG(0x6208)
 #define   TBIMR_FAST_CLIP                      REG_BIT(5)
 
 #define VFLSKPD                                        MCR_REG(0x62a8)
 #define   RC_OP_FLUSH_ENABLE                   (1 << 0)
 #define   HIZ_RAW_STALL_OPT_DISABLE            (1 << 2)
 #define CACHE_MODE_1                           _MMIO(0x7004) /* IVB+ */
-#define   PIXEL_SUBSPAN_COLLECT_OPT_DISABLE    (1 << 6)
-#define   GEN8_4x4_STC_OPTIMIZATION_DISABLE    (1 << 6)
-#define   GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE   (1 << 1)
+#define   MSAA_OPTIMIZATION_REDUC_DISABLE      REG_BIT(11)
+#define   PIXEL_SUBSPAN_COLLECT_OPT_DISABLE    REG_BIT(6)
+#define   GEN8_4x4_STC_OPTIMIZATION_DISABLE    REG_BIT(6)
+#define   GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE   REG_BIT(1)
 
 #define GEN7_GT_MODE                           _MMIO(0x7008)
 #define   GEN9_IZ_HASHING_MASK(slice)          (0x3 << ((slice) * 2))
 #define GEN8_L3CNTLREG                         _MMIO(0x7034)
 #define   GEN8_ERRDETBCTRL                     (1 << 9)
 
+#define PSS_MODE2                              _MMIO(0x703c)
+#define   SCOREBOARD_STALL_FLUSH_CONTROL       REG_BIT(5)
+
 #define GEN7_SC_INSTDONE                       _MMIO(0x7100)
 #define GEN12_SC_INSTDONE_EXTRA                        _MMIO(0x7104)
 #define GEN12_SC_INSTDONE_EXTRA2               _MMIO(0x7108)
index 24736eb..78dc5e4 100644 (file)
@@ -278,6 +278,7 @@ out:
 static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
 {
        struct intel_uncore *uncore = gt->uncore;
+       int loops = 2;
        int err;
 
        /*
@@ -285,18 +286,39 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
         * for fifo space for the write or forcewake the chip for
         * the read
         */
-       intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
+       do {
+               intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
 
-       /* Wait for the device to ack the reset requests */
-       err = __intel_wait_for_register_fw(uncore,
-                                          GEN6_GDRST, hw_domain_mask, 0,
-                                          500, 0,
-                                          NULL);
+               /*
+                * Wait for the device to ack the reset requests.
+                *
+                * On some platforms, e.g. Jasperlake, we see that the
+                * engine register state is not cleared until shortly after
+                * GDRST reports completion, causing a failure as we try
+                * to immediately resume while the internal state is still
+                * in flux. If we immediately repeat the reset, the second
+                * reset appears to serialise with the first, and since
+                * it is a no-op, the registers should retain their reset
+                * value. However, there is still a concern that upon
+                * leaving the second reset, the internal engine state
+                * is still in flux and not ready for resuming.
+                */
+               err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
+                                                  hw_domain_mask, 0,
+                                                  2000, 0,
+                                                  NULL);
+       } while (err == 0 && --loops);
        if (err)
                GT_TRACE(gt,
                         "Wait for 0x%08x engines reset failed\n",
                         hw_domain_mask);
 
+       /*
+        * As we have observed that the engine state is still volatile
+        * after GDRST is acked, impose a small delay to let everything settle.
+        */
+       udelay(50);
+
        return err;
 }
 
index 2afb4f8..949c193 100644 (file)
@@ -645,7 +645,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
 static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
                                   struct i915_wa_list *wal)
 {
-       wa_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
+       wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
        wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
                             REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
        wa_mcr_add(wal,
@@ -771,11 +771,19 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
 
        /* Wa_14014947963:dg2 */
        if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) ||
-               IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
+           IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
                wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);
 
+       /* Wa_18018764978:dg2 */
+       if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_C0, STEP_FOREVER) ||
+           IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
+               wa_masked_en(wal, PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
+
        /* Wa_15010599737:dg2 */
-       wa_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);
+       wa_mcr_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);
+
+       /* Wa_18019271663:dg2 */
+       wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
 }
 
 static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
index 69103ae..61c38fc 100644 (file)
@@ -1069,12 +1069,9 @@ static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
  */
 static void i915_driver_lastclose(struct drm_device *dev)
 {
-       struct drm_i915_private *i915 = to_i915(dev);
-
        intel_fbdev_restore_mode(dev);
 
-       if (HAS_DISPLAY(i915))
-               vga_switcheroo_process_delayed_switch();
+       vga_switcheroo_process_delayed_switch();
 }
 
 static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
index ccd1f86..4fada7e 100644 (file)
@@ -423,7 +423,8 @@ static const struct intel_device_info ilk_m_info = {
        .has_coherent_ggtt = true, \
        .has_llc = 1, \
        .has_rc6 = 1, \
-       .has_rc6p = 1, \
+       /* snb does support rc6p, but enabling it causes various issues */ \
+       .has_rc6p = 0, \
        .has_rps = true, \
        .dma_mask_size = 40, \
        .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \
index 23777d5..f45bd6b 100644 (file)
@@ -19,6 +19,10 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev,
                dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
                return;
        }
+       if (!HAS_DISPLAY(i915)) {
+               dev_err(&pdev->dev, "Device state not initialized, aborting switch.\n");
+               return;
+       }
 
        if (state == VGA_SWITCHEROO_ON) {
                drm_info(&i915->drm, "switched on\n");
@@ -44,7 +48,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
-       return i915 && atomic_read(&i915->drm.open_count) == 0;
+       return i915 && HAS_DISPLAY(i915) && atomic_read(&i915->drm.open_count) == 0;
 }
 
 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
index 3a33be5..135390d 100644 (file)
@@ -2116,7 +2116,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
        if (!obj->mm.rsgt)
                return -EBUSY;
 
-       err = dma_resv_reserve_fences(obj->base.resv, 1);
+       err = dma_resv_reserve_fences(obj->base.resv, 2);
        if (err)
                return -EBUSY;
 
index 6484b97..f3c9600 100644 (file)
@@ -876,7 +876,8 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
 #define GBIF_CLIENT_HALT_MASK             BIT(0)
 #define GBIF_ARB_HALT_MASK                BIT(1)
 
-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu,
+               bool gx_off)
 {
        struct msm_gpu *gpu = &adreno_gpu->base;
 
@@ -889,9 +890,11 @@ static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
                return;
        }
 
-       /* Halt the gx side of GBIF */
-       gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
-       spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
+       if (gx_off) {
+               /* Halt the gx side of GBIF */
+               gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
+               spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
+       }
 
        /* Halt new client requests on GBIF */
        gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
@@ -929,7 +932,7 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
        /* Halt the gmu cm3 core */
        gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
 
-       a6xx_bus_clear_pending_transactions(adreno_gpu);
+       a6xx_bus_clear_pending_transactions(adreno_gpu, true);
 
        /* Reset GPU core blocks */
        gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1);
@@ -1083,7 +1086,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
                        return;
                }
 
-               a6xx_bus_clear_pending_transactions(adreno_gpu);
+               a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
 
                /* tell the GMU we want to slumber */
                ret = a6xx_gmu_notify_slumber(gmu);
index 36c8fb6..3be0f29 100644 (file)
@@ -1270,6 +1270,12 @@ static void a6xx_recover(struct msm_gpu *gpu)
        if (hang_debug)
                a6xx_dump(gpu);
 
+       /*
+        * To handle recovery specific sequences during the rpm suspend we are
+        * about to trigger
+        */
+       a6xx_gpu->hung = true;
+
        /* Halt SQE first */
        gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
 
@@ -1312,6 +1318,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
        mutex_unlock(&gpu->active_lock);
 
        msm_gpu_hw_init(gpu);
+       a6xx_gpu->hung = false;
 }
 
 static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
index ab853f6..eea2e60 100644 (file)
@@ -32,6 +32,7 @@ struct a6xx_gpu {
        void *llc_slice;
        void *htw_llc_slice;
        bool have_mmu500;
+       bool hung;
 };
 
 #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
index 6288064..36f062c 100644 (file)
@@ -551,13 +551,14 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
        return 0;
 }
 
+static int adreno_system_suspend(struct device *dev);
 static void adreno_unbind(struct device *dev, struct device *master,
                void *data)
 {
        struct msm_drm_private *priv = dev_get_drvdata(master);
        struct msm_gpu *gpu = dev_to_gpu(dev);
 
-       pm_runtime_force_suspend(dev);
+       WARN_ON_ONCE(adreno_system_suspend(dev));
        gpu->funcs->destroy(gpu);
 
        priv->gpu_pdev = NULL;
@@ -609,7 +610,7 @@ static int adreno_remove(struct platform_device *pdev)
 
 static void adreno_shutdown(struct platform_device *pdev)
 {
-       pm_runtime_force_suspend(&pdev->dev);
+       WARN_ON_ONCE(adreno_system_suspend(&pdev->dev));
 }
 
 static const struct of_device_id dt_match[] = {
index 57586c7..3605f09 100644 (file)
@@ -352,6 +352,8 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
                /* Ensure string is null terminated: */
                str[len] = '\0';
 
+               mutex_lock(&gpu->lock);
+
                if (param == MSM_PARAM_COMM) {
                        paramp = &ctx->comm;
                } else {
@@ -361,6 +363,8 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
                kfree(*paramp);
                *paramp = str;
 
+               mutex_unlock(&gpu->lock);
+
                return 0;
        }
        case MSM_PARAM_SYSPROF:
index 5d4b1c9..b4f9b13 100644 (file)
@@ -29,11 +29,9 @@ enum {
        ADRENO_FW_MAX,
 };
 
-enum adreno_quirks {
-       ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
-       ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
-       ADRENO_QUIRK_LMLOADKILL_DISABLE = 3,
-};
+#define ADRENO_QUIRK_TWO_PASS_USE_WFI          BIT(0)
+#define ADRENO_QUIRK_FAULT_DETECT_MASK         BIT(1)
+#define ADRENO_QUIRK_LMLOADKILL_DISABLE                BIT(2)
 
 struct adreno_rev {
        uint8_t  core;
@@ -65,7 +63,7 @@ struct adreno_info {
        const char *name;
        const char *fw[ADRENO_FW_MAX];
        uint32_t gmem;
-       enum adreno_quirks quirks;
+       u64 quirks;
        struct msm_gpu *(*init)(struct drm_device *dev);
        const char *zapfw;
        u32 inactive_period;
index 7cbcef6..62f6ff6 100644 (file)
@@ -132,7 +132,6 @@ static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
  * dpu_encoder_phys_wb_setup_fb - setup output framebuffer
  * @phys_enc:  Pointer to physical encoder
  * @fb:                Pointer to output framebuffer
- * @wb_roi:    Pointer to output region of interest
  */
 static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
                struct drm_framebuffer *fb)
@@ -692,7 +691,7 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
 
 /**
  * dpu_encoder_phys_wb_init - initialize writeback encoder
- * @init:      Pointer to init info structure with initialization params
+ * @p: Pointer to init info structure with initialization params
  */
 struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
                struct dpu_enc_phys_init_params *p)
index d030a93..cc3efed 100644 (file)
@@ -423,6 +423,10 @@ void dp_aux_isr(struct drm_dp_aux *dp_aux)
 
        isr = dp_catalog_aux_get_irq(aux->catalog);
 
+       /* no interrupts pending, return immediately */
+       if (!isr)
+               return;
+
        if (!aux->cmd_busy)
                return;
 
index 4d3fdc8..97372bb 100644 (file)
@@ -532,11 +532,19 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
 
        ret = devm_pm_runtime_enable(&pdev->dev);
        if (ret)
-               return ret;
+               goto err_put_phy;
 
        platform_set_drvdata(pdev, hdmi);
 
-       return component_add(&pdev->dev, &msm_hdmi_ops);
+       ret = component_add(&pdev->dev, &msm_hdmi_ops);
+       if (ret)
+               goto err_put_phy;
+
+       return 0;
+
+err_put_phy:
+       msm_hdmi_put_phy(hdmi);
+       return ret;
 }
 
 static int msm_hdmi_dev_remove(struct platform_device *pdev)
index 8b0b0ac..45e81eb 100644 (file)
@@ -1278,7 +1278,7 @@ void msm_drv_shutdown(struct platform_device *pdev)
         * msm_drm_init, drm_dev->registered is used as an indicator that the
         * shutdown will be successful.
         */
-       if (drm && drm->registered)
+       if (drm && drm->registered && priv->kms)
                drm_atomic_helper_shutdown(drm);
 }
 
index 30ed45a..3802495 100644 (file)
@@ -335,6 +335,8 @@ static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **
        struct msm_file_private *ctx = submit->queue->ctx;
        struct task_struct *task;
 
+       WARN_ON(!mutex_is_locked(&submit->gpu->lock));
+
        /* Note that kstrdup will return NULL if argument is NULL: */
        *comm = kstrdup(ctx->comm, GFP_KERNEL);
        *cmd  = kstrdup(ctx->cmdline, GFP_KERNEL);
index 651786b..732295e 100644 (file)
@@ -376,10 +376,18 @@ struct msm_file_private {
         */
        int sysprof;
 
-       /** comm: Overridden task comm, see MSM_PARAM_COMM */
+       /**
+        * comm: Overridden task comm, see MSM_PARAM_COMM
+        *
+        * Accessed under msm_gpu::lock
+        */
        char *comm;
 
-       /** cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE */
+       /**
+        * cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
+        *
+        * Accessed under msm_gpu::lock
+        */
        char *cmdline;
 
        /**
index 86b28ad..2527afe 100644 (file)
@@ -47,15 +47,17 @@ struct msm_mdss {
 static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
                                            struct msm_mdss *msm_mdss)
 {
-       struct icc_path *path0 = of_icc_get(dev, "mdp0-mem");
-       struct icc_path *path1 = of_icc_get(dev, "mdp1-mem");
+       struct icc_path *path0;
+       struct icc_path *path1;
 
+       path0 = of_icc_get(dev, "mdp0-mem");
        if (IS_ERR_OR_NULL(path0))
                return PTR_ERR_OR_ZERO(path0);
 
        msm_mdss->path[0] = path0;
        msm_mdss->num_paths = 1;
 
+       path1 = of_icc_get(dev, "mdp1-mem");
        if (!IS_ERR_OR_NULL(path1)) {
                msm_mdss->path[1] = path1;
                msm_mdss->num_paths++;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
deleted file mode 100644 (file)
index e87de79..0000000
+++ /dev/null
@@ -1,613 +0,0 @@
-/*
- * Copyright Â© 2007 David Airlie
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *     David Airlie
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/tty.h>
-#include <linux/sysrq.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/screen_info.h>
-#include <linux/vga_switcheroo.h>
-#include <linux/console.h>
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_atomic.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_gem.h"
-#include "nouveau_bo.h"
-#include "nouveau_fbcon.h"
-#include "nouveau_chan.h"
-#include "nouveau_vmm.h"
-
-#include "nouveau_crtc.h"
-
-MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
-int nouveau_nofbaccel = 0;
-module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
-
-MODULE_PARM_DESC(fbcon_bpp, "fbcon bits-per-pixel (default: auto)");
-static int nouveau_fbcon_bpp;
-module_param_named(fbcon_bpp, nouveau_fbcon_bpp, int, 0400);
-
-static void
-nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-       struct nvif_device *device = &drm->client.device;
-       int ret;
-
-       if (info->state != FBINFO_STATE_RUNNING)
-               return;
-
-       ret = -ENODEV;
-       if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
-           mutex_trylock(&drm->client.mutex)) {
-               if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
-                       ret = nv04_fbcon_fillrect(info, rect);
-               else
-               if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
-                       ret = nv50_fbcon_fillrect(info, rect);
-               else
-                       ret = nvc0_fbcon_fillrect(info, rect);
-               mutex_unlock(&drm->client.mutex);
-       }
-
-       if (ret == 0)
-               return;
-
-       if (ret != -ENODEV)
-               nouveau_fbcon_gpu_lockup(info);
-       drm_fb_helper_cfb_fillrect(info, rect);
-}
-
-static void
-nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-       struct nvif_device *device = &drm->client.device;
-       int ret;
-
-       if (info->state != FBINFO_STATE_RUNNING)
-               return;
-
-       ret = -ENODEV;
-       if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
-           mutex_trylock(&drm->client.mutex)) {
-               if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
-                       ret = nv04_fbcon_copyarea(info, image);
-               else
-               if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
-                       ret = nv50_fbcon_copyarea(info, image);
-               else
-                       ret = nvc0_fbcon_copyarea(info, image);
-               mutex_unlock(&drm->client.mutex);
-       }
-
-       if (ret == 0)
-               return;
-
-       if (ret != -ENODEV)
-               nouveau_fbcon_gpu_lockup(info);
-       drm_fb_helper_cfb_copyarea(info, image);
-}
-
-static void
-nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-       struct nvif_device *device = &drm->client.device;
-       int ret;
-
-       if (info->state != FBINFO_STATE_RUNNING)
-               return;
-
-       ret = -ENODEV;
-       if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
-           mutex_trylock(&drm->client.mutex)) {
-               if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
-                       ret = nv04_fbcon_imageblit(info, image);
-               else
-               if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
-                       ret = nv50_fbcon_imageblit(info, image);
-               else
-                       ret = nvc0_fbcon_imageblit(info, image);
-               mutex_unlock(&drm->client.mutex);
-       }
-
-       if (ret == 0)
-               return;
-
-       if (ret != -ENODEV)
-               nouveau_fbcon_gpu_lockup(info);
-       drm_fb_helper_cfb_imageblit(info, image);
-}
-
-static int
-nouveau_fbcon_sync(struct fb_info *info)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-       struct nouveau_channel *chan = drm->channel;
-       int ret;
-
-       if (!chan || !chan->accel_done || in_interrupt() ||
-           info->state != FBINFO_STATE_RUNNING ||
-           info->flags & FBINFO_HWACCEL_DISABLED)
-               return 0;
-
-       if (!mutex_trylock(&drm->client.mutex))
-               return 0;
-
-       ret = nouveau_channel_idle(chan);
-       mutex_unlock(&drm->client.mutex);
-       if (ret) {
-               nouveau_fbcon_gpu_lockup(info);
-               return 0;
-       }
-
-       chan->accel_done = false;
-       return 0;
-}
-
-static int
-nouveau_fbcon_open(struct fb_info *info, int user)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-       int ret = pm_runtime_get_sync(drm->dev->dev);
-       if (ret < 0 && ret != -EACCES) {
-               pm_runtime_put(drm->dev->dev);
-               return ret;
-       }
-       return 0;
-}
-
-static int
-nouveau_fbcon_release(struct fb_info *info, int user)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-       pm_runtime_put(drm->dev->dev);
-       return 0;
-}
-
-static const struct fb_ops nouveau_fbcon_ops = {
-       .owner = THIS_MODULE,
-       DRM_FB_HELPER_DEFAULT_OPS,
-       .fb_open = nouveau_fbcon_open,
-       .fb_release = nouveau_fbcon_release,
-       .fb_fillrect = nouveau_fbcon_fillrect,
-       .fb_copyarea = nouveau_fbcon_copyarea,
-       .fb_imageblit = nouveau_fbcon_imageblit,
-       .fb_sync = nouveau_fbcon_sync,
-};
-
-static const struct fb_ops nouveau_fbcon_sw_ops = {
-       .owner = THIS_MODULE,
-       DRM_FB_HELPER_DEFAULT_OPS,
-       .fb_open = nouveau_fbcon_open,
-       .fb_release = nouveau_fbcon_release,
-       .fb_fillrect = drm_fb_helper_cfb_fillrect,
-       .fb_copyarea = drm_fb_helper_cfb_copyarea,
-       .fb_imageblit = drm_fb_helper_cfb_imageblit,
-};
-
-void
-nouveau_fbcon_accel_save_disable(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon && drm->fbcon->helper.info) {
-               drm->fbcon->saved_flags = drm->fbcon->helper.info->flags;
-               drm->fbcon->helper.info->flags |= FBINFO_HWACCEL_DISABLED;
-       }
-}
-
-void
-nouveau_fbcon_accel_restore(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon && drm->fbcon->helper.info)
-               drm->fbcon->helper.info->flags = drm->fbcon->saved_flags;
-}
-
-static void
-nouveau_fbcon_accel_fini(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_fbdev *fbcon = drm->fbcon;
-       if (fbcon && drm->channel) {
-               console_lock();
-               if (fbcon->helper.info)
-                       fbcon->helper.info->flags |= FBINFO_HWACCEL_DISABLED;
-               console_unlock();
-               nouveau_channel_idle(drm->channel);
-               nvif_object_dtor(&fbcon->twod);
-               nvif_object_dtor(&fbcon->blit);
-               nvif_object_dtor(&fbcon->gdi);
-               nvif_object_dtor(&fbcon->patt);
-               nvif_object_dtor(&fbcon->rop);
-               nvif_object_dtor(&fbcon->clip);
-               nvif_object_dtor(&fbcon->surf2d);
-       }
-}
-
-static void
-nouveau_fbcon_accel_init(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_fbdev *fbcon = drm->fbcon;
-       struct fb_info *info = fbcon->helper.info;
-       int ret;
-
-       if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA)
-               ret = nv04_fbcon_accel_init(info);
-       else
-       if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
-               ret = nv50_fbcon_accel_init(info);
-       else
-               ret = nvc0_fbcon_accel_init(info);
-
-       if (ret == 0)
-               info->fbops = &nouveau_fbcon_ops;
-}
-
-static void
-nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
-{
-       struct fb_info *info = fbcon->helper.info;
-       struct fb_fillrect rect;
-
-       /* Clear the entire fbcon.  The drm will program every connector
-        * with it's preferred mode.  If the sizes differ, one display will
-        * quite likely have garbage around the console.
-        */
-       rect.dx = rect.dy = 0;
-       rect.width = info->var.xres_virtual;
-       rect.height = info->var.yres_virtual;
-       rect.color = 0;
-       rect.rop = ROP_COPY;
-       info->fbops->fb_fillrect(info, &rect);
-}
-
-static int
-nouveau_fbcon_create(struct drm_fb_helper *helper,
-                    struct drm_fb_helper_surface_size *sizes)
-{
-       struct nouveau_fbdev *fbcon =
-               container_of(helper, struct nouveau_fbdev, helper);
-       struct drm_device *dev = fbcon->helper.dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nvif_device *device = &drm->client.device;
-       struct fb_info *info;
-       struct drm_framebuffer *fb;
-       struct nouveau_channel *chan;
-       struct nouveau_bo *nvbo;
-       struct drm_mode_fb_cmd2 mode_cmd = {};
-       int ret;
-
-       mode_cmd.width = sizes->surface_width;
-       mode_cmd.height = sizes->surface_height;
-
-       mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
-       mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256);
-
-       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
-                                                         sizes->surface_depth);
-
-       ret = nouveau_gem_new(&drm->client, mode_cmd.pitches[0] *
-                             mode_cmd.height, 0, NOUVEAU_GEM_DOMAIN_VRAM,
-                             0, 0x0000, &nvbo);
-       if (ret) {
-               NV_ERROR(drm, "failed to allocate framebuffer\n");
-               goto out;
-       }
-
-       ret = nouveau_framebuffer_new(dev, &mode_cmd, &nvbo->bo.base, &fb);
-       if (ret)
-               goto out_unref;
-
-       ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
-       if (ret) {
-               NV_ERROR(drm, "failed to pin fb: %d\n", ret);
-               goto out_unref;
-       }
-
-       ret = nouveau_bo_map(nvbo);
-       if (ret) {
-               NV_ERROR(drm, "failed to map fb: %d\n", ret);
-               goto out_unpin;
-       }
-
-       chan = nouveau_nofbaccel ? NULL : drm->channel;
-       if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-               ret = nouveau_vma_new(nvbo, chan->vmm, &fbcon->vma);
-               if (ret) {
-                       NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
-                       chan = NULL;
-               }
-       }
-
-       info = drm_fb_helper_alloc_info(helper);
-       if (IS_ERR(info)) {
-               ret = PTR_ERR(info);
-               goto out_unlock;
-       }
-
-       /* setup helper */
-       fbcon->helper.fb = fb;
-
-       if (!chan)
-               info->flags = FBINFO_HWACCEL_DISABLED;
-       else
-               info->flags = FBINFO_HWACCEL_COPYAREA |
-                             FBINFO_HWACCEL_FILLRECT |
-                             FBINFO_HWACCEL_IMAGEBLIT;
-       info->fbops = &nouveau_fbcon_sw_ops;
-       info->fix.smem_start = nvbo->bo.resource->bus.offset;
-       info->fix.smem_len = nvbo->bo.base.size;
-
-       info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
-       info->screen_size = nvbo->bo.base.size;
-
-       drm_fb_helper_fill_info(info, &fbcon->helper, sizes);
-
-       /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
-
-       if (chan)
-               nouveau_fbcon_accel_init(dev);
-       nouveau_fbcon_zfill(dev, fbcon);
-
-       /* To allow resizeing without swapping buffers */
-       NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
-               fb->width, fb->height, nvbo->offset, nvbo);
-
-       if (dev_is_pci(dev->dev))
-               vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), info);
-
-       return 0;
-
-out_unlock:
-       if (chan)
-               nouveau_vma_del(&fbcon->vma);
-       nouveau_bo_unmap(nvbo);
-out_unpin:
-       nouveau_bo_unpin(nvbo);
-out_unref:
-       nouveau_bo_ref(NULL, &nvbo);
-out:
-       return ret;
-}
-
-static int
-nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
-{
-       struct drm_framebuffer *fb = fbcon->helper.fb;
-       struct nouveau_bo *nvbo;
-
-       drm_fb_helper_unregister_info(&fbcon->helper);
-       drm_fb_helper_fini(&fbcon->helper);
-
-       if (fb && fb->obj[0]) {
-               nvbo = nouveau_gem_object(fb->obj[0]);
-               nouveau_vma_del(&fbcon->vma);
-               nouveau_bo_unmap(nvbo);
-               nouveau_bo_unpin(nvbo);
-               drm_framebuffer_put(fb);
-       }
-
-       return 0;
-}
-
-void nouveau_fbcon_gpu_lockup(struct fb_info *info)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-
-       NV_ERROR(drm, "GPU lockup - switching to software fbcon\n");
-       info->flags |= FBINFO_HWACCEL_DISABLED;
-}
-
-static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
-       .fb_probe = nouveau_fbcon_create,
-};
-
-static void
-nouveau_fbcon_set_suspend_work(struct work_struct *work)
-{
-       struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
-       int state = READ_ONCE(drm->fbcon_new_state);
-
-       if (state == FBINFO_STATE_RUNNING)
-               pm_runtime_get_sync(drm->dev->dev);
-
-       console_lock();
-       if (state == FBINFO_STATE_RUNNING)
-               nouveau_fbcon_accel_restore(drm->dev);
-       drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
-       if (state != FBINFO_STATE_RUNNING)
-               nouveau_fbcon_accel_save_disable(drm->dev);
-       console_unlock();
-
-       if (state == FBINFO_STATE_RUNNING) {
-               nouveau_fbcon_hotplug_resume(drm->fbcon);
-               pm_runtime_mark_last_busy(drm->dev->dev);
-               pm_runtime_put_autosuspend(drm->dev->dev);
-       }
-}
-
-void
-nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-
-       if (!drm->fbcon)
-               return;
-
-       drm->fbcon_new_state = state;
-       /* Since runtime resume can happen as a result of a sysfs operation,
-        * it's possible we already have the console locked. So handle fbcon
-        * init/deinit from a seperate work thread
-        */
-       schedule_work(&drm->fbcon_work);
-}
-
-void
-nouveau_fbcon_output_poll_changed(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_fbdev *fbcon = drm->fbcon;
-       int ret;
-
-       if (!fbcon)
-               return;
-
-       mutex_lock(&fbcon->hotplug_lock);
-
-       ret = pm_runtime_get(dev->dev);
-       if (ret == 1 || ret == -EACCES) {
-               drm_fb_helper_hotplug_event(&fbcon->helper);
-
-               pm_runtime_mark_last_busy(dev->dev);
-               pm_runtime_put_autosuspend(dev->dev);
-       } else if (ret == 0) {
-               /* If the GPU was already in the process of suspending before
-                * this event happened, then we can't block here as we'll
-                * deadlock the runtime pmops since they wait for us to
-                * finish. So, just defer this event for when we runtime
-                * resume again. It will be handled by fbcon_work.
-                */
-               NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");
-               fbcon->hotplug_waiting = true;
-               pm_runtime_put_noidle(drm->dev->dev);
-       } else {
-               DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",
-                        ret);
-       }
-
-       mutex_unlock(&fbcon->hotplug_lock);
-}
-
-void
-nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)
-{
-       struct nouveau_drm *drm;
-
-       if (!fbcon)
-               return;
-       drm = nouveau_drm(fbcon->helper.dev);
-
-       mutex_lock(&fbcon->hotplug_lock);
-       if (fbcon->hotplug_waiting) {
-               fbcon->hotplug_waiting = false;
-
-               NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");
-               drm_fb_helper_hotplug_event(&fbcon->helper);
-       }
-       mutex_unlock(&fbcon->hotplug_lock);
-}
-
-int
-nouveau_fbcon_init(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_fbdev *fbcon;
-       int preferred_bpp = nouveau_fbcon_bpp;
-       int ret;
-
-       if (!dev->mode_config.num_crtc ||
-           (to_pci_dev(dev->dev)->class >> 8) != PCI_CLASS_DISPLAY_VGA)
-               return 0;
-
-       fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
-       if (!fbcon)
-               return -ENOMEM;
-
-       drm->fbcon = fbcon;
-       INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
-       mutex_init(&fbcon->hotplug_lock);
-
-       drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
-
-       ret = drm_fb_helper_init(dev, &fbcon->helper);
-       if (ret)
-               goto free;
-
-       if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) {
-               if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
-                       preferred_bpp = 8;
-               else
-               if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
-                       preferred_bpp = 16;
-               else
-                       preferred_bpp = 32;
-       }
-
-       /* disable all the possible outputs/crtcs before entering KMS mode */
-       if (!drm_drv_uses_atomic_modeset(dev))
-               drm_helper_disable_unused_functions(dev);
-
-       ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
-       if (ret)
-               goto fini;
-
-       if (fbcon->helper.info)
-               fbcon->helper.info->pixmap.buf_align = 4;
-       return 0;
-
-fini:
-       drm_fb_helper_fini(&fbcon->helper);
-free:
-       kfree(fbcon);
-       drm->fbcon = NULL;
-       return ret;
-}
-
-void
-nouveau_fbcon_fini(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-
-       if (!drm->fbcon)
-               return;
-
-       drm_kms_helper_poll_fini(dev);
-       nouveau_fbcon_accel_fini(dev);
-       nouveau_fbcon_destroy(dev, drm->fbcon);
-       kfree(drm->fbcon);
-       drm->fbcon = NULL;
-}
index 0796003..e6403a9 100644 (file)
@@ -3,7 +3,8 @@
 config DRM_PANFROST
        tristate "Panfrost (DRM support for ARM Mali Midgard/Bifrost GPUs)"
        depends on DRM
-       depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
+       depends on ARM || ARM64 || COMPILE_TEST
+       depends on !GENERIC_ATOMIC64    # for IOMMU_IO_PGTABLE_LPAE
        depends on MMU
        select DRM_SCHED
        select IOMMU_SUPPORT
index ba3aa0a..da5493f 100644 (file)
@@ -173,7 +173,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 
        clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
        if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
-               ttm_move_memcpy(clear, ttm->num_pages, dst_iter, src_iter);
+               ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);
 
        if (!src_iter->ops->maps_tt)
                ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
index 43d9b3a..c5947ed 100644 (file)
@@ -179,6 +179,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
                bo->validated_shader = NULL;
        }
 
+       mutex_destroy(&bo->madv_lock);
        drm_gem_dma_free(&bo->base);
 }
 
@@ -394,7 +395,6 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_bo *bo;
-       int ret;
 
        if (WARN_ON_ONCE(vc4->is_vc5))
                return ERR_PTR(-ENODEV);
@@ -406,9 +406,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
        bo->madv = VC4_MADV_WILLNEED;
        refcount_set(&bo->usecnt, 0);
 
-       ret = drmm_mutex_init(dev, &bo->madv_lock);
-       if (ret)
-               return ERR_PTR(ret);
+       mutex_init(&bo->madv_lock);
 
        mutex_lock(&vc4->bo_lock);
        bo->label = VC4_BO_TYPE_KERNEL;
index 5d05093..9f4a904 100644 (file)
@@ -358,10 +358,18 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
                drm_gem_object_release(obj);
                return ret;
        }
-       drm_gem_object_put(obj);
 
        rc->res_handle = qobj->hw_res_handle; /* similiar to a VM address */
        rc->bo_handle = handle;
+
+       /*
+        * The handle owns the reference now.  But we must drop our
+        * remaining reference *after* we no longer need to dereference
+        * the obj.  Otherwise userspace could guess the handle and
+        * race closing it from another thread.
+        */
+       drm_gem_object_put(obj);
+
        return 0;
 }
 
@@ -723,11 +731,18 @@ static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
                drm_gem_object_release(obj);
                return ret;
        }
-       drm_gem_object_put(obj);
 
        rc_blob->res_handle = bo->hw_res_handle;
        rc_blob->bo_handle = handle;
 
+       /*
+        * The handle owns the reference now.  But we must drop our
+        * remaining reference *after* we no longer need to dereference
+        * the obj.  Otherwise userspace could guess the handle and
+        * race closing it from another thread.
+        */
+       drm_gem_object_put(obj);
+
        return 0;
 }
 
index 932b125..ddf8373 100644 (file)
@@ -254,40 +254,6 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
        kref_put(&base->refcount, ttm_release_base);
 }
 
-/**
- * ttm_base_object_noref_lookup - look up a base object without reference
- * @tfile: The struct ttm_object_file the object is registered with.
- * @key: The object handle.
- *
- * This function looks up a ttm base object and returns a pointer to it
- * without refcounting the pointer. The returned pointer is only valid
- * until ttm_base_object_noref_release() is called, and the object
- * pointed to by the returned pointer may be doomed. Any persistent usage
- * of the object requires a refcount to be taken using kref_get_unless_zero().
- * Iff this function returns successfully it needs to be paired with
- * ttm_base_object_noref_release() and no sleeping- or scheduling functions
- * may be called inbetween these function callse.
- *
- * Return: A pointer to the object if successful or NULL otherwise.
- */
-struct ttm_base_object *
-ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key)
-{
-       struct vmwgfx_hash_item *hash;
-       int ret;
-
-       rcu_read_lock();
-       ret = ttm_tfile_find_ref_rcu(tfile, key, &hash);
-       if (ret) {
-               rcu_read_unlock();
-               return NULL;
-       }
-
-       __release(RCU);
-       return hlist_entry(hash, struct ttm_ref_object, hash)->obj;
-}
-EXPORT_SYMBOL(ttm_base_object_noref_lookup);
-
 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                               uint64_t key)
 {
@@ -295,15 +261,16 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
        struct vmwgfx_hash_item *hash;
        int ret;
 
-       rcu_read_lock();
-       ret = ttm_tfile_find_ref_rcu(tfile, key, &hash);
+       spin_lock(&tfile->lock);
+       ret = ttm_tfile_find_ref(tfile, key, &hash);
 
        if (likely(ret == 0)) {
                base = hlist_entry(hash, struct ttm_ref_object, hash)->obj;
                if (!kref_get_unless_zero(&base->refcount))
                        base = NULL;
        }
-       rcu_read_unlock();
+       spin_unlock(&tfile->lock);
+
 
        return base;
 }
index f0ebbe3..8098a38 100644 (file)
@@ -307,18 +307,4 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
 #define ttm_prime_object_kfree(__obj, __prime)         \
        kfree_rcu(__obj, __prime.base.rhead)
 
-struct ttm_base_object *
-ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key);
-
-/**
- * ttm_base_object_noref_release - release a base object pointer looked up
- * without reference
- *
- * Releases a base object pointer looked up with ttm_base_object_noref_lookup().
- */
-static inline void ttm_base_object_noref_release(void)
-{
-       __acquire(RCU);
-       rcu_read_unlock();
-}
 #endif
index 321c551..aa1cd51 100644 (file)
@@ -716,44 +716,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
 }
 
 /**
- * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
- * @filp: The TTM object file the handle is registered with.
- * @handle: The user buffer object handle.
- *
- * This function looks up a struct vmw_bo and returns a pointer to the
- * struct vmw_buffer_object it derives from without refcounting the pointer.
- * The returned pointer is only valid until vmw_user_bo_noref_release() is
- * called, and the object pointed to by the returned pointer may be doomed.
- * Any persistent usage of the object requires a refcount to be taken using
- * ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
- * needs to be paired with vmw_user_bo_noref_release() and no sleeping-
- * or scheduling functions may be called in between these function calls.
- *
- * Return: A struct vmw_buffer_object pointer if successful or negative
- * error pointer on failure.
- */
-struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle)
-{
-       struct vmw_buffer_object *vmw_bo;
-       struct ttm_buffer_object *bo;
-       struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle);
-
-       if (!gobj) {
-               DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
-                         (unsigned long)handle);
-               return ERR_PTR(-ESRCH);
-       }
-       vmw_bo = gem_to_vmw_bo(gobj);
-       bo = ttm_bo_get_unless_zero(&vmw_bo->base);
-       vmw_bo = vmw_buffer_object(bo);
-       drm_gem_object_put(gobj);
-
-       return vmw_bo;
-}
-
-
-/**
  * vmw_bo_fence_single - Utility function to fence a single TTM buffer
  *                       object without unreserving it.
  *
index b062b02..5acbf58 100644 (file)
@@ -830,12 +830,7 @@ extern int vmw_user_resource_lookup_handle(
        uint32_t handle,
        const struct vmw_user_resource_conv *converter,
        struct vmw_resource **p_res);
-extern struct vmw_resource *
-vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
-                                     struct ttm_object_file *tfile,
-                                     uint32_t handle,
-                                     const struct vmw_user_resource_conv *
-                                     converter);
+
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -875,15 +870,6 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
 }
 
 /**
- * vmw_user_resource_noref_release - release a user resource pointer looked up
- * without reference
- */
-static inline void vmw_user_resource_noref_release(void)
-{
-       ttm_base_object_noref_release();
-}
-
-/**
  * Buffer object helper functions - vmwgfx_bo.c
  */
 extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
@@ -934,8 +920,6 @@ extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
 extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
                               struct ttm_resource *mem);
 extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
-extern struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle);
 
 /**
  * vmw_bo_adjust_prio - Adjust the buffer object eviction priority
index a5379f6..a44d53e 100644 (file)
@@ -290,20 +290,26 @@ static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
        rcache->valid_handle = 0;
 }
 
+enum vmw_val_add_flags {
+       vmw_val_add_flag_none  =      0,
+       vmw_val_add_flag_noctx = 1 << 0,
+};
+
 /**
- * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
- * rcu-protected pointer to the validation list.
+ * vmw_execbuf_res_val_add - Add a resource to the validation list.
  *
  * @sw_context: Pointer to the software context.
  * @res: Unreferenced rcu-protected pointer to the resource.
  * @dirty: Whether to change dirty status.
+ * @flags: specifies whether to use the context or not
  *
  * Returns: 0 on success. Negative error code on failure. Typical error codes
  * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
  */
-static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
-                                        struct vmw_resource *res,
-                                        u32 dirty)
+static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
+                                  struct vmw_resource *res,
+                                  u32 dirty,
+                                  u32 flags)
 {
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
@@ -318,24 +324,30 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
                if (dirty)
                        vmw_validation_res_set_dirty(sw_context->ctx,
                                                     rcache->private, dirty);
-               vmw_user_resource_noref_release();
                return 0;
        }
 
-       priv_size = vmw_execbuf_res_size(dev_priv, res_type);
-       ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
-                                         dirty, (void **)&ctx_info,
-                                         &first_usage);
-       vmw_user_resource_noref_release();
-       if (ret)
-               return ret;
+       if ((flags & vmw_val_add_flag_noctx) != 0) {
+               ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
+                                                 (void **)&ctx_info, NULL);
+               if (ret)
+                       return ret;
 
-       if (priv_size && first_usage) {
-               ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
-                                             ctx_info);
-               if (ret) {
-                       VMW_DEBUG_USER("Failed first usage context setup.\n");
+       } else {
+               priv_size = vmw_execbuf_res_size(dev_priv, res_type);
+               ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
+                                                 dirty, (void **)&ctx_info,
+                                                 &first_usage);
+               if (ret)
                        return ret;
+
+               if (priv_size && first_usage) {
+                       ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
+                                                     ctx_info);
+                       if (ret) {
+                               VMW_DEBUG_USER("Failed first usage context setup.\n");
+                               return ret;
+                       }
                }
        }
 
@@ -344,43 +356,6 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
 }
 
 /**
- * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
- * validation list if it's not already on it
- *
- * @sw_context: Pointer to the software context.
- * @res: Pointer to the resource.
- * @dirty: Whether to change dirty status.
- *
- * Returns: Zero on success. Negative error code on failure.
- */
-static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
-                                        struct vmw_resource *res,
-                                        u32 dirty)
-{
-       struct vmw_res_cache_entry *rcache;
-       enum vmw_res_type res_type = vmw_res_type(res);
-       void *ptr;
-       int ret;
-
-       rcache = &sw_context->res_cache[res_type];
-       if (likely(rcache->valid && rcache->res == res)) {
-               if (dirty)
-                       vmw_validation_res_set_dirty(sw_context->ctx,
-                                                    rcache->private, dirty);
-               return 0;
-       }
-
-       ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
-                                         &ptr, NULL);
-       if (ret)
-               return ret;
-
-       vmw_execbuf_rcache_update(rcache, res, ptr);
-
-       return 0;
-}
-
-/**
  * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
  * validation list
  *
@@ -398,13 +373,13 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
         * First add the resource the view is pointing to, otherwise it may be
         * swapped out when the view is validated.
         */
-       ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
-                                           vmw_view_dirtying(view));
+       ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
+                                     vmw_view_dirtying(view), vmw_val_add_flag_noctx);
        if (ret)
                return ret;
 
-       return vmw_execbuf_res_noctx_val_add(sw_context, view,
-                                            VMW_RES_DIRTY_NONE);
+       return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
+                                      vmw_val_add_flag_noctx);
 }
 
 /**
@@ -475,8 +450,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                        if (IS_ERR(res))
                                continue;
 
-                       ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
-                                                           VMW_RES_DIRTY_SET);
+                       ret = vmw_execbuf_res_val_add(sw_context, res,
+                                                     VMW_RES_DIRTY_SET,
+                                                     vmw_val_add_flag_noctx);
                        if (unlikely(ret != 0))
                                return ret;
                }
@@ -490,9 +466,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                if (vmw_res_type(entry->res) == vmw_res_view)
                        ret = vmw_view_res_val_add(sw_context, entry->res);
                else
-                       ret = vmw_execbuf_res_noctx_val_add
-                               (sw_context, entry->res,
-                                vmw_binding_dirtying(entry->bt));
+                       ret = vmw_execbuf_res_val_add(sw_context, entry->res,
+                                                     vmw_binding_dirtying(entry->bt),
+                                                     vmw_val_add_flag_noctx);
                if (unlikely(ret != 0))
                        break;
        }
@@ -658,7 +634,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
 {
        struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
        struct vmw_resource *res;
-       int ret;
+       int ret = 0;
+       bool needs_unref = false;
 
        if (p_res)
                *p_res = NULL;
@@ -683,17 +660,18 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
                if (ret)
                        return ret;
 
-               res = vmw_user_resource_noref_lookup_handle
-                       (dev_priv, sw_context->fp->tfile, *id_loc, converter);
-               if (IS_ERR(res)) {
+               ret = vmw_user_resource_lookup_handle
+                       (dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
+               if (ret != 0) {
                        VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
                                       (unsigned int) *id_loc);
-                       return PTR_ERR(res);
+                       return ret;
                }
+               needs_unref = true;
 
-               ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
+               ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
                if (unlikely(ret != 0))
-                       return ret;
+                       goto res_check_done;
 
                if (rcache->valid && rcache->res == res) {
                        rcache->valid_handle = true;
@@ -708,7 +686,11 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
        if (p_res)
                *p_res = res;
 
-       return 0;
+res_check_done:
+       if (needs_unref)
+               vmw_resource_unreference(&res);
+
+       return ret;
 }
 
 /**
@@ -1171,9 +1153,9 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
        int ret;
 
        vmw_validation_preload_bo(sw_context->ctx);
-       vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
-       if (IS_ERR(vmw_bo)) {
-               VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
+       ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
+       if (ret != 0) {
+               drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
                return PTR_ERR(vmw_bo);
        }
        ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
@@ -1225,9 +1207,9 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
        int ret;
 
        vmw_validation_preload_bo(sw_context->ctx);
-       vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
-       if (IS_ERR(vmw_bo)) {
-               VMW_DEBUG_USER("Could not find or use GMR region.\n");
+       ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
+       if (ret != 0) {
+               drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
                return PTR_ERR(vmw_bo);
        }
        ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
@@ -2025,8 +2007,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
                res = vmw_shader_lookup(vmw_context_res_man(ctx),
                                        cmd->body.shid, cmd->body.type);
                if (!IS_ERR(res)) {
-                       ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
-                                                           VMW_RES_DIRTY_NONE);
+                       ret = vmw_execbuf_res_val_add(sw_context, res,
+                                                     VMW_RES_DIRTY_NONE,
+                                                     vmw_val_add_flag_noctx);
                        if (unlikely(ret != 0))
                                return ret;
 
@@ -2273,8 +2256,9 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
                        return PTR_ERR(res);
                }
 
-               ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
-                                                   VMW_RES_DIRTY_NONE);
+               ret = vmw_execbuf_res_val_add(sw_context, res,
+                                             VMW_RES_DIRTY_NONE,
+                                             vmw_val_add_flag_noctx);
                if (ret)
                        return ret;
        }
@@ -2777,8 +2761,8 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
                return PTR_ERR(res);
        }
 
-       ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
-                                           VMW_RES_DIRTY_NONE);
+       ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
+                                     vmw_val_add_flag_noctx);
        if (ret) {
                VMW_DEBUG_USER("Error creating resource validation node.\n");
                return ret;
@@ -3098,8 +3082,8 @@ static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
 
        vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
 
-       ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
-                                           VMW_RES_DIRTY_NONE);
+       ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
+                                     vmw_val_add_flag_noctx);
        if (ret) {
                DRM_ERROR("Error creating resource validation node.\n");
                return ret;
@@ -3148,8 +3132,8 @@ static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
                return 0;
        }
 
-       ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
-                                           VMW_RES_DIRTY_NONE);
+       ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
+                                     vmw_val_add_flag_noctx);
        if (ret) {
                DRM_ERROR("Error creating resource validation node.\n");
                return ret;
@@ -4066,22 +4050,26 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
        if (ret)
                return ret;
 
-       res = vmw_user_resource_noref_lookup_handle
+       ret = vmw_user_resource_lookup_handle
                (dev_priv, sw_context->fp->tfile, handle,
-                user_context_converter);
-       if (IS_ERR(res)) {
+                user_context_converter, &res);
+       if (ret != 0) {
                VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n",
                               (unsigned int) handle);
-               return PTR_ERR(res);
+               return ret;
        }
 
-       ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
-       if (unlikely(ret != 0))
+       ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
+                                     vmw_val_add_flag_none);
+       if (unlikely(ret != 0)) {
+               vmw_resource_unreference(&res);
                return ret;
+       }
 
        sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
        sw_context->man = vmw_context_res_man(res);
 
+       vmw_resource_unreference(&res);
        return 0;
 }
 
old mode 100755 (executable)
new mode 100644 (file)
index f66caa5..c7d645e 100644 (file)
@@ -281,39 +281,6 @@ out_bad_resource:
        return ret;
 }
 
-/**
- * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
- * TTM user-space handle and perform basic type checks
- *
- * @dev_priv:     Pointer to a device private struct
- * @tfile:        Pointer to a struct ttm_object_file identifying the caller
- * @handle:       The TTM user-space handle
- * @converter:    Pointer to an object describing the resource type
- *
- * If the handle can't be found or is associated with an incorrect resource
- * type, -EINVAL will be returned.
- */
-struct vmw_resource *
-vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
-                                     struct ttm_object_file *tfile,
-                                     uint32_t handle,
-                                     const struct vmw_user_resource_conv
-                                     *converter)
-{
-       struct ttm_base_object *base;
-
-       base = ttm_base_object_noref_lookup(tfile, handle);
-       if (!base)
-               return ERR_PTR(-ESRCH);
-
-       if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
-               ttm_base_object_noref_release();
-               return ERR_PTR(-EINVAL);
-       }
-
-       return converter->base_obj_to_res(base);
-}
-
 /*
  * Helper function that looks either a surface or bo.
  *
index ab125f7..1fb0f71 100644 (file)
@@ -282,7 +282,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
                }
                rc = mp2_ops->get_rep_desc(cl_idx, cl_data->report_descr[i]);
                if (rc)
-                       return rc;
+                       goto cleanup;
                mp2_ops->start(privdata, info);
                status = amd_sfh_wait_for_response
                                (privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
index 4da2f9f..a1d6e08 100644 (file)
@@ -160,7 +160,7 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
                }
                rc = mp2_ops->get_rep_desc(cl_idx, cl_data->report_descr[i]);
                if (rc)
-                       return rc;
+                       goto cleanup;
 
                writel(0, privdata->mmio + AMD_P2C_MSG(0));
                mp2_ops->start(privdata, info);
index 467d789..25ed7b9 100644 (file)
@@ -60,7 +60,6 @@ static int betopff_init(struct hid_device *hid)
        struct list_head *report_list =
                        &hid->report_enum[HID_OUTPUT_REPORT].report_list;
        struct input_dev *dev;
-       int field_count = 0;
        int error;
        int i, j;
 
@@ -86,19 +85,21 @@ static int betopff_init(struct hid_device *hid)
         * -----------------------------------------
         * Do init them with default value.
         */
+       if (report->maxfield < 4) {
+               hid_err(hid, "not enough fields in the report: %d\n",
+                               report->maxfield);
+               return -ENODEV;
+       }
        for (i = 0; i < report->maxfield; i++) {
+               if (report->field[i]->report_count < 1) {
+                       hid_err(hid, "no values in the field\n");
+                       return -ENODEV;
+               }
                for (j = 0; j < report->field[i]->report_count; j++) {
                        report->field[i]->value[j] = 0x00;
-                       field_count++;
                }
        }
 
-       if (field_count < 4) {
-               hid_err(hid, "not enough fields in the report: %d\n",
-                               field_count);
-               return -ENODEV;
-       }
-
        betopff = kzalloc(sizeof(*betopff), GFP_KERNEL);
        if (!betopff)
                return -ENOMEM;
index e8c5e3a..e8b1666 100644 (file)
@@ -344,6 +344,11 @@ static int bigben_probe(struct hid_device *hid,
        }
 
        report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+       if (list_empty(report_list)) {
+               hid_err(hid, "no output report found\n");
+               error = -ENODEV;
+               goto error_hw_stop;
+       }
        bigben->report = list_entry(report_list->next,
                struct hid_report, list);
 
index bd47628..3e18035 100644 (file)
@@ -993,8 +993,8 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
                 * Validating on id 0 means we should examine the first
                 * report in the list.
                 */
-               report = list_entry(
-                               hid->report_enum[type].report_list.next,
+               report = list_first_entry_or_null(
+                               &hid->report_enum[type].report_list,
                                struct hid_report, list);
        } else {
                report = hid->report_enum[type].report_id_hash[id];
index 82713ef..0f8c118 100644 (file)
 #define USB_DEVICE_ID_CH_AXIS_295      0x001c
 
 #define USB_VENDOR_ID_CHERRY           0x046a
-#define USB_DEVICE_ID_CHERRY_MOUSE_000C        0x000c
 #define USB_DEVICE_ID_CHERRY_CYMOTION  0x0023
 #define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR    0x0027
 
 #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_G540   0x0075
 #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_G640   0x0094
 #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01 0x0042
+#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2      0x0905
 #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L 0x0935
 #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S     0x0909
 #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06 0x0078
index f399bf0..27c4089 100644 (file)
@@ -944,6 +944,7 @@ ATTRIBUTE_GROUPS(ps_device);
 
 static int dualsense_get_calibration_data(struct dualsense *ds)
 {
+       struct hid_device *hdev = ds->base.hdev;
        short gyro_pitch_bias, gyro_pitch_plus, gyro_pitch_minus;
        short gyro_yaw_bias, gyro_yaw_plus, gyro_yaw_minus;
        short gyro_roll_bias, gyro_roll_plus, gyro_roll_minus;
@@ -954,6 +955,7 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
        int speed_2x;
        int range_2g;
        int ret = 0;
+       int i;
        uint8_t *buf;
 
        buf = kzalloc(DS_FEATURE_REPORT_CALIBRATION_SIZE, GFP_KERNEL);
@@ -1006,6 +1008,21 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
        ds->gyro_calib_data[2].sens_denom = gyro_roll_plus - gyro_roll_minus;
 
        /*
+        * Sanity check gyro calibration data. This is needed to prevent crashes
+        * during report handling of virtual, clone or broken devices not implementing
+        * calibration data properly.
+        */
+       for (i = 0; i < ARRAY_SIZE(ds->gyro_calib_data); i++) {
+               if (ds->gyro_calib_data[i].sens_denom == 0) {
+                       hid_warn(hdev, "Invalid gyro calibration data for axis (%d), disabling calibration.",
+                                       ds->gyro_calib_data[i].abs_code);
+                       ds->gyro_calib_data[i].bias = 0;
+                       ds->gyro_calib_data[i].sens_numer = DS_GYRO_RANGE;
+                       ds->gyro_calib_data[i].sens_denom = S16_MAX;
+               }
+       }
+
+       /*
         * Set accelerometer calibration and normalization parameters.
         * Data values will be normalized to 1/DS_ACC_RES_PER_G g.
         */
@@ -1027,6 +1044,21 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
        ds->accel_calib_data[2].sens_numer = 2*DS_ACC_RES_PER_G;
        ds->accel_calib_data[2].sens_denom = range_2g;
 
+       /*
+        * Sanity check accelerometer calibration data. This is needed to prevent crashes
+        * during report handling of virtual, clone or broken devices not implementing calibration
+        * data properly.
+        */
+       for (i = 0; i < ARRAY_SIZE(ds->accel_calib_data); i++) {
+               if (ds->accel_calib_data[i].sens_denom == 0) {
+                       hid_warn(hdev, "Invalid accelerometer calibration data for axis (%d), disabling calibration.",
+                                       ds->accel_calib_data[i].abs_code);
+                       ds->accel_calib_data[i].bias = 0;
+                       ds->accel_calib_data[i].sens_numer = DS_ACC_RANGE;
+                       ds->accel_calib_data[i].sens_denom = S16_MAX;
+               }
+       }
+
 err_free:
        kfree(buf);
        return ret;
@@ -1737,6 +1769,7 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
        int speed_2x;
        int range_2g;
        int ret = 0;
+       int i;
        uint8_t *buf;
 
        if (ds4->base.hdev->bus == BUS_USB) {
@@ -1831,6 +1864,21 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
        ds4->gyro_calib_data[2].sens_denom = gyro_roll_plus - gyro_roll_minus;
 
        /*
+        * Sanity check gyro calibration data. This is needed to prevent crashes
+        * during report handling of virtual, clone or broken devices not implementing
+        * calibration data properly.
+        */
+       for (i = 0; i < ARRAY_SIZE(ds4->gyro_calib_data); i++) {
+               if (ds4->gyro_calib_data[i].sens_denom == 0) {
+                       hid_warn(hdev, "Invalid gyro calibration data for axis (%d), disabling calibration.",
+                                       ds4->gyro_calib_data[i].abs_code);
+                       ds4->gyro_calib_data[i].bias = 0;
+                       ds4->gyro_calib_data[i].sens_numer = DS4_GYRO_RANGE;
+                       ds4->gyro_calib_data[i].sens_denom = S16_MAX;
+               }
+       }
+
+       /*
         * Set accelerometer calibration and normalization parameters.
         * Data values will be normalized to 1/DS4_ACC_RES_PER_G g.
         */
@@ -1852,6 +1900,21 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
        ds4->accel_calib_data[2].sens_numer = 2*DS4_ACC_RES_PER_G;
        ds4->accel_calib_data[2].sens_denom = range_2g;
 
+       /*
+        * Sanity check accelerometer calibration data. This is needed to prevent crashes
+        * during report handling of virtual, clone or broken devices not implementing calibration
+        * data properly.
+        */
+       for (i = 0; i < ARRAY_SIZE(ds4->accel_calib_data); i++) {
+               if (ds4->accel_calib_data[i].sens_denom == 0) {
+                       hid_warn(hdev, "Invalid accelerometer calibration data for axis (%d), disabling calibration.",
+                                       ds4->accel_calib_data[i].abs_code);
+                       ds4->accel_calib_data[i].bias = 0;
+                       ds4->accel_calib_data[i].sens_numer = DS4_ACC_RANGE;
+                       ds4->accel_calib_data[i].sens_denom = S16_MAX;
+               }
+       }
+
 err_free:
        kfree(buf);
        return ret;
index 0e9702c..be3ad02 100644 (file)
@@ -54,7 +54,6 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET },
        { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET },
        { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET },
-       { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_MOUSE_000C), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },
index 7fa6fe0..cfbbc39 100644 (file)
@@ -526,6 +526,8 @@ static const struct hid_device_id uclogic_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
                                USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+                               USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
                                USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
                                USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S) },
index cd1233d..3c5eea3 100644 (file)
@@ -1656,6 +1656,8 @@ int uclogic_params_init(struct uclogic_params *params,
        case VID_PID(USB_VENDOR_ID_UGEE,
                     USB_DEVICE_ID_UGEE_PARBLO_A610_PRO):
        case VID_PID(USB_VENDOR_ID_UGEE,
+                    USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2):
+       case VID_PID(USB_VENDOR_ID_UGEE,
                     USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L):
        case VID_PID(USB_VENDOR_ID_UGEE,
                     USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S):
index 40554c8..00046cb 100644 (file)
@@ -104,6 +104,11 @@ void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
        int required_slots = (size / DMA_SLOT_SIZE)
                + 1 * (size % DMA_SLOT_SIZE != 0);
 
+       if (!dev->ishtp_dma_tx_map) {
+               dev_err(dev->devc, "Fail to allocate Tx map\n");
+               return NULL;
+       }
+
        spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
        for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
                free = 1;
@@ -150,6 +155,11 @@ void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
                return;
        }
 
+       if (!dev->ishtp_dma_tx_map) {
+               dev_err(dev->devc, "Fail to allocate Tx map\n");
+               return;
+       }
+
        i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
        spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
        for (j = 0; j < acked_slots; j++) {
index 26b021f..11b1c16 100644 (file)
@@ -2957,15 +2957,18 @@ EXPORT_SYMBOL(__rdma_block_iter_start);
 bool __rdma_block_iter_next(struct ib_block_iter *biter)
 {
        unsigned int block_offset;
+       unsigned int sg_delta;
 
        if (!biter->__sg_nents || !biter->__sg)
                return false;
 
        biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
        block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
-       biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;
+       sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;
 
-       if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
+       if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) {
+               biter->__sg_advance += sg_delta;
+       } else {
                biter->__sg_advance = 0;
                biter->__sg = sg_next(biter->__sg);
                biter->__sg_nents--;
index 186d302..b02f2f0 100644 (file)
@@ -23,18 +23,25 @@ static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
 static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
                              const struct mmu_notifier_range *range,
                              unsigned long cur_seq);
+static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
+                                const struct mmu_notifier_range *range,
+                                unsigned long cur_seq);
 static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
                            struct tid_group *grp,
                            unsigned int start, u16 count,
                            u32 *tidlist, unsigned int *tididx,
                            unsigned int *pmapped);
-static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
-                             struct tid_group **grp);
+static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo);
+static void __clear_tid_node(struct hfi1_filedata *fd,
+                            struct tid_rb_node *node);
 static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);
 
 static const struct mmu_interval_notifier_ops tid_mn_ops = {
        .invalidate = tid_rb_invalidate,
 };
+static const struct mmu_interval_notifier_ops tid_cover_ops = {
+       .invalidate = tid_cover_invalidate,
+};
 
 /*
  * Initialize context and file private data needed for Expected
@@ -253,53 +260,65 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
                tididx = 0, mapped, mapped_pages = 0;
        u32 *tidlist = NULL;
        struct tid_user_buf *tidbuf;
+       unsigned long mmu_seq = 0;
 
        if (!PAGE_ALIGNED(tinfo->vaddr))
                return -EINVAL;
+       if (tinfo->length == 0)
+               return -EINVAL;
 
        tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
        if (!tidbuf)
                return -ENOMEM;
 
+       mutex_init(&tidbuf->cover_mutex);
        tidbuf->vaddr = tinfo->vaddr;
        tidbuf->length = tinfo->length;
        tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
                                GFP_KERNEL);
        if (!tidbuf->psets) {
-               kfree(tidbuf);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto fail_release_mem;
+       }
+
+       if (fd->use_mn) {
+               ret = mmu_interval_notifier_insert(
+                       &tidbuf->notifier, current->mm,
+                       tidbuf->vaddr, tidbuf->npages * PAGE_SIZE,
+                       &tid_cover_ops);
+               if (ret)
+                       goto fail_release_mem;
+               mmu_seq = mmu_interval_read_begin(&tidbuf->notifier);
        }
 
        pinned = pin_rcv_pages(fd, tidbuf);
        if (pinned <= 0) {
-               kfree(tidbuf->psets);
-               kfree(tidbuf);
-               return pinned;
+               ret = (pinned < 0) ? pinned : -ENOSPC;
+               goto fail_unpin;
        }
 
        /* Find sets of physically contiguous pages */
        tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);
 
-       /*
-        * We don't need to access this under a lock since tid_used is per
-        * process and the same process cannot be in hfi1_user_exp_rcv_clear()
-        * and hfi1_user_exp_rcv_setup() at the same time.
-        */
+       /* Reserve the number of expected tids to be used. */
        spin_lock(&fd->tid_lock);
        if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
                pageset_count = fd->tid_limit - fd->tid_used;
        else
                pageset_count = tidbuf->n_psets;
+       fd->tid_used += pageset_count;
        spin_unlock(&fd->tid_lock);
 
-       if (!pageset_count)
-               goto bail;
+       if (!pageset_count) {
+               ret = -ENOSPC;
+               goto fail_unreserve;
+       }
 
        ngroups = pageset_count / dd->rcv_entries.group_size;
        tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
        if (!tidlist) {
                ret = -ENOMEM;
-               goto nomem;
+               goto fail_unreserve;
        }
 
        tididx = 0;
@@ -395,43 +414,78 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
        }
 unlock:
        mutex_unlock(&uctxt->exp_mutex);
-nomem:
        hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
                  mapped_pages, ret);
-       if (tididx) {
-               spin_lock(&fd->tid_lock);
-               fd->tid_used += tididx;
-               spin_unlock(&fd->tid_lock);
-               tinfo->tidcnt = tididx;
-               tinfo->length = mapped_pages * PAGE_SIZE;
-
-               if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
-                                tidlist, sizeof(tidlist[0]) * tididx)) {
-                       /*
-                        * On failure to copy to the user level, we need to undo
-                        * everything done so far so we don't leak resources.
-                        */
-                       tinfo->tidlist = (unsigned long)&tidlist;
-                       hfi1_user_exp_rcv_clear(fd, tinfo);
-                       tinfo->tidlist = 0;
-                       ret = -EFAULT;
-                       goto bail;
+
+       /* fail if nothing was programmed, set error if none provided */
+       if (tididx == 0) {
+               if (ret >= 0)
+                       ret = -ENOSPC;
+               goto fail_unreserve;
+       }
+
+       /* adjust reserved tid_used to actual count */
+       spin_lock(&fd->tid_lock);
+       fd->tid_used -= pageset_count - tididx;
+       spin_unlock(&fd->tid_lock);
+
+       /* unpin all pages not covered by a TID */
+       unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages,
+                       false);
+
+       if (fd->use_mn) {
+               /* check for an invalidate during setup */
+               bool fail = false;
+
+               mutex_lock(&tidbuf->cover_mutex);
+               fail = mmu_interval_read_retry(&tidbuf->notifier, mmu_seq);
+               mutex_unlock(&tidbuf->cover_mutex);
+
+               if (fail) {
+                       ret = -EBUSY;
+                       goto fail_unprogram;
                }
        }
 
-       /*
-        * If not everything was mapped (due to insufficient RcvArray entries,
-        * for example), unpin all unmapped pages so we can pin them nex time.
-        */
-       if (mapped_pages != pinned)
-               unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
-                               (pinned - mapped_pages), false);
-bail:
+       tinfo->tidcnt = tididx;
+       tinfo->length = mapped_pages * PAGE_SIZE;
+
+       if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
+                        tidlist, sizeof(tidlist[0]) * tididx)) {
+               ret = -EFAULT;
+               goto fail_unprogram;
+       }
+
+       if (fd->use_mn)
+               mmu_interval_notifier_remove(&tidbuf->notifier);
+       kfree(tidbuf->pages);
        kfree(tidbuf->psets);
+       kfree(tidbuf);
        kfree(tidlist);
+       return 0;
+
+fail_unprogram:
+       /* unprogram, unmap, and unpin all allocated TIDs */
+       tinfo->tidlist = (unsigned long)tidlist;
+       hfi1_user_exp_rcv_clear(fd, tinfo);
+       tinfo->tidlist = 0;
+       pinned = 0;             /* nothing left to unpin */
+       pageset_count = 0;      /* nothing left reserved */
+fail_unreserve:
+       spin_lock(&fd->tid_lock);
+       fd->tid_used -= pageset_count;
+       spin_unlock(&fd->tid_lock);
+fail_unpin:
+       if (fd->use_mn)
+               mmu_interval_notifier_remove(&tidbuf->notifier);
+       if (pinned > 0)
+               unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false);
+fail_release_mem:
        kfree(tidbuf->pages);
+       kfree(tidbuf->psets);
        kfree(tidbuf);
-       return ret > 0 ? 0 : ret;
+       kfree(tidlist);
+       return ret;
 }
 
 int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
@@ -452,7 +506,7 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
 
        mutex_lock(&uctxt->exp_mutex);
        for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
-               ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
+               ret = unprogram_rcvarray(fd, tidinfo[tididx]);
                if (ret) {
                        hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
                                  ret);
@@ -706,6 +760,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
        }
 
        node->fdata = fd;
+       mutex_init(&node->invalidate_mutex);
        node->phys = page_to_phys(pages[0]);
        node->npages = npages;
        node->rcventry = rcventry;
@@ -721,11 +776,6 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
                        &tid_mn_ops);
                if (ret)
                        goto out_unmap;
-               /*
-                * FIXME: This is in the wrong order, the notifier should be
-                * established before the pages are pinned by pin_rcv_pages.
-                */
-               mmu_interval_read_begin(&node->notifier);
        }
        fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;
 
@@ -745,8 +795,7 @@ out_unmap:
        return -EFAULT;
 }
 
-static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
-                             struct tid_group **grp)
+static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo)
 {
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;
@@ -769,9 +818,6 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
        if (!node || node->rcventry != (uctxt->expected_base + rcventry))
                return -EBADF;
 
-       if (grp)
-               *grp = node->grp;
-
        if (fd->use_mn)
                mmu_interval_notifier_remove(&node->notifier);
        cacheless_tid_rb_remove(fd, node);
@@ -779,23 +825,34 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
        return 0;
 }
 
-static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
+static void __clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
 {
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;
 
+       mutex_lock(&node->invalidate_mutex);
+       if (node->freed)
+               goto done;
+       node->freed = true;
+
        trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
                                 node->npages,
                                 node->notifier.interval_tree.start, node->phys,
                                 node->dma_addr);
 
-       /*
-        * Make sure device has seen the write before we unpin the
-        * pages.
-        */
+       /* Make sure device has seen the write before pages are unpinned */
        hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);
 
        unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);
+done:
+       mutex_unlock(&node->invalidate_mutex);
+}
+
+static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
+{
+       struct hfi1_ctxtdata *uctxt = fd->uctxt;
+
+       __clear_tid_node(fd, node);
 
        node->grp->used--;
        node->grp->map &= ~(1 << (node->rcventry - node->grp->base));
@@ -854,10 +911,16 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
        if (node->freed)
                return true;
 
+       /* take action only if unmapping */
+       if (range->event != MMU_NOTIFY_UNMAP)
+               return true;
+
        trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt,
                                 node->notifier.interval_tree.start,
                                 node->rcventry, node->npages, node->dma_addr);
-       node->freed = true;
+
+       /* clear the hardware rcvarray entry */
+       __clear_tid_node(fdata, node);
 
        spin_lock(&fdata->invalid_lock);
        if (fdata->invalid_tid_idx < uctxt->expected_count) {
@@ -887,6 +950,23 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
        return true;
 }
 
+static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
+                                const struct mmu_notifier_range *range,
+                                unsigned long cur_seq)
+{
+       struct tid_user_buf *tidbuf =
+               container_of(mni, struct tid_user_buf, notifier);
+
+       /* take action only if unmapping */
+       if (range->event == MMU_NOTIFY_UNMAP) {
+               mutex_lock(&tidbuf->cover_mutex);
+               mmu_interval_set_seq(mni, cur_seq);
+               mutex_unlock(&tidbuf->cover_mutex);
+       }
+
+       return true;
+}
+
 static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
                                    struct tid_rb_node *tnode)
 {
index 8c53e41..f8ee997 100644 (file)
@@ -16,6 +16,8 @@ struct tid_pageset {
 };
 
 struct tid_user_buf {
+       struct mmu_interval_notifier notifier;
+       struct mutex cover_mutex;
        unsigned long vaddr;
        unsigned long length;
        unsigned int npages;
@@ -27,6 +29,7 @@ struct tid_user_buf {
 struct tid_rb_node {
        struct mmu_interval_notifier notifier;
        struct hfi1_filedata *fdata;
+       struct mutex invalidate_mutex; /* covers hw removal */
        unsigned long phys;
        struct tid_group *grp;
        u32 rcventry;
index a754fc9..7b41d79 100644 (file)
@@ -98,11 +98,11 @@ enum rxe_device_param {
        RXE_MAX_SRQ                     = DEFAULT_MAX_VALUE - RXE_MIN_SRQ_INDEX,
 
        RXE_MIN_MR_INDEX                = 0x00000001,
-       RXE_MAX_MR_INDEX                = DEFAULT_MAX_VALUE,
-       RXE_MAX_MR                      = DEFAULT_MAX_VALUE - RXE_MIN_MR_INDEX,
-       RXE_MIN_MW_INDEX                = 0x00010001,
-       RXE_MAX_MW_INDEX                = 0x00020000,
-       RXE_MAX_MW                      = 0x00001000,
+       RXE_MAX_MR_INDEX                = DEFAULT_MAX_VALUE >> 1,
+       RXE_MAX_MR                      = RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX,
+       RXE_MIN_MW_INDEX                = RXE_MAX_MR_INDEX + 1,
+       RXE_MAX_MW_INDEX                = DEFAULT_MAX_VALUE,
+       RXE_MAX_MW                      = RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX,
 
        RXE_MAX_PKT_PER_ACK             = 64,
 
index f50620f..1151c0b 100644 (file)
@@ -23,16 +23,16 @@ static const struct rxe_type_info {
                .size           = sizeof(struct rxe_ucontext),
                .elem_offset    = offsetof(struct rxe_ucontext, elem),
                .min_index      = 1,
-               .max_index      = UINT_MAX,
-               .max_elem       = UINT_MAX,
+               .max_index      = RXE_MAX_UCONTEXT,
+               .max_elem       = RXE_MAX_UCONTEXT,
        },
        [RXE_TYPE_PD] = {
                .name           = "pd",
                .size           = sizeof(struct rxe_pd),
                .elem_offset    = offsetof(struct rxe_pd, elem),
                .min_index      = 1,
-               .max_index      = UINT_MAX,
-               .max_elem       = UINT_MAX,
+               .max_index      = RXE_MAX_PD,
+               .max_elem       = RXE_MAX_PD,
        },
        [RXE_TYPE_AH] = {
                .name           = "ah",
@@ -40,7 +40,7 @@ static const struct rxe_type_info {
                .elem_offset    = offsetof(struct rxe_ah, elem),
                .min_index      = RXE_MIN_AH_INDEX,
                .max_index      = RXE_MAX_AH_INDEX,
-               .max_elem       = RXE_MAX_AH_INDEX - RXE_MIN_AH_INDEX + 1,
+               .max_elem       = RXE_MAX_AH,
        },
        [RXE_TYPE_SRQ] = {
                .name           = "srq",
@@ -49,7 +49,7 @@ static const struct rxe_type_info {
                .cleanup        = rxe_srq_cleanup,
                .min_index      = RXE_MIN_SRQ_INDEX,
                .max_index      = RXE_MAX_SRQ_INDEX,
-               .max_elem       = RXE_MAX_SRQ_INDEX - RXE_MIN_SRQ_INDEX + 1,
+               .max_elem       = RXE_MAX_SRQ,
        },
        [RXE_TYPE_QP] = {
                .name           = "qp",
@@ -58,7 +58,7 @@ static const struct rxe_type_info {
                .cleanup        = rxe_qp_cleanup,
                .min_index      = RXE_MIN_QP_INDEX,
                .max_index      = RXE_MAX_QP_INDEX,
-               .max_elem       = RXE_MAX_QP_INDEX - RXE_MIN_QP_INDEX + 1,
+               .max_elem       = RXE_MAX_QP,
        },
        [RXE_TYPE_CQ] = {
                .name           = "cq",
@@ -66,8 +66,8 @@ static const struct rxe_type_info {
                .elem_offset    = offsetof(struct rxe_cq, elem),
                .cleanup        = rxe_cq_cleanup,
                .min_index      = 1,
-               .max_index      = UINT_MAX,
-               .max_elem       = UINT_MAX,
+               .max_index      = RXE_MAX_CQ,
+               .max_elem       = RXE_MAX_CQ,
        },
        [RXE_TYPE_MR] = {
                .name           = "mr",
@@ -76,7 +76,7 @@ static const struct rxe_type_info {
                .cleanup        = rxe_mr_cleanup,
                .min_index      = RXE_MIN_MR_INDEX,
                .max_index      = RXE_MAX_MR_INDEX,
-               .max_elem       = RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX + 1,
+               .max_elem       = RXE_MAX_MR,
        },
        [RXE_TYPE_MW] = {
                .name           = "mw",
@@ -85,7 +85,7 @@ static const struct rxe_type_info {
                .cleanup        = rxe_mw_cleanup,
                .min_index      = RXE_MIN_MW_INDEX,
                .max_index      = RXE_MAX_MW_INDEX,
-               .max_elem       = RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX + 1,
+               .max_elem       = RXE_MAX_MW,
        },
 };
 
index ba6781f..df3196f 100644 (file)
@@ -488,7 +488,7 @@ int qnoc_probe(struct platform_device *pdev)
        }
 
 regmap_done:
-       ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
+       ret = devm_clk_bulk_get_optional(dev, qp->num_clks, qp->bus_clks);
        if (ret)
                return ret;
 
index c2903ae..25a1a32 100644 (file)
@@ -33,6 +33,13 @@ static const char * const bus_a0noc_clocks[] = {
        "aggre0_noc_mpu_cfg"
 };
 
+static const char * const bus_a2noc_clocks[] = {
+       "bus",
+       "bus_a",
+       "aggre2_ufs_axi",
+       "ufs_axi"
+};
+
 static const u16 mas_a0noc_common_links[] = {
        MSM8996_SLAVE_A0NOC_SNOC
 };
@@ -1806,7 +1813,7 @@ static const struct regmap_config msm8996_a0noc_regmap_config = {
        .reg_bits       = 32,
        .reg_stride     = 4,
        .val_bits       = 32,
-       .max_register   = 0x9000,
+       .max_register   = 0x6000,
        .fast_io        = true
 };
 
@@ -1830,7 +1837,7 @@ static const struct regmap_config msm8996_a1noc_regmap_config = {
        .reg_bits       = 32,
        .reg_stride     = 4,
        .val_bits       = 32,
-       .max_register   = 0x7000,
+       .max_register   = 0x5000,
        .fast_io        = true
 };
 
@@ -1851,7 +1858,7 @@ static const struct regmap_config msm8996_a2noc_regmap_config = {
        .reg_bits       = 32,
        .reg_stride     = 4,
        .val_bits       = 32,
-       .max_register   = 0xa000,
+       .max_register   = 0x7000,
        .fast_io        = true
 };
 
@@ -1859,6 +1866,8 @@ static const struct qcom_icc_desc msm8996_a2noc = {
        .type = QCOM_ICC_NOC,
        .nodes = a2noc_nodes,
        .num_nodes = ARRAY_SIZE(a2noc_nodes),
+       .clocks = bus_a2noc_clocks,
+       .num_clocks = ARRAY_SIZE(bus_a2noc_clocks),
        .regmap_cfg = &msm8996_a2noc_regmap_config
 };
 
@@ -1877,7 +1886,7 @@ static const struct regmap_config msm8996_bimc_regmap_config = {
        .reg_bits       = 32,
        .reg_stride     = 4,
        .val_bits       = 32,
-       .max_register   = 0x62000,
+       .max_register   = 0x5a000,
        .fast_io        = true
 };
 
@@ -1988,7 +1997,7 @@ static const struct regmap_config msm8996_mnoc_regmap_config = {
        .reg_bits       = 32,
        .reg_stride     = 4,
        .val_bits       = 32,
-       .max_register   = 0x20000,
+       .max_register   = 0x1c000,
        .fast_io        = true
 };
 
index ab16019..f2425b0 100644 (file)
@@ -3858,7 +3858,9 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 
 static void arm_smmu_device_shutdown(struct platform_device *pdev)
 {
-       arm_smmu_device_remove(pdev);
+       struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
+
+       arm_smmu_device_disable(smmu);
 }
 
 static const struct of_device_id arm_smmu_of_match[] = {
index 719fbca..2ff7a72 100644 (file)
@@ -1316,8 +1316,14 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
 
        switch (cap) {
        case IOMMU_CAP_CACHE_COHERENCY:
-               /* Assume that a coherent TCU implies coherent TBUs */
-               return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
+               /*
+                * It's overwhelmingly the case in practice that when the pagetable
+                * walk interface is connected to a coherent interconnect, all the
+                * translation interfaces are too. Furthermore if the device is
+                * natively coherent, then its translation interface must also be.
+                */
+               return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
+                       device_get_dma_attr(dev) == DEV_DMA_COHERENT;
        case IOMMU_CAP_NOEXEC:
                return true;
        default:
@@ -2185,19 +2191,16 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int arm_smmu_device_remove(struct platform_device *pdev)
+static void arm_smmu_device_shutdown(struct platform_device *pdev)
 {
        struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
 
        if (!smmu)
-               return -ENODEV;
+               return;
 
        if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
                dev_notice(&pdev->dev, "disabling translation\n");
 
-       iommu_device_unregister(&smmu->iommu);
-       iommu_device_sysfs_remove(&smmu->iommu);
-
        arm_smmu_rpm_get(smmu);
        /* Turn the thing off */
        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
@@ -2209,12 +2212,21 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
                clk_bulk_disable(smmu->num_clks, smmu->clks);
 
        clk_bulk_unprepare(smmu->num_clks, smmu->clks);
-       return 0;
 }
 
-static void arm_smmu_device_shutdown(struct platform_device *pdev)
+static int arm_smmu_device_remove(struct platform_device *pdev)
 {
-       arm_smmu_device_remove(pdev);
+       struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
+
+       if (!smmu)
+               return -ENODEV;
+
+       iommu_device_unregister(&smmu->iommu);
+       iommu_device_sysfs_remove(&smmu->iommu);
+
+       arm_smmu_device_shutdown(pdev);
+
+       return 0;
 }
 
 static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
index de91dd8..5f6a85a 100644 (file)
@@ -3185,14 +3185,16 @@ EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);
  */
 int iommu_device_claim_dma_owner(struct device *dev, void *owner)
 {
-       struct iommu_group *group = iommu_group_get(dev);
+       struct iommu_group *group;
        int ret = 0;
 
-       if (!group)
-               return -ENODEV;
        if (WARN_ON(!owner))
                return -EINVAL;
 
+       group = iommu_group_get(dev);
+       if (!group)
+               return -ENODEV;
+
        mutex_lock(&group->mutex);
        if (group->owner_cnt) {
                if (group->owner != owner) {
index a44ad92..fe452ce 100644 (file)
@@ -197,7 +197,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 
        curr = __get_cached_rbnode(iovad, limit_pfn);
        curr_iova = to_iova(curr);
-       retry_pfn = curr_iova->pfn_hi + 1;
+       retry_pfn = curr_iova->pfn_hi;
 
 retry:
        do {
@@ -211,7 +211,7 @@ retry:
        if (high_pfn < size || new_pfn < low_pfn) {
                if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
                        high_pfn = limit_pfn;
-                       low_pfn = retry_pfn;
+                       low_pfn = retry_pfn + 1;
                        curr = iova_find_limit(iovad, limit_pfn);
                        curr_iova = to_iova(curr);
                        goto retry;
index 69682ee..ca581ff 100644 (file)
@@ -683,7 +683,7 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev)
        ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
                                     dev_name(&pdev->dev));
        if (ret)
-               return ret;
+               goto out_clk_unprepare;
 
        ret = iommu_device_register(&data->iommu, &mtk_iommu_v1_ops, dev);
        if (ret)
@@ -698,6 +698,8 @@ out_dev_unreg:
        iommu_device_unregister(&data->iommu);
 out_sysfs_remove:
        iommu_device_sysfs_remove(&data->iommu);
+out_clk_unprepare:
+       clk_disable_unprepare(data->bclk);
        return ret;
 }
 
index 8af6392..02b0240 100644 (file)
@@ -3644,7 +3644,7 @@ EXPORT_SYMBOL_GPL(md_rdev_init);
  */
 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
 {
-       static struct md_rdev *claim_rdev; /* just for claiming the bdev */
+       static struct md_rdev claim_rdev; /* just for claiming the bdev */
        struct md_rdev *rdev;
        sector_t size;
        int err;
@@ -3662,7 +3662,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
 
        rdev->bdev = blkdev_get_by_dev(newdev,
                        FMODE_READ | FMODE_WRITE | FMODE_EXCL,
-                       super_format == -2 ? claim_rdev : rdev);
+                       super_format == -2 ? &claim_rdev : rdev);
        if (IS_ERR(rdev->bdev)) {
                pr_warn("md: could not open device unknown-block(%u,%u).\n",
                        MAJOR(newdev), MINOR(newdev));
index 9c49d00..ea6e9e1 100644 (file)
@@ -47,19 +47,17 @@ static int atmel_ramc_probe(struct platform_device *pdev)
        caps = of_device_get_match_data(&pdev->dev);
 
        if (caps->has_ddrck) {
-               clk = devm_clk_get(&pdev->dev, "ddrck");
+               clk = devm_clk_get_enabled(&pdev->dev, "ddrck");
                if (IS_ERR(clk))
                        return PTR_ERR(clk);
-               clk_prepare_enable(clk);
        }
 
        if (caps->has_mpddr_clk) {
-               clk = devm_clk_get(&pdev->dev, "mpddr");
+               clk = devm_clk_get_enabled(&pdev->dev, "mpddr");
                if (IS_ERR(clk)) {
                        pr_err("AT91 RAMC: couldn't get mpddr clock\n");
                        return PTR_ERR(clk);
                }
-               clk_prepare_enable(clk);
        }
 
        return 0;
index 8450638..efc6c08 100644 (file)
@@ -280,10 +280,9 @@ static int mvebu_devbus_probe(struct platform_device *pdev)
        if (IS_ERR(devbus->base))
                return PTR_ERR(devbus->base);
 
-       clk = devm_clk_get(&pdev->dev, NULL);
+       clk = devm_clk_get_enabled(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);
-       clk_prepare_enable(clk);
 
        /*
         * Obtain clock period in picoseconds,
index 57d9f91..d78f73d 100644 (file)
@@ -1918,7 +1918,8 @@ int gpmc_cs_program_settings(int cs, struct gpmc_settings *p)
                }
        }
 
-       if (p->wait_pin > gpmc_nr_waitpins) {
+       if (p->wait_pin != GPMC_WAITPIN_INVALID &&
+           p->wait_pin > gpmc_nr_waitpins) {
                pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin);
                return -EINVAL;
        }
index 62477e5..7bb73f0 100644 (file)
 #define MC_SID_STREAMID_SECURITY_WRITE_ACCESS_DISABLED BIT(16)
 #define MC_SID_STREAMID_SECURITY_OVERRIDE BIT(8)
 
-static void tegra186_mc_program_sid(struct tegra_mc *mc)
-{
-       unsigned int i;
-
-       for (i = 0; i < mc->soc->num_clients; i++) {
-               const struct tegra_mc_client *client = &mc->soc->clients[i];
-               u32 override, security;
-
-               override = readl(mc->regs + client->regs.sid.override);
-               security = readl(mc->regs + client->regs.sid.security);
-
-               dev_dbg(mc->dev, "client %s: override: %x security: %x\n",
-                       client->name, override, security);
-
-               dev_dbg(mc->dev, "setting SID %u for %s\n", client->sid,
-                       client->name);
-               writel(client->sid, mc->regs + client->regs.sid.override);
-
-               override = readl(mc->regs + client->regs.sid.override);
-               security = readl(mc->regs + client->regs.sid.security);
-
-               dev_dbg(mc->dev, "client %s: override: %x security: %x\n",
-                       client->name, override, security);
-       }
-}
-
 static int tegra186_mc_probe(struct tegra_mc *mc)
 {
        struct platform_device *pdev = to_platform_device(mc->dev);
@@ -85,8 +59,6 @@ populate:
        if (err < 0)
                return err;
 
-       tegra186_mc_program_sid(mc);
-
        return 0;
 }
 
@@ -95,13 +67,6 @@ static void tegra186_mc_remove(struct tegra_mc *mc)
        of_platform_depopulate(mc->dev);
 }
 
-static int tegra186_mc_resume(struct tegra_mc *mc)
-{
-       tegra186_mc_program_sid(mc);
-
-       return 0;
-}
-
 #if IS_ENABLED(CONFIG_IOMMU_API)
 static void tegra186_mc_client_sid_override(struct tegra_mc *mc,
                                            const struct tegra_mc_client *client,
@@ -173,7 +138,6 @@ static int tegra186_mc_probe_device(struct tegra_mc *mc, struct device *dev)
 const struct tegra_mc_ops tegra186_mc_ops = {
        .probe = tegra186_mc_probe,
        .remove = tegra186_mc_remove,
-       .resume = tegra186_mc_resume,
        .probe_device = tegra186_mc_probe_device,
        .handle_irq = tegra30_mc_handle_irq,
 };
index c9902a1..5310606 100644 (file)
@@ -321,7 +321,7 @@ static void fastrpc_free_map(struct kref *ref)
                        perm.vmid = QCOM_SCM_VMID_HLOS;
                        perm.perm = QCOM_SCM_PERM_RWX;
                        err = qcom_scm_assign_mem(map->phys, map->size,
-                               &(map->fl->cctx->vmperms[0].vmid), &perm, 1);
+                               &map->fl->cctx->perms, &perm, 1);
                        if (err) {
                                dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
                                                map->phys, map->size, err);
@@ -334,6 +334,13 @@ static void fastrpc_free_map(struct kref *ref)
                dma_buf_put(map->buf);
        }
 
+       if (map->fl) {
+               spin_lock(&map->fl->lock);
+               list_del(&map->node);
+               spin_unlock(&map->fl->lock);
+               map->fl = NULL;
+       }
+
        kfree(map);
 }
 
@@ -343,38 +350,41 @@ static void fastrpc_map_put(struct fastrpc_map *map)
                kref_put(&map->refcount, fastrpc_free_map);
 }
 
-static void fastrpc_map_get(struct fastrpc_map *map)
+static int fastrpc_map_get(struct fastrpc_map *map)
 {
-       if (map)
-               kref_get(&map->refcount);
+       if (!map)
+               return -ENOENT;
+
+       return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT;
 }
 
 
 static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
-                           struct fastrpc_map **ppmap)
+                           struct fastrpc_map **ppmap, bool take_ref)
 {
+       struct fastrpc_session_ctx *sess = fl->sctx;
        struct fastrpc_map *map = NULL;
+       int ret = -ENOENT;
 
-       mutex_lock(&fl->mutex);
+       spin_lock(&fl->lock);
        list_for_each_entry(map, &fl->maps, node) {
-               if (map->fd == fd) {
-                       *ppmap = map;
-                       mutex_unlock(&fl->mutex);
-                       return 0;
-               }
-       }
-       mutex_unlock(&fl->mutex);
-
-       return -ENOENT;
-}
+               if (map->fd != fd)
+                       continue;
 
-static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
-                           struct fastrpc_map **ppmap)
-{
-       int ret = fastrpc_map_lookup(fl, fd, ppmap);
+               if (take_ref) {
+                       ret = fastrpc_map_get(map);
+                       if (ret) {
+                               dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
+                                       __func__, fd, ret);
+                               break;
+                       }
+               }
 
-       if (!ret)
-               fastrpc_map_get(*ppmap);
+               *ppmap = map;
+               ret = 0;
+               break;
+       }
+       spin_unlock(&fl->lock);
 
        return ret;
 }
@@ -746,7 +756,7 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
        struct fastrpc_map *map = NULL;
        int err = 0;
 
-       if (!fastrpc_map_find(fl, fd, ppmap))
+       if (!fastrpc_map_lookup(fl, fd, ppmap, true))
                return 0;
 
        map = kzalloc(sizeof(*map), GFP_KERNEL);
@@ -788,10 +798,8 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
                 * If subsystem VMIDs are defined in DTSI, then do
                 * hyp_assign from HLOS to those VM(s)
                 */
-               unsigned int perms = BIT(QCOM_SCM_VMID_HLOS);
-
                map->attr = attr;
-               err = qcom_scm_assign_mem(map->phys, (u64)map->size, &perms,
+               err = qcom_scm_assign_mem(map->phys, (u64)map->size, &fl->cctx->perms,
                                fl->cctx->vmperms, fl->cctx->vmcount);
                if (err) {
                        dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
@@ -1070,7 +1078,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
        for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
                if (!fdlist[i])
                        break;
-               if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
+               if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
                        fastrpc_map_put(mmap);
        }
 
@@ -1258,10 +1266,9 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
 
                /* Map if we have any heap VMIDs associated with this ADSP Static Process. */
                if (fl->cctx->vmcount) {
-                       unsigned int perms = BIT(QCOM_SCM_VMID_HLOS);
-
                        err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
-                                                       (u64)fl->cctx->remote_heap->size, &perms,
+                                                       (u64)fl->cctx->remote_heap->size,
+                                                       &fl->cctx->perms,
                                                        fl->cctx->vmperms, fl->cctx->vmcount);
                        if (err) {
                                dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
@@ -1309,7 +1316,7 @@ err_invoke:
                perm.perm = QCOM_SCM_PERM_RWX;
                err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
                                                (u64)fl->cctx->remote_heap->size,
-                                               &(fl->cctx->vmperms[0].vmid), &perm, 1);
+                                               &fl->cctx->perms, &perm, 1);
                if (err)
                        dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
                                fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
@@ -1433,12 +1440,7 @@ err_invoke:
        fl->init_mem = NULL;
        fastrpc_buf_free(imem);
 err_alloc:
-       if (map) {
-               spin_lock(&fl->lock);
-               list_del(&map->node);
-               spin_unlock(&fl->lock);
-               fastrpc_map_put(map);
-       }
+       fastrpc_map_put(map);
 err:
        kfree(args);
 
@@ -1514,10 +1516,8 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
                fastrpc_context_put(ctx);
        }
 
-       list_for_each_entry_safe(map, m, &fl->maps, node) {
-               list_del(&map->node);
+       list_for_each_entry_safe(map, m, &fl->maps, node)
                fastrpc_map_put(map);
-       }
 
        list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
                list_del(&buf->node);
@@ -1894,12 +1894,11 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
        /* Add memory to static PD pool, protection thru hypervisor */
        if (req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
                struct qcom_scm_vmperm perm;
-               int err = 0;
 
                perm.vmid = QCOM_SCM_VMID_HLOS;
                perm.perm = QCOM_SCM_PERM_RWX;
                err = qcom_scm_assign_mem(buf->phys, buf->size,
-                       &(fl->cctx->vmperms[0].vmid), &perm, 1);
+                       &fl->cctx->perms, &perm, 1);
                if (err) {
                        dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
                                        buf->phys, buf->size, err);
index 4a08b62..a81b890 100644 (file)
@@ -702,13 +702,15 @@ void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
        if (cl->state == MEI_FILE_UNINITIALIZED) {
                ret = mei_cl_link(cl);
                if (ret)
-                       goto out;
+                       goto notlinked;
                /* update pointers */
                cl->cldev = cldev;
        }
 
        ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
-out:
+       if (ret)
+               mei_cl_unlink(cl);
+notlinked:
        mutex_unlock(&bus->device_lock);
        if (ret)
                return ERR_PTR(ret);
@@ -758,7 +760,7 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
        if (cl->state == MEI_FILE_UNINITIALIZED) {
                ret = mei_cl_link(cl);
                if (ret)
-                       goto out;
+                       goto notlinked;
                /* update pointers */
                cl->cldev = cldev;
        }
@@ -785,6 +787,9 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
        }
 
 out:
+       if (ret)
+               mei_cl_unlink(cl);
+notlinked:
        mutex_unlock(&bus->device_lock);
 
        return ret;
@@ -1277,7 +1282,6 @@ static void mei_cl_bus_dev_release(struct device *dev)
        mei_cl_flush_queues(cldev->cl, NULL);
        mei_me_cl_put(cldev->me_cl);
        mei_dev_bus_put(cldev->bus);
-       mei_cl_unlink(cldev->cl);
        kfree(cldev->cl);
        kfree(cldev);
 }
index 99966cd..bdc65d5 100644 (file)
 
 #define MEI_DEV_ID_RPL_S      0x7A68  /* Raptor Lake Point S */
 
+#define MEI_DEV_ID_MTL_M      0x7E70  /* Meteor Lake Point M */
+
 /*
  * MEI HW Section
  */
index 704cd0c..5bf0d50 100644 (file)
@@ -118,6 +118,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
 
+       {MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
+
        /* required last entry */
        {0, }
 };
index aa7b05d..4f8d962 100644 (file)
@@ -56,8 +56,6 @@ struct vmci_guest_device {
 
        bool exclusive_vectors;
 
-       struct tasklet_struct datagram_tasklet;
-       struct tasklet_struct bm_tasklet;
        struct wait_queue_head inout_wq;
 
        void *data_buffer;
@@ -304,9 +302,8 @@ static int vmci_check_host_caps(struct pci_dev *pdev)
  * This function assumes that it has exclusive access to the data
  * in register(s) for the duration of the call.
  */
-static void vmci_dispatch_dgs(unsigned long data)
+static void vmci_dispatch_dgs(struct vmci_guest_device *vmci_dev)
 {
-       struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
        u8 *dg_in_buffer = vmci_dev->data_buffer;
        struct vmci_datagram *dg;
        size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
@@ -465,10 +462,8 @@ static void vmci_dispatch_dgs(unsigned long data)
  * Scans the notification bitmap for raised flags, clears them
  * and handles the notifications.
  */
-static void vmci_process_bitmap(unsigned long data)
+static void vmci_process_bitmap(struct vmci_guest_device *dev)
 {
-       struct vmci_guest_device *dev = (struct vmci_guest_device *)data;
-
        if (!dev->notification_bitmap) {
                dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
                return;
@@ -486,13 +481,13 @@ static irqreturn_t vmci_interrupt(int irq, void *_dev)
        struct vmci_guest_device *dev = _dev;
 
        /*
-        * If we are using MSI-X with exclusive vectors then we simply schedule
-        * the datagram tasklet, since we know the interrupt was meant for us.
+        * If we are using MSI-X with exclusive vectors then we simply call
+        * vmci_dispatch_dgs(), since we know the interrupt was meant for us.
         * Otherwise we must read the ICR to determine what to do.
         */
 
        if (dev->exclusive_vectors) {
-               tasklet_schedule(&dev->datagram_tasklet);
+               vmci_dispatch_dgs(dev);
        } else {
                unsigned int icr;
 
@@ -502,12 +497,12 @@ static irqreturn_t vmci_interrupt(int irq, void *_dev)
                        return IRQ_NONE;
 
                if (icr & VMCI_ICR_DATAGRAM) {
-                       tasklet_schedule(&dev->datagram_tasklet);
+                       vmci_dispatch_dgs(dev);
                        icr &= ~VMCI_ICR_DATAGRAM;
                }
 
                if (icr & VMCI_ICR_NOTIFICATION) {
-                       tasklet_schedule(&dev->bm_tasklet);
+                       vmci_process_bitmap(dev);
                        icr &= ~VMCI_ICR_NOTIFICATION;
                }
 
@@ -536,7 +531,7 @@ static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
        struct vmci_guest_device *dev = _dev;
 
        /* For MSI-X we can just assume it was meant for us. */
-       tasklet_schedule(&dev->bm_tasklet);
+       vmci_process_bitmap(dev);
 
        return IRQ_HANDLED;
 }
@@ -638,10 +633,6 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
        vmci_dev->iobase = iobase;
        vmci_dev->mmio_base = mmio_base;
 
-       tasklet_init(&vmci_dev->datagram_tasklet,
-                    vmci_dispatch_dgs, (unsigned long)vmci_dev);
-       tasklet_init(&vmci_dev->bm_tasklet,
-                    vmci_process_bitmap, (unsigned long)vmci_dev);
        init_waitqueue_head(&vmci_dev->inout_wq);
 
        if (mmio_base != NULL) {
@@ -808,8 +799,9 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
         * Request IRQ for legacy or MSI interrupts, or for first
         * MSI-X vector.
         */
-       error = request_irq(pci_irq_vector(pdev, 0), vmci_interrupt,
-                           IRQF_SHARED, KBUILD_MODNAME, vmci_dev);
+       error = request_threaded_irq(pci_irq_vector(pdev, 0), NULL,
+                                    vmci_interrupt, IRQF_SHARED,
+                                    KBUILD_MODNAME, vmci_dev);
        if (error) {
                dev_err(&pdev->dev, "Irq %u in use: %d\n",
                        pci_irq_vector(pdev, 0), error);
@@ -823,9 +815,9 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
         * between the vectors.
         */
        if (vmci_dev->exclusive_vectors) {
-               error = request_irq(pci_irq_vector(pdev, 1),
-                                   vmci_interrupt_bm, 0, KBUILD_MODNAME,
-                                   vmci_dev);
+               error = request_threaded_irq(pci_irq_vector(pdev, 1), NULL,
+                                            vmci_interrupt_bm, 0,
+                                            KBUILD_MODNAME, vmci_dev);
                if (error) {
                        dev_err(&pdev->dev,
                                "Failed to allocate irq %u: %d\n",
@@ -833,9 +825,11 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
                        goto err_free_irq;
                }
                if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) {
-                       error = request_irq(pci_irq_vector(pdev, 2),
-                                           vmci_interrupt_dma_datagram,
-                                           0, KBUILD_MODNAME, vmci_dev);
+                       error = request_threaded_irq(pci_irq_vector(pdev, 2),
+                                                    NULL,
+                                                   vmci_interrupt_dma_datagram,
+                                                    0, KBUILD_MODNAME,
+                                                    vmci_dev);
                        if (error) {
                                dev_err(&pdev->dev,
                                        "Failed to allocate irq %u: %d\n",
@@ -871,8 +865,6 @@ err_free_bm_irq:
 
 err_free_irq:
        free_irq(pci_irq_vector(pdev, 0), vmci_dev);
-       tasklet_kill(&vmci_dev->datagram_tasklet);
-       tasklet_kill(&vmci_dev->bm_tasklet);
 
 err_disable_msi:
        pci_free_irq_vectors(pdev);
@@ -943,9 +935,6 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
        free_irq(pci_irq_vector(pdev, 0), vmci_dev);
        pci_free_irq_vectors(pdev);
 
-       tasklet_kill(&vmci_dev->datagram_tasklet);
-       tasklet_kill(&vmci_dev->bm_tasklet);
-
        if (vmci_dev->notification_bitmap) {
                /*
                 * The device reset above cleared the bitmap state of the
index 89ef0c8..9e73c34 100644 (file)
 #define ESDHC_TUNING_START_TAP_DEFAULT 0x1
 #define ESDHC_TUNING_START_TAP_MASK    0x7f
 #define ESDHC_TUNING_CMD_CRC_CHECK_DISABLE     (1 << 7)
+#define ESDHC_TUNING_STEP_DEFAULT      0x1
 #define ESDHC_TUNING_STEP_MASK         0x00070000
 #define ESDHC_TUNING_STEP_SHIFT                16
 
@@ -1368,7 +1369,7 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
        struct cqhci_host *cq_host = host->mmc->cqe_private;
-       int tmp;
+       u32 tmp;
 
        if (esdhc_is_usdhc(imx_data)) {
                /*
@@ -1423,17 +1424,24 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
 
                if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
                        tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
-                       tmp |= ESDHC_STD_TUNING_EN |
-                               ESDHC_TUNING_START_TAP_DEFAULT;
-                       if (imx_data->boarddata.tuning_start_tap) {
-                               tmp &= ~ESDHC_TUNING_START_TAP_MASK;
+                       tmp |= ESDHC_STD_TUNING_EN;
+
+                       /*
+                        * ROM code or bootloader may config the start tap
+                        * and step, unmask them first.
+                        */
+                       tmp &= ~(ESDHC_TUNING_START_TAP_MASK | ESDHC_TUNING_STEP_MASK);
+                       if (imx_data->boarddata.tuning_start_tap)
                                tmp |= imx_data->boarddata.tuning_start_tap;
-                       }
+                       else
+                               tmp |= ESDHC_TUNING_START_TAP_DEFAULT;
 
                        if (imx_data->boarddata.tuning_step) {
-                               tmp &= ~ESDHC_TUNING_STEP_MASK;
                                tmp |= imx_data->boarddata.tuning_step
                                        << ESDHC_TUNING_STEP_SHIFT;
+                       } else {
+                               tmp |= ESDHC_TUNING_STEP_DEFAULT
+                                       << ESDHC_TUNING_STEP_SHIFT;
                        }
 
                        /* Disable the CMD CRC check for tuning, if not, need to
index b16e12e..3db9f32 100644 (file)
@@ -1492,9 +1492,11 @@ static int sunxi_mmc_remove(struct platform_device *pdev)
        struct sunxi_mmc_host *host = mmc_priv(mmc);
 
        mmc_remove_host(mmc);
-       pm_runtime_force_suspend(&pdev->dev);
-       disable_irq(host->irq);
-       sunxi_mmc_disable(host);
+       pm_runtime_disable(&pdev->dev);
+       if (!pm_runtime_status_suspended(&pdev->dev)) {
+               disable_irq(host->irq);
+               sunxi_mmc_disable(host);
+       }
        dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
        mmc_free_host(mmc);
 
index c26755f..f6f3b43 100644 (file)
@@ -35,12 +35,13 @@ config NET_DSA_LANTIQ_GSWIP
          the xrx200 / VR9 SoC.
 
 config NET_DSA_MT7530
-       tristate "MediaTek MT753x and MT7621 Ethernet switch support"
+       tristate "MediaTek MT7530 and MT7531 Ethernet switch support"
        select NET_DSA_TAG_MTK
        select MEDIATEK_GE_PHY
        help
-         This enables support for the MediaTek MT7530, MT7531, and MT7621
-         Ethernet switch chips.
+         This enables support for the MediaTek MT7530 and MT7531 Ethernet
+         switch chips. Multi-chip module MT7530 in MT7621AT, MT7621DAT,
+         MT7621ST and MT7623AI SoCs is supported.
 
 config NET_DSA_MV88E6060
        tristate "Marvell 88E6060 ethernet switch chip support"
index 47b54ec..6178a96 100644 (file)
@@ -540,10 +540,10 @@ int ksz9477_fdb_del(struct ksz_device *dev, int port,
                ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
 
                /* clear forwarding port */
-               alu_table[2] &= ~BIT(port);
+               alu_table[1] &= ~BIT(port);
 
                /* if there is no port to forward, clear table */
-               if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
+               if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
                        alu_table[0] = 0;
                        alu_table[1] = 0;
                        alu_table[2] = 0;
index c1a633c..e315f66 100644 (file)
@@ -104,7 +104,7 @@ static const struct of_device_id ksz9477_dt_ids[] = {
        },
        {
                .compatible = "microchip,ksz8563",
-               .data = &ksz_switch_chips[KSZ9893]
+               .data = &ksz_switch_chips[KSZ8563]
        },
        {
                .compatible = "microchip,ksz9567",
index 0805f24..c26b859 100644 (file)
@@ -356,7 +356,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
 
        if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) ||
            (port_priv->flags & IFF_BROADCAST && rxb->pkt_type == PACKET_BROADCAST))
-               rxb->offload_fwd_mark = 1;
+               rxb->offload_fwd_mark = port_priv->priv->forwarding;
 
        netif_rx(rxb);
 
index cbf17fc..ec57312 100644 (file)
@@ -3969,7 +3969,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
                test_info->timeout = HWRM_CMD_TIMEOUT;
        for (i = 0; i < bp->num_tests; i++) {
                char *str = test_info->string[i];
-               char *fw_str = resp->test0_name + i * 32;
+               char *fw_str = resp->test_name[i];
 
                if (i == BNXT_MACLPBK_TEST_IDX) {
                        strcpy(str, "Mac loopback test (offline)");
@@ -3980,14 +3980,9 @@ void bnxt_ethtool_init(struct bnxt *bp)
                } else if (i == BNXT_IRQ_TEST_IDX) {
                        strcpy(str, "Interrupt_test (offline)");
                } else {
-                       strscpy(str, fw_str, ETH_GSTRING_LEN);
-                       strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
-                       if (test_info->offline_mask & (1 << i))
-                               strncat(str, " (offline)",
-                                       ETH_GSTRING_LEN - strlen(str));
-                       else
-                               strncat(str, " (online)",
-                                       ETH_GSTRING_LEN - strlen(str));
+                       snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
+                                fw_str, test_info->offline_mask & (1 << i) ?
+                                       "offline" : "online");
                }
        }
 
index 2686a71..a540887 100644 (file)
@@ -10249,14 +10249,7 @@ struct hwrm_selftest_qlist_output {
        u8      unused_0;
        __le16  test_timeout;
        u8      unused_1[2];
-       char    test0_name[32];
-       char    test1_name[32];
-       char    test2_name[32];
-       char    test3_name[32];
-       char    test4_name[32];
-       char    test5_name[32];
-       char    test6_name[32];
-       char    test7_name[32];
+       char    test_name[8][32];
        u8      eyescope_target_BER_support;
        #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED  0x0UL
        #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED  0x1UL
index 59debdc..5874729 100644 (file)
@@ -11166,7 +11166,7 @@ static void tg3_reset_task(struct work_struct *work)
        rtnl_lock();
        tg3_full_lock(tp, 0);
 
-       if (!netif_running(tp->dev)) {
+       if (tp->pcierr_recovery || !netif_running(tp->dev)) {
                tg3_flag_clear(tp, RESET_TASK_PENDING);
                tg3_full_unlock(tp);
                rtnl_unlock();
@@ -18101,6 +18101,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
        netdev_info(netdev, "PCI I/O error detected\n");
 
+       /* Want to make sure that the reset task doesn't run */
+       tg3_reset_task_cancel(tp);
+
        rtnl_lock();
 
        /* Could be second call or maybe we don't have netdev yet */
@@ -18117,9 +18120,6 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
        tg3_timer_stop(tp);
 
-       /* Want to make sure that the reset task doesn't run */
-       tg3_reset_task_cancel(tp);
-
        netif_device_detach(netdev);
 
        /* Clean up software state, even if MMIO is blocked */
index bf0190e..00e2108 100644 (file)
@@ -450,7 +450,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
                /* ring full, shall not happen because queue is stopped if full
                 * below
                 */
-               netif_stop_queue(tx->adapter->netdev);
+               netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
 
                spin_unlock_irqrestore(&tx->lock, flags);
 
@@ -493,7 +493,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
 
        if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
                /* ring can get full with next frame */
-               netif_stop_queue(tx->adapter->netdev);
+               netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
        }
 
        spin_unlock_irqrestore(&tx->lock, flags);
@@ -503,11 +503,14 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
 
 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
 {
+       struct tsnep_tx_entry *entry;
+       struct netdev_queue *nq;
        unsigned long flags;
        int budget = 128;
-       struct tsnep_tx_entry *entry;
-       int count;
        int length;
+       int count;
+
+       nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
 
        spin_lock_irqsave(&tx->lock, flags);
 
@@ -564,8 +567,8 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
        } while (likely(budget));
 
        if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
-           netif_queue_stopped(tx->adapter->netdev)) {
-               netif_wake_queue(tx->adapter->netdev);
+           netif_tx_queue_stopped(nq)) {
+               netif_tx_wake_queue(nq);
        }
 
        spin_unlock_irqrestore(&tx->lock, flags);
index 3f80329..027fff9 100644 (file)
@@ -2410,6 +2410,9 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
 
        cleaned = qman_p_poll_dqrr(np->p, budget);
 
+       if (np->xdp_act & XDP_REDIRECT)
+               xdp_do_flush();
+
        if (cleaned < budget) {
                napi_complete_done(napi, cleaned);
                qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
@@ -2417,9 +2420,6 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
                qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
        }
 
-       if (np->xdp_act & XDP_REDIRECT)
-               xdp_do_flush();
-
        return cleaned;
 }
 
index 0c35abb..2e79d18 100644 (file)
@@ -1993,10 +1993,15 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
                if (rx_cleaned >= budget ||
                    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
                        work_done = budget;
+                       if (ch->xdp.res & XDP_REDIRECT)
+                               xdp_do_flush();
                        goto out;
                }
        } while (store_cleaned);
 
+       if (ch->xdp.res & XDP_REDIRECT)
+               xdp_do_flush();
+
        /* Update NET DIM with the values for this CDAN */
        dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
                                ch->stats.bytes_per_cdan);
@@ -2032,9 +2037,7 @@ out:
                txc_fq->dq_bytes = 0;
        }
 
-       if (ch->xdp.res & XDP_REDIRECT)
-               xdp_do_flush_map();
-       else if (rx_cleaned && ch->xdp.res & XDP_TX)
+       if (rx_cleaned && ch->xdp.res & XDP_TX)
                dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
 
        return work_done;
index 644f3c9..2341597 100644 (file)
@@ -3191,7 +3191,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
        for (q = 0; q < fep->num_rx_queues; q++) {
                rxq = fep->rx_queue[q];
                for (i = 0; i < rxq->bd.ring_size; i++)
-                       page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);
+                       page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
 
                for (i = 0; i < XDP_STATS_TOTAL; i++)
                        rxq->stats[i] = 0;
index 0d1bab4..2a9f1ee 100644 (file)
@@ -249,6 +249,7 @@ struct iavf_cloud_filter {
 
 /* board specific private data structure */
 struct iavf_adapter {
+       struct workqueue_struct *wq;
        struct work_struct reset_task;
        struct work_struct adminq_task;
        struct delayed_work client_task;
@@ -459,7 +460,6 @@ struct iavf_device {
 
 /* needed by iavf_ethtool.c */
 extern char iavf_driver_name[];
-extern struct workqueue_struct *iavf_wq;
 
 static inline const char *iavf_state_str(enum iavf_state_t state)
 {
index d79ead5..6f171d1 100644 (file)
@@ -532,7 +532,7 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
        if (changed_flags & IAVF_FLAG_LEGACY_RX) {
                if (netif_running(netdev)) {
                        adapter->flags |= IAVF_FLAG_RESET_NEEDED;
-                       queue_work(iavf_wq, &adapter->reset_task);
+                       queue_work(adapter->wq, &adapter->reset_task);
                }
        }
 
@@ -672,7 +672,7 @@ static int iavf_set_ringparam(struct net_device *netdev,
 
        if (netif_running(netdev)) {
                adapter->flags |= IAVF_FLAG_RESET_NEEDED;
-               queue_work(iavf_wq, &adapter->reset_task);
+               queue_work(adapter->wq, &adapter->reset_task);
        }
 
        return 0;
@@ -1433,7 +1433,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
        adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
        spin_unlock_bh(&adapter->fdir_fltr_lock);
 
-       mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+       mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 
 ret:
        if (err && fltr)
@@ -1474,7 +1474,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
        spin_unlock_bh(&adapter->fdir_fltr_lock);
 
        if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
-               mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+               mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 
        return err;
 }
@@ -1658,7 +1658,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
        spin_unlock_bh(&adapter->adv_rss_lock);
 
        if (!err)
-               mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+               mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 
        mutex_unlock(&adapter->crit_lock);
 
index adc02ad..4b09785 100644 (file)
@@ -49,7 +49,6 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver")
 MODULE_LICENSE("GPL v2");
 
 static const struct net_device_ops iavf_netdev_ops;
-struct workqueue_struct *iavf_wq;
 
 int iavf_status_to_errno(enum iavf_status status)
 {
@@ -277,7 +276,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
        if (!(adapter->flags &
              (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
                adapter->flags |= IAVF_FLAG_RESET_NEEDED;
-               queue_work(iavf_wq, &adapter->reset_task);
+               queue_work(adapter->wq, &adapter->reset_task);
        }
 }
 
@@ -291,7 +290,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
 void iavf_schedule_request_stats(struct iavf_adapter *adapter)
 {
        adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
-       mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+       mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 }
 
 /**
@@ -411,7 +410,7 @@ static irqreturn_t iavf_msix_aq(int irq, void *data)
 
        if (adapter->state != __IAVF_REMOVE)
                /* schedule work on the private workqueue */
-               queue_work(iavf_wq, &adapter->adminq_task);
+               queue_work(adapter->wq, &adapter->adminq_task);
 
        return IRQ_HANDLED;
 }
@@ -1034,7 +1033,7 @@ int iavf_replace_primary_mac(struct iavf_adapter *adapter,
 
        /* schedule the watchdog task to immediately process the request */
        if (f) {
-               queue_work(iavf_wq, &adapter->watchdog_task.work);
+               mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
                return 0;
        }
        return -ENOMEM;
@@ -1257,7 +1256,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
        adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
        if (CLIENT_ENABLED(adapter))
                adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
-       mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+       mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 }
 
 /**
@@ -1414,7 +1413,7 @@ void iavf_down(struct iavf_adapter *adapter)
                adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
        }
 
-       mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+       mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
 }
 
 /**
@@ -2248,7 +2247,7 @@ iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
 
        if (aq_required) {
                adapter->aq_required |= aq_required;
-               mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+               mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
        }
 }
 
@@ -2693,6 +2692,15 @@ static void iavf_watchdog_task(struct work_struct *work)
                goto restart_watchdog;
        }
 
+       if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
+           adapter->netdev_registered &&
+           !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
+           rtnl_trylock()) {
+               netdev_update_features(adapter->netdev);
+               rtnl_unlock();
+               adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
+       }
+
        if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
                iavf_change_state(adapter, __IAVF_COMM_FAILED);
 
@@ -2700,7 +2708,7 @@ static void iavf_watchdog_task(struct work_struct *work)
                adapter->aq_required = 0;
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
                mutex_unlock(&adapter->crit_lock);
-               queue_work(iavf_wq, &adapter->reset_task);
+               queue_work(adapter->wq, &adapter->reset_task);
                return;
        }
 
@@ -2708,31 +2716,31 @@ static void iavf_watchdog_task(struct work_struct *work)
        case __IAVF_STARTUP:
                iavf_startup(adapter);
                mutex_unlock(&adapter->crit_lock);
-               queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+               queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                                   msecs_to_jiffies(30));
                return;
        case __IAVF_INIT_VERSION_CHECK:
                iavf_init_version_check(adapter);
                mutex_unlock(&adapter->crit_lock);
-               queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+               queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                                   msecs_to_jiffies(30));
                return;
        case __IAVF_INIT_GET_RESOURCES:
                iavf_init_get_resources(adapter);
                mutex_unlock(&adapter->crit_lock);
-               queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+               queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                                   msecs_to_jiffies(1));
                return;
        case __IAVF_INIT_EXTENDED_CAPS:
                iavf_init_process_extended_caps(adapter);
                mutex_unlock(&adapter->crit_lock);
-               queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+               queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                                   msecs_to_jiffies(1));
                return;
        case __IAVF_INIT_CONFIG_ADAPTER:
                iavf_init_config_adapter(adapter);
                mutex_unlock(&adapter->crit_lock);
-               queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+               queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                                   msecs_to_jiffies(1));
                return;
        case __IAVF_INIT_FAILED:
@@ -2751,14 +2759,14 @@ static void iavf_watchdog_task(struct work_struct *work)
                        adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
                        iavf_shutdown_adminq(hw);
                        mutex_unlock(&adapter->crit_lock);
-                       queue_delayed_work(iavf_wq,
+                       queue_delayed_work(adapter->wq,
                                           &adapter->watchdog_task, (5 * HZ));
                        return;
                }
                /* Try again from failed step*/
                iavf_change_state(adapter, adapter->last_state);
                mutex_unlock(&adapter->crit_lock);
-               queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
+               queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
                return;
        case __IAVF_COMM_FAILED:
                if (test_bit(__IAVF_IN_REMOVE_TASK,
@@ -2789,13 +2797,14 @@ static void iavf_watchdog_task(struct work_struct *work)
                adapter->aq_required = 0;
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
                mutex_unlock(&adapter->crit_lock);
-               queue_delayed_work(iavf_wq,
+               queue_delayed_work(adapter->wq,
                                   &adapter->watchdog_task,
                                   msecs_to_jiffies(10));
                return;
        case __IAVF_RESETTING:
                mutex_unlock(&adapter->crit_lock);
-               queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
+               queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+                                  HZ * 2);
                return;
        case __IAVF_DOWN:
        case __IAVF_DOWN_PENDING:
@@ -2834,9 +2843,9 @@ static void iavf_watchdog_task(struct work_struct *work)
                adapter->aq_required = 0;
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
                dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
-               queue_work(iavf_wq, &adapter->reset_task);
+               queue_work(adapter->wq, &adapter->reset_task);
                mutex_unlock(&adapter->crit_lock);
-               queue_delayed_work(iavf_wq,
+               queue_delayed_work(adapter->wq,
                                   &adapter->watchdog_task, HZ * 2);
                return;
        }
@@ -2845,12 +2854,13 @@ static void iavf_watchdog_task(struct work_struct *work)
        mutex_unlock(&adapter->crit_lock);
 restart_watchdog:
        if (adapter->state >= __IAVF_DOWN)
-               queue_work(iavf_wq, &adapter->adminq_task);
+               queue_work(adapter->wq, &adapter->adminq_task);
        if (adapter->aq_required)
-               queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+               queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                                   msecs_to_jiffies(20));
        else
-               queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
+               queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+                                  HZ * 2);
 }
 
 /**
@@ -2952,7 +2962,7 @@ static void iavf_reset_task(struct work_struct *work)
         */
        if (!mutex_trylock(&adapter->crit_lock)) {
                if (adapter->state != __IAVF_REMOVE)
-                       queue_work(iavf_wq, &adapter->reset_task);
+                       queue_work(adapter->wq, &adapter->reset_task);
 
                goto reset_finish;
        }
@@ -3116,7 +3126,7 @@ continue_reset:
        bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
        bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
 
-       mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
+       mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
 
        /* We were running when the reset started, so we need to restore some
         * state here.
@@ -3208,7 +3218,7 @@ static void iavf_adminq_task(struct work_struct *work)
                if (adapter->state == __IAVF_REMOVE)
                        return;
 
-               queue_work(iavf_wq, &adapter->adminq_task);
+               queue_work(adapter->wq, &adapter->adminq_task);
                goto out;
        }
 
@@ -3232,24 +3242,6 @@ static void iavf_adminq_task(struct work_struct *work)
        } while (pending);
        mutex_unlock(&adapter->crit_lock);
 
-       if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
-               if (adapter->netdev_registered ||
-                   !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
-                       struct net_device *netdev = adapter->netdev;
-
-                       rtnl_lock();
-                       netdev_update_features(netdev);
-                       rtnl_unlock();
-                       /* Request VLAN offload settings */
-                       if (VLAN_V2_ALLOWED(adapter))
-                               iavf_set_vlan_offload_features
-                                       (adapter, 0, netdev->features);
-
-                       iavf_set_queue_vlan_tag_loc(adapter);
-               }
-
-               adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
-       }
        if ((adapter->flags &
             (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
            adapter->state == __IAVF_RESETTING)
@@ -4349,7 +4341,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
 
        if (netif_running(netdev)) {
                adapter->flags |= IAVF_FLAG_RESET_NEEDED;
-               queue_work(iavf_wq, &adapter->reset_task);
+               queue_work(adapter->wq, &adapter->reset_task);
        }
 
        return 0;
@@ -4898,6 +4890,13 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw = &adapter->hw;
        hw->back = adapter;
 
+       adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+                                             iavf_driver_name);
+       if (!adapter->wq) {
+               err = -ENOMEM;
+               goto err_alloc_wq;
+       }
+
        adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
        iavf_change_state(adapter, __IAVF_STARTUP);
 
@@ -4942,7 +4941,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
        INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
        INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
-       queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+       queue_delayed_work(adapter->wq, &adapter->watchdog_task,
                           msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
 
        /* Setup the wait queue for indicating transition to down status */
@@ -4954,6 +4953,8 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        return 0;
 
 err_ioremap:
+       destroy_workqueue(adapter->wq);
+err_alloc_wq:
        free_netdev(netdev);
 err_alloc_etherdev:
        pci_disable_pcie_error_reporting(pdev);
@@ -5023,7 +5024,7 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
                return err;
        }
 
-       queue_work(iavf_wq, &adapter->reset_task);
+       queue_work(adapter->wq, &adapter->reset_task);
 
        netif_device_attach(adapter->netdev);
 
@@ -5170,6 +5171,8 @@ static void iavf_remove(struct pci_dev *pdev)
        }
        spin_unlock_bh(&adapter->adv_rss_lock);
 
+       destroy_workqueue(adapter->wq);
+
        free_netdev(netdev);
 
        pci_disable_pcie_error_reporting(pdev);
@@ -5196,24 +5199,11 @@ static struct pci_driver iavf_driver = {
  **/
 static int __init iavf_init_module(void)
 {
-       int ret;
-
        pr_info("iavf: %s\n", iavf_driver_string);
 
        pr_info("%s\n", iavf_copyright);
 
-       iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
-                                 iavf_driver_name);
-       if (!iavf_wq) {
-               pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
-               return -ENOMEM;
-       }
-
-       ret = pci_register_driver(&iavf_driver);
-       if (ret)
-               destroy_workqueue(iavf_wq);
-
-       return ret;
+       return pci_register_driver(&iavf_driver);
 }
 
 module_init(iavf_init_module);
@@ -5227,7 +5217,6 @@ module_init(iavf_init_module);
 static void __exit iavf_exit_module(void)
 {
        pci_unregister_driver(&iavf_driver);
-       destroy_workqueue(iavf_wq);
 }
 
 module_exit(iavf_exit_module);
index 24a701f..365ca0c 100644 (file)
@@ -1952,7 +1952,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                        if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
                                adapter->flags |= IAVF_FLAG_RESET_PENDING;
                                dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
-                               queue_work(iavf_wq, &adapter->reset_task);
+                               queue_work(adapter->wq, &adapter->reset_task);
                        }
                        break;
                default:
@@ -2226,6 +2226,14 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
 
                iavf_process_config(adapter);
                adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
+
+               /* Request VLAN offload settings */
+               if (VLAN_V2_ALLOWED(adapter))
+                       iavf_set_vlan_offload_features(adapter, 0,
+                                                      netdev->features);
+
+               iavf_set_queue_vlan_tag_loc(adapter);
+
                was_mac_changed = !ether_addr_equal(netdev->dev_addr,
                                                    adapter->hw.mac.addr);
 
index 94aa834..a596e07 100644 (file)
@@ -3235,9 +3235,6 @@ int ice_vsi_release(struct ice_vsi *vsi)
                }
        }
 
-       if (vsi->type == ICE_VSI_PF)
-               ice_devlink_destroy_pf_port(pf);
-
        if (vsi->type == ICE_VSI_VF &&
            vsi->agg_node && vsi->agg_node->valid)
                vsi->agg_node->num_vsis--;
index a9a7f8b..237ede2 100644 (file)
@@ -4590,7 +4590,7 @@ static void ice_print_wake_reason(struct ice_pf *pf)
 }
 
 /**
- * ice_register_netdev - register netdev and devlink port
+ * ice_register_netdev - register netdev
  * @pf: pointer to the PF struct
  */
 static int ice_register_netdev(struct ice_pf *pf)
@@ -4602,11 +4602,6 @@ static int ice_register_netdev(struct ice_pf *pf)
        if (!vsi || !vsi->netdev)
                return -EIO;
 
-       err = ice_devlink_create_pf_port(pf);
-       if (err)
-               goto err_devlink_create;
-
-       SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
        err = register_netdev(vsi->netdev);
        if (err)
                goto err_register_netdev;
@@ -4617,8 +4612,6 @@ static int ice_register_netdev(struct ice_pf *pf)
 
        return 0;
 err_register_netdev:
-       ice_devlink_destroy_pf_port(pf);
-err_devlink_create:
        free_netdev(vsi->netdev);
        vsi->netdev = NULL;
        clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
@@ -4636,6 +4629,7 @@ static int
 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
 {
        struct device *dev = &pdev->dev;
+       struct ice_vsi *vsi;
        struct ice_pf *pf;
        struct ice_hw *hw;
        int i, err;
@@ -4918,6 +4912,18 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
        pcie_print_link_status(pf->pdev);
 
 probe_done:
+       err = ice_devlink_create_pf_port(pf);
+       if (err)
+               goto err_create_pf_port;
+
+       vsi = ice_get_main_vsi(pf);
+       if (!vsi || !vsi->netdev) {
+               err = -EINVAL;
+               goto err_netdev_reg;
+       }
+
+       SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
+
        err = ice_register_netdev(pf);
        if (err)
                goto err_netdev_reg;
@@ -4955,6 +4961,8 @@ err_init_aux_unroll:
 err_devlink_reg_param:
        ice_devlink_unregister_params(pf);
 err_netdev_reg:
+       ice_devlink_destroy_pf_port(pf);
+err_create_pf_port:
 err_send_version_unroll:
        ice_vsi_release_all(pf);
 err_alloc_sw_unroll:
@@ -5083,6 +5091,7 @@ static void ice_remove(struct pci_dev *pdev)
        ice_setup_mc_magic_wake(pf);
        ice_vsi_release_all(pf);
        mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
+       ice_devlink_destroy_pf_port(pf);
        ice_set_wake(pf);
        ice_free_irq_msix_misc(pf);
        ice_for_each_vsi(pf, i) {
index 497b777..8a41ad8 100644 (file)
@@ -1012,7 +1012,6 @@ static void otx2_pool_refill_task(struct work_struct *work)
        rbpool = cq->rbpool;
        free_ptrs = cq->pool_ptrs;
 
-       get_cpu();
        while (cq->pool_ptrs) {
                if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
                        /* Schedule a WQ if we fails to free atleast half of the
@@ -1032,7 +1031,6 @@ static void otx2_pool_refill_task(struct work_struct *work)
                pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
                cq->pool_ptrs--;
        }
-       put_cpu();
        cq->refill_task_sched = false;
 }
 
@@ -1387,9 +1385,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
                        err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
                        if (err)
                                goto err_mem;
-                       get_cpu();
                        pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
-                       put_cpu();
                        sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
                }
        }
@@ -1435,21 +1431,18 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
        if (err)
                goto fail;
 
-       get_cpu();
        /* Allocate pointers and free them to aura/pool */
        for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
                pool = &pfvf->qset.pool[pool_id];
                for (ptr = 0; ptr < num_ptrs; ptr++) {
                        err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
                        if (err)
-                               goto err_mem;
+                               return -ENOMEM;
                        pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
                                                   bufptr + OTX2_HEAD_ROOM);
                }
        }
-err_mem:
-       put_cpu();
-       return err ? -ENOMEM : 0;
+       return 0;
 fail:
        otx2_mbox_reset(&pfvf->mbox.mbox, 0);
        otx2_aura_pool_free(pfvf);
index 5bee3c3..3d22cc6 100644 (file)
@@ -736,8 +736,10 @@ static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
        u64 ptrs[2];
 
        ptrs[1] = buf;
+       get_cpu();
        /* Free only one buffer at time during init and teardown */
        __cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
+       put_cpu();
 }
 
 /* Alloc pointer from pool/aura */
index 6dac76f..09d441e 100644 (file)
@@ -637,7 +637,7 @@ mlx5e_htb_update_children(struct mlx5e_htb *htb, struct mlx5e_qos_node *node,
                if (child->bw_share == old_bw_share)
                        continue;
 
-               err_one = mlx5_qos_update_node(htb->mdev, child->hw_id, child->bw_share,
+               err_one = mlx5_qos_update_node(htb->mdev, child->bw_share,
                                               child->max_average_bw, child->hw_id);
                if (!err && err_one) {
                        err = err_one;
@@ -671,7 +671,7 @@ mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
        mlx5e_htb_convert_rate(htb, rate, node->parent, &bw_share);
        mlx5e_htb_convert_ceil(htb, ceil, &max_average_bw);
 
-       err = mlx5_qos_update_node(htb->mdev, node->parent->hw_id, bw_share,
+       err = mlx5_qos_update_node(htb->mdev, bw_share,
                                   max_average_bw, node->hw_id);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
index 585bdc8..4ad19c9 100644 (file)
@@ -578,7 +578,6 @@ int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
 {
        enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
        u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
-       bool unaligned = xsk ? xsk->unaligned : false;
        u16 max_mtu_pkts;
 
        if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
@@ -591,7 +590,7 @@ int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
         * needed number of WQEs exceeds the maximum.
         */
        max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
-                            mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, unaligned));
+                            mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, xsk->unaligned));
        if (params->log_rq_mtu_frames > max_mtu_pkts) {
                mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
                              1 << params->log_rq_mtu_frames, xsk->chunk_size);
index 1cbd2eb..f2c2c75 100644 (file)
@@ -477,7 +477,6 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
        struct mlx5e_sample_flow *sample_flow;
        struct mlx5e_sample_attr *sample_attr;
        struct mlx5_flow_attr *pre_attr;
-       u32 tunnel_id = attr->tunnel_id;
        struct mlx5_eswitch *esw;
        u32 default_tbl_id;
        u32 obj_id;
@@ -522,7 +521,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
        restore_obj.sample.group_id = sample_attr->group_num;
        restore_obj.sample.rate = sample_attr->rate;
        restore_obj.sample.trunc_size = sample_attr->trunc_size;
-       restore_obj.sample.tunnel_id = tunnel_id;
+       restore_obj.sample.tunnel_id = attr->tunnel_id;
        err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id);
        if (err)
                goto err_obj_id;
@@ -548,7 +547,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
        /* For decap action, do decap in the original flow table instead of the
         * default flow table.
         */
-       if (tunnel_id)
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
                pre_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
        pre_attr->modify_hdr = sample_flow->restore->modify_hdr;
        pre_attr->flags = MLX5_ATTR_FLAG_SAMPLE;
index a92e19c..8bed9c3 100644 (file)
@@ -122,11 +122,8 @@ struct mlx5e_ipsec_aso {
        u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
        dma_addr_t dma_addr;
        struct mlx5_aso *aso;
-       /* IPsec ASO caches data on every query call,
-        * so in nested calls, we can use this boolean to save
-        * recursive calls to mlx5e_ipsec_aso_query()
-        */
-       u8 use_cache : 1;
+       /* Protect ASO WQ access, as it is global to whole IPsec */
+       spinlock_t lock;
 };
 
 struct mlx5e_ipsec {
index 8e36142..2461462 100644 (file)
@@ -320,7 +320,6 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
        if (ret)
                goto unlock;
 
-       aso->use_cache = true;
        if (attrs->esn_trigger &&
            !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
                u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);
@@ -333,7 +332,6 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
                    !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) ||
                    !MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable))
                        xfrm_state_check_expire(sa_entry->x);
-       aso->use_cache = false;
 
 unlock:
        spin_unlock(&sa_entry->x->lock);
@@ -398,6 +396,7 @@ int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
                goto err_aso_create;
        }
 
+       spin_lock_init(&aso->lock);
        ipsec->nb.notifier_call = mlx5e_ipsec_event;
        mlx5_notifier_register(mdev, &ipsec->nb);
 
@@ -456,13 +455,12 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
        struct mlx5e_hw_objs *res;
        struct mlx5_aso_wqe *wqe;
        u8 ds_cnt;
+       int ret;
 
        lockdep_assert_held(&sa_entry->x->lock);
-       if (aso->use_cache)
-               return 0;
-
        res = &mdev->mlx5e_res.hw_objs;
 
+       spin_lock_bh(&aso->lock);
        memset(aso->ctx, 0, sizeof(aso->ctx));
        wqe = mlx5_aso_get_wqe(aso->aso);
        ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
@@ -477,7 +475,9 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
        mlx5e_ipsec_aso_copy(ctrl, data);
 
        mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
-       return mlx5_aso_poll_cq(aso->aso, false);
+       ret = mlx5_aso_poll_cq(aso->aso, false);
+       spin_unlock_bh(&aso->lock);
+       return ret;
 }
 
 void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
index dbadaf1..243d5d7 100644 (file)
@@ -166,6 +166,7 @@ struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
  * it's different than the ht->mutex here.
  */
 static struct lock_class_key tc_ht_lock_key;
+static struct lock_class_key tc_ht_wq_key;
 
 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
 static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
@@ -5182,6 +5183,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
                return err;
 
        lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
+       lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
 
        mapping_id = mlx5_query_nic_system_image_guid(dev);
 
@@ -5288,6 +5290,7 @@ int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
                return err;
 
        lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
+       lockdep_init_map(&tc_ht->run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
 
        return 0;
 }
index 4f8a24d..75015d3 100644 (file)
@@ -22,15 +22,13 @@ struct mlx5_esw_rate_group {
 };
 
 static int esw_qos_tsar_config(struct mlx5_core_dev *dev, u32 *sched_ctx,
-                              u32 parent_ix, u32 tsar_ix,
-                              u32 max_rate, u32 bw_share)
+                              u32 tsar_ix, u32 max_rate, u32 bw_share)
 {
        u32 bitmask = 0;
 
        if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
                return -EOPNOTSUPP;
 
-       MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_ix);
        MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
        MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
        bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
@@ -51,7 +49,7 @@ static int esw_qos_group_config(struct mlx5_eswitch *esw, struct mlx5_esw_rate_g
        int err;
 
        err = esw_qos_tsar_config(dev, sched_ctx,
-                                 esw->qos.root_tsar_ix, group->tsar_ix,
+                                 group->tsar_ix,
                                  max_rate, bw_share);
        if (err)
                NL_SET_ERR_MSG_MOD(extack, "E-Switch modify group TSAR element failed");
@@ -67,23 +65,13 @@ static int esw_qos_vport_config(struct mlx5_eswitch *esw,
                                struct netlink_ext_ack *extack)
 {
        u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
-       struct mlx5_esw_rate_group *group = vport->qos.group;
        struct mlx5_core_dev *dev = esw->dev;
-       u32 parent_tsar_ix;
-       void *vport_elem;
        int err;
 
        if (!vport->qos.enabled)
                return -EIO;
 
-       parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
-       MLX5_SET(scheduling_context, sched_ctx, element_type,
-                SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
-       vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
-                                 element_attributes);
-       MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
-
-       err = esw_qos_tsar_config(dev, sched_ctx, parent_tsar_ix, vport->qos.esw_tsar_ix,
+       err = esw_qos_tsar_config(dev, sched_ctx, vport->qos.esw_tsar_ix,
                                  max_rate, bw_share);
        if (err) {
                esw_warn(esw->dev,
index 0dfd574..9daf55e 100644 (file)
@@ -1464,6 +1464,7 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
        mlx5_lag_disable_change(esw->dev);
        down_write(&esw->mode_lock);
        mlx5_eswitch_disable_locked(esw);
+       esw->mode = MLX5_ESWITCH_LEGACY;
        up_write(&esw->mode_lock);
        mlx5_lag_enable_change(esw->dev);
 }
index 96417c5..879555b 100644 (file)
@@ -677,6 +677,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
        mutex_lock(&dev->intf_state_mutex);
        if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) {
                mlx5_core_err(dev, "health works are not permitted at this stage\n");
+               mutex_unlock(&dev->intf_state_mutex);
                return;
        }
        mutex_unlock(&dev->intf_state_mutex);
index df134f6..3d5f2a4 100644 (file)
@@ -2098,7 +2098,7 @@ static void mlx5_core_verify_params(void)
        }
 }
 
-static int __init init(void)
+static int __init mlx5_init(void)
 {
        int err;
 
@@ -2133,7 +2133,7 @@ err_debug:
        return err;
 }
 
-static void __exit cleanup(void)
+static void __exit mlx5_cleanup(void)
 {
        mlx5e_cleanup();
        mlx5_sf_driver_unregister();
@@ -2141,5 +2141,5 @@ static void __exit cleanup(void)
        mlx5_unregister_debugfs();
 }
 
-module_init(init);
-module_exit(cleanup);
+module_init(mlx5_init);
+module_exit(mlx5_cleanup);
index 0777be2..8bce730 100644 (file)
@@ -62,13 +62,12 @@ int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id)
        return mlx5_qos_create_inner_node(mdev, MLX5_QOS_DEFAULT_DWRR_UID, 0, 0, id);
 }
 
-int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id,
+int mlx5_qos_update_node(struct mlx5_core_dev *mdev,
                         u32 bw_share, u32 max_avg_bw, u32 id)
 {
        u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
        u32 bitmask = 0;
 
-       MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
        MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
        MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);
 
index 125e4e4..624ce82 100644 (file)
@@ -23,7 +23,7 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
 int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
                               u32 bw_share, u32 max_avg_bw, u32 *id);
 int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id);
-int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, u32 bw_share,
+int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 bw_share,
                         u32 max_avg_bw, u32 id);
 int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id);
 
index 5314c06..55b484b 100644 (file)
@@ -608,12 +608,12 @@ allocate_new:
                lan966x_fdma_rx_reload(rx);
        }
 
-       if (counter < weight && napi_complete_done(napi, counter))
-               lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
-
        if (redirect)
                xdp_do_flush();
 
+       if (counter < weight && napi_complete_done(napi, counter))
+               lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
+
        return counter;
 }
 
index e708c2d..b144f22 100644 (file)
@@ -1259,13 +1259,20 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
                gic->handler = NULL;
                gic->arg = NULL;
 
+               if (!i)
+                       snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
+                                pci_name(pdev));
+               else
+                       snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
+                                i - 1, pci_name(pdev));
+
                irq = pci_irq_vector(pdev, i);
                if (irq < 0) {
                        err = irq;
                        goto free_mask;
                }
 
-               err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
+               err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
                if (err)
                        goto free_mask;
                irq_set_affinity_and_hint(irq, req_mask);
index 7c2af48..cb1746b 100644 (file)
@@ -1438,6 +1438,10 @@ int qede_poll(struct napi_struct *napi, int budget)
        rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
                        qede_has_rx_work(fp->rxq)) ?
                        qede_rx_int(fp, budget) : 0;
+
+       if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
+               xdp_do_flush();
+
        /* Handle case where we are called by netpoll with a budget of 0 */
        if (rx_work_done < budget || !budget) {
                if (!qede_poll_is_more_work(fp)) {
@@ -1457,9 +1461,6 @@ int qede_poll(struct napi_struct *napi, int budget)
                qede_update_tx_producer(fp->xdp_tx);
        }
 
-       if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
-               xdp_do_flush_map();
-
        return rx_work_done;
 }
 
index b4e0fc7..0f54849 100644 (file)
@@ -1101,14 +1101,14 @@ static void ravb_error_interrupt(struct net_device *ndev)
        ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
        if (eis & EIS_QFS) {
                ris2 = ravb_read(ndev, RIS2);
-               ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
+               ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
                           RIS2);
 
                /* Receive Descriptor Empty int */
                if (ris2 & RIS2_QFF0)
                        priv->stats[RAVB_BE].rx_over_errors++;
 
-                   /* Receive Descriptor Empty int */
+               /* Receive Descriptor Empty int */
                if (ris2 & RIS2_QFF1)
                        priv->stats[RAVB_NC].rx_over_errors++;
 
@@ -2973,6 +2973,9 @@ static int __maybe_unused ravb_suspend(struct device *dev)
        else
                ret = ravb_close(ndev);
 
+       if (priv->info->ccc_gac)
+               ravb_ptp_stop(ndev);
+
        return ret;
 }
 
@@ -3011,6 +3014,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
        /* Restore descriptor base address table */
        ravb_write(ndev, priv->desc_bat_dma, DBAT);
 
+       if (priv->info->ccc_gac)
+               ravb_ptp_init(ndev, priv->pdev);
+
        if (netif_running(ndev)) {
                if (priv->wol_enabled) {
                        ret = ravb_wol_restore(ndev);
index 6441892..2370c77 100644 (file)
@@ -1074,8 +1074,11 @@ static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
                        port = NULL;
                        goto out;
                }
-               if (index == rdev->etha->index)
+               if (index == rdev->etha->index) {
+                       if (!of_device_is_available(port))
+                               port = NULL;
                        break;
+               }
        }
 
 out:
@@ -1106,7 +1109,7 @@ static int rswitch_etha_get_params(struct rswitch_device *rdev)
 
        port = rswitch_get_port_node(rdev);
        if (!port)
-               return -ENODEV;
+               return 0;       /* ignored */
 
        err = of_get_phy_mode(port, &rdev->etha->phy_interface);
        of_node_put(port);
@@ -1324,13 +1327,13 @@ static int rswitch_ether_port_init_all(struct rswitch_private *priv)
 {
        int i, err;
 
-       for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+       rswitch_for_each_enabled_port(priv, i) {
                err = rswitch_ether_port_init_one(priv->rdev[i]);
                if (err)
                        goto err_init_one;
        }
 
-       for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+       rswitch_for_each_enabled_port(priv, i) {
                err = rswitch_serdes_init(priv->rdev[i]);
                if (err)
                        goto err_serdes;
@@ -1339,12 +1342,12 @@ static int rswitch_ether_port_init_all(struct rswitch_private *priv)
        return 0;
 
 err_serdes:
-       for (i--; i >= 0; i--)
+       rswitch_for_each_enabled_port_continue_reverse(priv, i)
                rswitch_serdes_deinit(priv->rdev[i]);
        i = RSWITCH_NUM_PORTS;
 
 err_init_one:
-       for (i--; i >= 0; i--)
+       rswitch_for_each_enabled_port_continue_reverse(priv, i)
                rswitch_ether_port_deinit_one(priv->rdev[i]);
 
        return err;
@@ -1608,6 +1611,7 @@ static int rswitch_device_alloc(struct rswitch_private *priv, int index)
        netif_napi_add(ndev, &rdev->napi, rswitch_poll);
 
        port = rswitch_get_port_node(rdev);
+       rdev->disabled = !port;
        err = of_get_ethdev_address(port, ndev);
        of_node_put(port);
        if (err) {
@@ -1707,16 +1711,16 @@ static int rswitch_init(struct rswitch_private *priv)
        if (err)
                goto err_ether_port_init_all;
 
-       for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+       rswitch_for_each_enabled_port(priv, i) {
                err = register_netdev(priv->rdev[i]->ndev);
                if (err) {
-                       for (i--; i >= 0; i--)
+                       rswitch_for_each_enabled_port_continue_reverse(priv, i)
                                unregister_netdev(priv->rdev[i]->ndev);
                        goto err_register_netdev;
                }
        }
 
-       for (i = 0; i < RSWITCH_NUM_PORTS; i++)
+       rswitch_for_each_enabled_port(priv, i)
                netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
                            priv->rdev[i]->ndev->dev_addr);
 
index edbdd1b..49efb0f 100644 (file)
 #define RSWITCH_MAX_NUM_QUEUES 128
 
 #define RSWITCH_NUM_PORTS      3
+#define rswitch_for_each_enabled_port(priv, i)         \
+       for (i = 0; i < RSWITCH_NUM_PORTS; i++)         \
+               if (priv->rdev[i]->disabled)            \
+                       continue;                       \
+               else
+
+#define rswitch_for_each_enabled_port_continue_reverse(priv, i)        \
+       for (i--; i >= 0; i--)                                  \
+               if (priv->rdev[i]->disabled)                    \
+                       continue;                               \
+               else
 
 #define TX_RING_SIZE           1024
 #define RX_RING_SIZE           1024
@@ -938,6 +949,7 @@ struct rswitch_device {
        struct rswitch_gwca_queue *tx_queue;
        struct rswitch_gwca_queue *rx_queue;
        u8 ts_tag;
+       bool disabled;
 
        int port;
        struct rswitch_etha *etha;
index 9c2d40f..413f660 100644 (file)
@@ -186,11 +186,25 @@ static void dwmac5_handle_dma_err(struct net_device *ndev,
 int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
                              struct stmmac_safety_feature_cfg *safety_feat_cfg)
 {
+       struct stmmac_safety_feature_cfg all_safety_feats = {
+               .tsoee = 1,
+               .mrxpee = 1,
+               .mestee = 1,
+               .mrxee = 1,
+               .mtxee = 1,
+               .epsi = 1,
+               .edpp = 1,
+               .prtyen = 1,
+               .tmouten = 1,
+       };
        u32 value;
 
        if (!asp)
                return -EINVAL;
 
+       if (!safety_feat_cfg)
+               safety_feat_cfg = &all_safety_feats;
+
        /* 1. Enable Safety Features */
        value = readl(ioaddr + MTL_ECC_CONTROL);
        value |= MEEAO; /* MTL ECC Error Addr Status Override */
index 4a2e94f..c4542ec 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <linux/bitfield.h>
+#include <linux/delay.h>
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/device.h>
@@ -150,6 +151,7 @@ static const struct clk_ops g12a_ephy_pll_ops = {
 
 static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
 {
+       u32 value;
        int ret;
 
        /* Enable the phy clock */
@@ -163,18 +165,25 @@ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
 
        /* Initialize ephy control */
        writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);
-       writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
-              FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
-              FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
-              PHY_CNTL1_CLK_EN |
-              PHY_CNTL1_CLKFREQ |
-              PHY_CNTL1_PHY_ENB,
-              priv->regs + ETH_PHY_CNTL1);
+
+       /* Make sure we get a 0 -> 1 transition on the enable bit */
+       value = FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
+               FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
+               FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
+               PHY_CNTL1_CLK_EN |
+               PHY_CNTL1_CLKFREQ;
+       writel(value, priv->regs + ETH_PHY_CNTL1);
        writel(PHY_CNTL2_USE_INTERNAL |
               PHY_CNTL2_SMI_SRC_MAC |
               PHY_CNTL2_RX_CLK_EPHY,
               priv->regs + ETH_PHY_CNTL2);
 
+       value |= PHY_CNTL1_PHY_ENB;
+       writel(value, priv->regs + ETH_PHY_CNTL1);
+
+       /* The phy needs a bit of time to power up */
+       mdelay(10);
+
        return 0;
 }
 
index fcd43d6..d10606f 100644 (file)
@@ -1044,7 +1044,6 @@ static int team_port_enter(struct team *team, struct team_port *port)
                        goto err_port_enter;
                }
        }
-       port->dev->priv_flags |= IFF_NO_ADDRCONF;
 
        return 0;
 
@@ -1058,7 +1057,6 @@ static void team_port_leave(struct team *team, struct team_port *port)
 {
        if (team->ops.port_leave)
                team->ops.port_leave(team, port);
-       port->dev->priv_flags &= ~IFF_NO_ADDRCONF;
        dev_put(team->dev);
 }
 
index 18b3de8..6df14dd 100644 (file)
@@ -1677,13 +1677,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 
        received = virtnet_receive(rq, budget, &xdp_xmit);
 
+       if (xdp_xmit & VIRTIO_XDP_REDIR)
+               xdp_do_flush();
+
        /* Out of packets? */
        if (received < budget)
                virtqueue_napi_complete(napi, rq->vq, received);
 
-       if (xdp_xmit & VIRTIO_XDP_REDIR)
-               xdp_do_flush();
-
        if (xdp_xmit & VIRTIO_XDP_TX) {
                sq = virtnet_xdp_get_sq(vi);
                if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
index bff3128..b115902 100644 (file)
@@ -7937,6 +7937,9 @@ cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
        struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
 
+       if (chan->flags & IEEE80211_CHAN_DISABLED)
+               return -EINVAL;
+
        /* set_channel */
        chspec = channel_to_chanspec(&cfg->d11inf, chan);
        if (chspec != INVCHANSPEC) {
@@ -7961,7 +7964,7 @@ brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
        struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
        struct brcmf_dump_survey survey = {};
        struct ieee80211_supported_band *band;
-       struct ieee80211_channel *chan;
+       enum nl80211_band band_id;
        struct cca_msrmnt_query req;
        u32 noise;
        int err;
@@ -7974,26 +7977,25 @@ brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
                return -EBUSY;
        }
 
-       band = wiphy->bands[NL80211_BAND_2GHZ];
-       if (band && idx >= band->n_channels) {
-               idx -= band->n_channels;
-               band = NULL;
-       }
+       for (band_id = 0; band_id < NUM_NL80211_BANDS; band_id++) {
+               band = wiphy->bands[band_id];
+               if (!band)
+                       continue;
+               if (idx >= band->n_channels) {
+                       idx -= band->n_channels;
+                       continue;
+               }
 
-       if (!band || idx >= band->n_channels) {
-               band = wiphy->bands[NL80211_BAND_5GHZ];
-               if (idx >= band->n_channels)
-                       return -ENOENT;
+               info->channel = &band->channels[idx];
+               break;
        }
+       if (band_id == NUM_NL80211_BANDS)
+               return -ENOENT;
 
        /* Setting current channel to the requested channel */
-       chan = &band->channels[idx];
-       err = cfg80211_set_channel(wiphy, ndev, chan, NL80211_CHAN_HT20);
-       if (err) {
-               info->channel = chan;
-               info->filled = 0;
+       info->filled = 0;
+       if (cfg80211_set_channel(wiphy, ndev, info->channel, NL80211_CHAN_HT20))
                return 0;
-       }
 
        /* Disable mpc */
        brcmf_set_mpc(ifp, 0);
@@ -8028,7 +8030,6 @@ brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
        if (err)
                goto exit;
 
-       info->channel = chan;
        info->noise = noise;
        info->time = ACS_MSRMNT_DELAY;
        info->time_busy = ACS_MSRMNT_DELAY - survey.idle;
@@ -8040,7 +8041,7 @@ brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
                SURVEY_INFO_TIME_TX;
 
        brcmf_dbg(INFO, "OBSS dump: channel %d: survey duration %d\n",
-                 ieee80211_frequency_to_channel(chan->center_freq),
+                 ieee80211_frequency_to_channel(info->channel->center_freq),
                  ACS_MSRMNT_DELAY);
        brcmf_dbg(INFO, "noise(%d) busy(%llu) rx(%llu) tx(%llu)\n",
                  info->noise, info->time_busy, info->time_rx, info->time_tx);
index ae57a9a..b67f6d0 100644 (file)
@@ -1228,7 +1228,7 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
                                BRCMF_NROF_H2D_COMMON_MSGRINGS;
                max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
        }
-       if (max_flowrings > 256) {
+       if (max_flowrings > 512) {
                brcmf_err(bus, "invalid max_flowrings(%d)\n", max_flowrings);
                return -EIO;
        }
index f795548..0616181 100644 (file)
@@ -206,71 +206,103 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
 }
 
 static int
+mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+                   struct mt76_queue_buf *buf, void *data)
+{
+       struct mt76_desc *desc = &q->desc[q->head];
+       struct mt76_queue_entry *entry = &q->entry[q->head];
+       struct mt76_txwi_cache *txwi = NULL;
+       u32 buf1 = 0, ctrl;
+       int idx = q->head;
+       int rx_token;
+
+       ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+
+       if ((q->flags & MT_QFLAG_WED) &&
+           FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
+               txwi = mt76_get_rxwi(dev);
+               if (!txwi)
+                       return -ENOMEM;
+
+               rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
+               if (rx_token < 0) {
+                       mt76_put_rxwi(dev, txwi);
+                       return -ENOMEM;
+               }
+
+               buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
+               ctrl |= MT_DMA_CTL_TO_HOST;
+       }
+
+       WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
+       WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
+       WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+       WRITE_ONCE(desc->info, 0);
+
+       entry->dma_addr[0] = buf->addr;
+       entry->dma_len[0] = buf->len;
+       entry->txwi = txwi;
+       entry->buf = data;
+       entry->wcid = 0xffff;
+       entry->skip_buf1 = true;
+       q->head = (q->head + 1) % q->ndesc;
+       q->queued++;
+
+       return idx;
+}
+
+static int
 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
                 struct mt76_queue_buf *buf, int nbufs, u32 info,
                 struct sk_buff *skb, void *txwi)
 {
        struct mt76_queue_entry *entry;
        struct mt76_desc *desc;
-       u32 ctrl;
        int i, idx = -1;
+       u32 ctrl, next;
+
+       if (txwi) {
+               q->entry[q->head].txwi = DMA_DUMMY_DATA;
+               q->entry[q->head].skip_buf0 = true;
+       }
 
        for (i = 0; i < nbufs; i += 2, buf += 2) {
                u32 buf0 = buf[0].addr, buf1 = 0;
 
                idx = q->head;
-               q->head = (q->head + 1) % q->ndesc;
+               next = (q->head + 1) % q->ndesc;
 
                desc = &q->desc[idx];
                entry = &q->entry[idx];
 
-               if ((q->flags & MT_QFLAG_WED) &&
-                   FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
-                       struct mt76_txwi_cache *t = txwi;
-                       int rx_token;
-
-                       if (!t)
-                               return -ENOMEM;
-
-                       rx_token = mt76_rx_token_consume(dev, (void *)skb, t,
-                                                        buf[0].addr);
-                       buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
-                       ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len) |
-                              MT_DMA_CTL_TO_HOST;
-               } else {
-                       if (txwi) {
-                               q->entry[q->head].txwi = DMA_DUMMY_DATA;
-                               q->entry[q->head].skip_buf0 = true;
-                       }
-
-                       if (buf[0].skip_unmap)
-                               entry->skip_buf0 = true;
-                       entry->skip_buf1 = i == nbufs - 1;
-
-                       entry->dma_addr[0] = buf[0].addr;
-                       entry->dma_len[0] = buf[0].len;
-
-                       ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
-                       if (i < nbufs - 1) {
-                               entry->dma_addr[1] = buf[1].addr;
-                               entry->dma_len[1] = buf[1].len;
-                               buf1 = buf[1].addr;
-                               ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
-                               if (buf[1].skip_unmap)
-                                       entry->skip_buf1 = true;
-                       }
-
-                       if (i == nbufs - 1)
-                               ctrl |= MT_DMA_CTL_LAST_SEC0;
-                       else if (i == nbufs - 2)
-                               ctrl |= MT_DMA_CTL_LAST_SEC1;
+               if (buf[0].skip_unmap)
+                       entry->skip_buf0 = true;
+               entry->skip_buf1 = i == nbufs - 1;
+
+               entry->dma_addr[0] = buf[0].addr;
+               entry->dma_len[0] = buf[0].len;
+
+               ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+               if (i < nbufs - 1) {
+                       entry->dma_addr[1] = buf[1].addr;
+                       entry->dma_len[1] = buf[1].len;
+                       buf1 = buf[1].addr;
+                       ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+                       if (buf[1].skip_unmap)
+                               entry->skip_buf1 = true;
                }
 
+               if (i == nbufs - 1)
+                       ctrl |= MT_DMA_CTL_LAST_SEC0;
+               else if (i == nbufs - 2)
+                       ctrl |= MT_DMA_CTL_LAST_SEC1;
+
                WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
                WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
                WRITE_ONCE(desc->info, cpu_to_le32(info));
                WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
 
+               q->head = next;
                q->queued++;
        }
 
@@ -577,17 +609,9 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
        spin_lock_bh(&q->lock);
 
        while (q->queued < q->ndesc - 1) {
-               struct mt76_txwi_cache *t = NULL;
                struct mt76_queue_buf qbuf;
                void *buf = NULL;
 
-               if ((q->flags & MT_QFLAG_WED) &&
-                   FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
-                       t = mt76_get_rxwi(dev);
-                       if (!t)
-                               break;
-               }
-
                buf = page_frag_alloc(rx_page, q->buf_size, GFP_ATOMIC);
                if (!buf)
                        break;
@@ -601,7 +625,12 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
                qbuf.addr = addr + offset;
                qbuf.len = len - offset;
                qbuf.skip_unmap = false;
-               mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t);
+               if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
+                       dma_unmap_single(dev->dma_dev, addr, len,
+                                        DMA_FROM_DEVICE);
+                       skb_free_frag(buf);
+                       break;
+               }
                frames++;
        }
 
index 0a95c3d..8388e2a 100644 (file)
@@ -653,6 +653,13 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 
                desc->buf0 = cpu_to_le32(phy_addr);
                token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
+               if (token < 0) {
+                       dma_unmap_single(dev->mt76.dma_dev, phy_addr,
+                                        wed->wlan.rx_size, DMA_TO_DEVICE);
+                       skb_free_frag(ptr);
+                       goto unmap;
+               }
+
                desc->token |= cpu_to_le32(FIELD_PREP(MT_DMA_CTL_TOKEN,
                                                      token));
                desc++;
index 24568b9..1f309d0 100644 (file)
@@ -764,11 +764,12 @@ int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
        spin_lock_bh(&dev->rx_token_lock);
        token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
                          GFP_ATOMIC);
+       if (token >= 0) {
+               t->ptr = ptr;
+               t->dma_addr = phys;
+       }
        spin_unlock_bh(&dev->rx_token_lock);
 
-       t->ptr = ptr;
-       t->dma_addr = phys;
-
        return token;
 }
 EXPORT_SYMBOL_GPL(mt76_rx_token_consume);
index 82a7458..bf72e5f 100644 (file)
@@ -696,8 +696,8 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
                struct rndis_query      *get;
                struct rndis_query_c    *get_c;
        } u;
-       int ret, buflen;
-       int resplen, respoffs, copylen;
+       int ret;
+       size_t buflen, resplen, respoffs, copylen;
 
        buflen = *len + sizeof(*u.get);
        if (buflen < CONTROL_BUFFER_SIZE)
@@ -732,22 +732,15 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
 
                if (respoffs > buflen) {
                        /* Device returned data offset outside buffer, error. */
-                       netdev_dbg(dev->net, "%s(%s): received invalid "
-                               "data offset: %d > %d\n", __func__,
-                               oid_to_string(oid), respoffs, buflen);
+                       netdev_dbg(dev->net,
+                                  "%s(%s): received invalid data offset: %zu > %zu\n",
+                                  __func__, oid_to_string(oid), respoffs, buflen);
 
                        ret = -EINVAL;
                        goto exit_unlock;
                }
 
-               if ((resplen + respoffs) > buflen) {
-                       /* Device would have returned more data if buffer would
-                        * have been big enough. Copy just the bits that we got.
-                        */
-                       copylen = buflen - respoffs;
-               } else {
-                       copylen = resplen;
-               }
+               copylen = min(resplen, buflen - respoffs);
 
                if (copylen > *len)
                        copylen = *len;
index e36aeb5..b317ce6 100644 (file)
@@ -829,7 +829,23 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
                        apple_nvme_remove_cq(anv);
                }
 
-               nvme_disable_ctrl(&anv->ctrl, shutdown);
+               /*
+                * Always disable the NVMe controller after shutdown.
+                * We need to do this to bring it back up later anyway, and we
+                * can't do it while the firmware is not running (e.g. in the
+                * resume reset path before RTKit is initialized), so for Apple
+                * controllers it makes sense to unconditionally do it here.
+                * Additionally, this sequence of events is reliable, while
+                * others (like disabling after bringing back the firmware on
+                * resume) seem to run into trouble under some circumstances.
+                *
+                * Both U-Boot and m1n1 also use this convention (i.e. an ANS
+                * NVMe controller is handed off with firmware shut down, in an
+                * NVMe disabled state, after a clean shutdown).
+                */
+               if (shutdown)
+                       nvme_disable_ctrl(&anv->ctrl, shutdown);
+               nvme_disable_ctrl(&anv->ctrl, false);
        }
 
        WRITE_ONCE(anv->ioq.enabled, false);
@@ -985,11 +1001,11 @@ static void apple_nvme_reset_work(struct work_struct *work)
                goto out;
        }
 
-       if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
-               apple_nvme_disable(anv, false);
-
        /* RTKit must be shut down cleanly for the (soft)-reset to work */
        if (apple_rtkit_is_running(anv->rtk)) {
+               /* reset the controller if it is enabled */
+               if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
+                       apple_nvme_disable(anv, false);
                dev_dbg(anv->dev, "Trying to shut down RTKit before reset.");
                ret = apple_rtkit_shutdown(anv->rtk);
                if (ret)
@@ -1493,7 +1509,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
        }
 
        ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops,
-                            NVME_QUIRK_SKIP_CID_GEN);
+                            NVME_QUIRK_SKIP_CID_GEN | NVME_QUIRK_IDENTIFY_CNS);
        if (ret) {
                dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl");
                goto put_dev;
index a863991..06f52db 100644 (file)
@@ -8,8 +8,13 @@
 #include <linux/io_uring.h>
 #include "nvme.h"
 
+enum {
+       NVME_IOCTL_VEC          = (1 << 0),
+       NVME_IOCTL_PARTITION    = (1 << 1),
+};
+
 static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
-               fmode_t mode)
+               unsigned int flags, fmode_t mode)
 {
        u32 effects;
 
@@ -17,6 +22,13 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
                return true;
 
        /*
+        * Do not allow unprivileged passthrough on partitions, as that allows an
+        * escape from the containment of the partition.
+        */
+       if (flags & NVME_IOCTL_PARTITION)
+               return false;
+
+       /*
         * Do not allow unprivileged processes to send vendor specific or fabrics
         * commands as we can't be sure about their effects.
         */
@@ -150,7 +162,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
 static int nvme_map_user_request(struct request *req, u64 ubuffer,
                unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
                u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
-               bool vec)
+               unsigned int flags)
 {
        struct request_queue *q = req->q;
        struct nvme_ns *ns = q->queuedata;
@@ -163,7 +175,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
                struct iov_iter iter;
 
                /* fixedbufs is only for non-vectored io */
-               if (WARN_ON_ONCE(vec))
+               if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
                        return -EINVAL;
                ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
                                rq_data_dir(req), &iter, ioucmd);
@@ -172,8 +184,8 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
                ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
        } else {
                ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
-                               bufflen, GFP_KERNEL, vec, 0, 0,
-                               rq_data_dir(req));
+                               bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
+                               0, rq_data_dir(req));
        }
 
        if (ret)
@@ -203,9 +215,9 @@ out:
 }
 
 static int nvme_submit_user_cmd(struct request_queue *q,
-               struct nvme_command *cmd, u64 ubuffer,
-               unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
-               u32 meta_seed, u64 *result, unsigned timeout, bool vec)
+               struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
+               void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
+               u64 *result, unsigned timeout, unsigned int flags)
 {
        struct nvme_ctrl *ctrl;
        struct request *req;
@@ -221,7 +233,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
        req->timeout = timeout;
        if (ubuffer && bufflen) {
                ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
-                               meta_len, meta_seed, &meta, NULL, vec);
+                               meta_len, meta_seed, &meta, NULL, flags);
                if (ret)
                        return ret;
        }
@@ -304,10 +316,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
        c.rw.apptag = cpu_to_le16(io.apptag);
        c.rw.appmask = cpu_to_le16(io.appmask);
 
-       return nvme_submit_user_cmd(ns->queue, &c,
-                       io.addr, length,
-                       metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
-                       false);
+       return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
+                       meta_len, lower_32_bits(io.slba), NULL, 0, 0);
 }
 
 static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
@@ -325,7 +335,8 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
 }
 
 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
-                       struct nvme_passthru_cmd __user *ucmd, fmode_t mode)
+               struct nvme_passthru_cmd __user *ucmd, unsigned int flags,
+               fmode_t mode)
 {
        struct nvme_passthru_cmd cmd;
        struct nvme_command c;
@@ -353,16 +364,15 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        c.common.cdw14 = cpu_to_le32(cmd.cdw14);
        c.common.cdw15 = cpu_to_le32(cmd.cdw15);
 
-       if (!nvme_cmd_allowed(ns, &c, mode))
+       if (!nvme_cmd_allowed(ns, &c, 0, mode))
                return -EACCES;
 
        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);
 
        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-                       cmd.addr, cmd.data_len,
-                       nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
-                       0, &result, timeout, false);
+                       cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
+                       cmd.metadata_len, 0, &result, timeout, 0);
 
        if (status >= 0) {
                if (put_user(result, &ucmd->result))
@@ -373,8 +383,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 }
 
 static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
-                       struct nvme_passthru_cmd64 __user *ucmd, bool vec,
-                       fmode_t mode)
+               struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,
+               fmode_t mode)
 {
        struct nvme_passthru_cmd64 cmd;
        struct nvme_command c;
@@ -401,16 +411,15 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        c.common.cdw14 = cpu_to_le32(cmd.cdw14);
        c.common.cdw15 = cpu_to_le32(cmd.cdw15);
 
-       if (!nvme_cmd_allowed(ns, &c, mode))
+       if (!nvme_cmd_allowed(ns, &c, flags, mode))
                return -EACCES;
 
        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);
 
        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-                       cmd.addr, cmd.data_len,
-                       nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
-                       0, &cmd.result, timeout, vec);
+                       cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
+                       cmd.metadata_len, 0, &cmd.result, timeout, flags);
 
        if (status >= 0) {
                if (put_user(cmd.result, &ucmd->result))
@@ -571,7 +580,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
        c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));
 
-       if (!nvme_cmd_allowed(ns, &c, ioucmd->file->f_mode))
+       if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode))
                return -EACCES;
 
        d.metadata = READ_ONCE(cmd->metadata);
@@ -641,9 +650,9 @@ static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
 {
        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
-               return nvme_user_cmd(ctrl, NULL, argp, mode);
+               return nvme_user_cmd(ctrl, NULL, argp, 0, mode);
        case NVME_IOCTL_ADMIN64_CMD:
-               return nvme_user_cmd64(ctrl, NULL, argp, false, mode);
+               return nvme_user_cmd64(ctrl, NULL, argp, 0, mode);
        default:
                return sed_ioctl(ctrl->opal_dev, cmd, argp);
        }
@@ -668,14 +677,14 @@ struct nvme_user_io32 {
 #endif /* COMPAT_FOR_U64_ALIGNMENT */
 
 static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
-               void __user *argp, fmode_t mode)
+               void __user *argp, unsigned int flags, fmode_t mode)
 {
        switch (cmd) {
        case NVME_IOCTL_ID:
                force_successful_syscall_return();
                return ns->head->ns_id;
        case NVME_IOCTL_IO_CMD:
-               return nvme_user_cmd(ns->ctrl, ns, argp, mode);
+               return nvme_user_cmd(ns->ctrl, ns, argp, flags, mode);
        /*
         * struct nvme_user_io can have different padding on some 32-bit ABIs.
         * Just accept the compat version as all fields that are used are the
@@ -686,37 +695,40 @@ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
 #endif
        case NVME_IOCTL_SUBMIT_IO:
                return nvme_submit_io(ns, argp);
-       case NVME_IOCTL_IO64_CMD:
-               return nvme_user_cmd64(ns->ctrl, ns, argp, false, mode);
        case NVME_IOCTL_IO64_CMD_VEC:
-               return nvme_user_cmd64(ns->ctrl, ns, argp, true, mode);
+               flags |= NVME_IOCTL_VEC;
+               fallthrough;
+       case NVME_IOCTL_IO64_CMD:
+               return nvme_user_cmd64(ns->ctrl, ns, argp, flags, mode);
        default:
                return -ENOTTY;
        }
 }
 
-static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg,
-                       fmode_t mode)
-{
-       if (is_ctrl_ioctl(cmd))
-               return nvme_ctrl_ioctl(ns->ctrl, cmd, arg, mode);
-       return nvme_ns_ioctl(ns, cmd, arg, mode);
-}
-
 int nvme_ioctl(struct block_device *bdev, fmode_t mode,
                unsigned int cmd, unsigned long arg)
 {
        struct nvme_ns *ns = bdev->bd_disk->private_data;
+       void __user *argp = (void __user *)arg;
+       unsigned int flags = 0;
 
-       return __nvme_ioctl(ns, cmd, (void __user *)arg, mode);
+       if (bdev_is_partition(bdev))
+               flags |= NVME_IOCTL_PARTITION;
+
+       if (is_ctrl_ioctl(cmd))
+               return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, mode);
+       return nvme_ns_ioctl(ns, cmd, argp, flags, mode);
 }
 
 long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        struct nvme_ns *ns =
                container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
+       void __user *argp = (void __user *)arg;
 
-       return __nvme_ioctl(ns, cmd, (void __user *)arg, file->f_mode);
+       if (is_ctrl_ioctl(cmd))
+               return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, file->f_mode);
+       return nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode);
 }
 
 static int nvme_uring_cmd_checks(unsigned int issue_flags)
@@ -806,6 +818,10 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
        void __user *argp = (void __user *)arg;
        struct nvme_ns *ns;
        int srcu_idx, ret = -EWOULDBLOCK;
+       unsigned int flags = 0;
+
+       if (bdev_is_partition(bdev))
+               flags |= NVME_IOCTL_PARTITION;
 
        srcu_idx = srcu_read_lock(&head->srcu);
        ns = nvme_find_path(head);
@@ -821,7 +837,7 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
                return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
                                        mode);
 
-       ret = nvme_ns_ioctl(ns, cmd, argp, mode);
+       ret = nvme_ns_ioctl(ns, cmd, argp, flags, mode);
 out_unlock:
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
@@ -846,7 +862,7 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
                return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
                                file->f_mode);
 
-       ret = nvme_ns_ioctl(ns, cmd, argp, file->f_mode);
+       ret = nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode);
 out_unlock:
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
@@ -945,7 +961,7 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
        kref_get(&ns->kref);
        up_read(&ctrl->namespaces_rwsem);
 
-       ret = nvme_user_cmd(ctrl, ns, argp, mode);
+       ret = nvme_user_cmd(ctrl, ns, argp, 0, mode);
        nvme_put_ns(ns);
        return ret;
 
@@ -962,9 +978,9 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,
 
        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
-               return nvme_user_cmd(ctrl, NULL, argp, file->f_mode);
+               return nvme_user_cmd(ctrl, NULL, argp, 0, file->f_mode);
        case NVME_IOCTL_ADMIN64_CMD:
-               return nvme_user_cmd64(ctrl, NULL, argp, false, file->f_mode);
+               return nvme_user_cmd64(ctrl, NULL, argp, 0, file->f_mode);
        case NVME_IOCTL_IO_CMD:
                return nvme_dev_user_cmd(ctrl, argp, file->f_mode);
        case NVME_IOCTL_RESET:
index b13bacc..1ff8843 100644 (file)
@@ -1362,7 +1362,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
        else
                nvme_poll_irqdisable(nvmeq);
 
-       if (blk_mq_request_completed(req)) {
+       if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
                dev_warn(dev->ctrl.device,
                         "I/O %d QID %d timeout, completion polled\n",
                         req->tag, nvmeq->qid);
@@ -2533,7 +2533,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
         */
        result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
        if (result < 0)
-               return result;
+               goto disable;
 
        dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
 
@@ -2586,8 +2586,13 @@ static int nvme_pci_enable(struct nvme_dev *dev)
        pci_enable_pcie_error_reporting(pdev);
        pci_save_state(pdev);
 
-       return nvme_pci_configure_admin_queue(dev);
+       result = nvme_pci_configure_admin_queue(dev);
+       if (result)
+               goto free_irq;
+       return result;
 
+ free_irq:
+       pci_free_irq_vectors(pdev);
  disable:
        pci_disable_device(pdev);
        return result;
@@ -3495,7 +3500,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_SINGLE_VECTOR |
                                NVME_QUIRK_128_BYTES_SQES |
                                NVME_QUIRK_SHARED_TAGS |
-                               NVME_QUIRK_SKIP_CID_GEN },
+                               NVME_QUIRK_SKIP_CID_GEN |
+                               NVME_QUIRK_IDENTIFY_CNS },
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { 0, }
 };
index a0d2713..99ec91e 100644 (file)
@@ -225,7 +225,7 @@ config PCIE_ARTPEC6_EP
 config PCIE_BT1
        tristate "Baikal-T1 PCIe controller"
        depends on MIPS_BAIKAL_T1 || COMPILE_TEST
-       depends on PCI_MSI_IRQ_DOMAIN
+       depends on PCI_MSI
        select PCIE_DW_HOST
        help
          Enables support for the PCIe controller in the Baikal-T1 SoC to work
index 7585e80..afc6355 100644 (file)
@@ -255,7 +255,7 @@ static int imx8_pcie_phy_probe(struct platform_device *pdev)
                imx8_phy->perst =
                        devm_reset_control_get_exclusive(dev, "perst");
                if (IS_ERR(imx8_phy->perst))
-                       dev_err_probe(dev, PTR_ERR(imx8_phy->perst),
+                       return dev_err_probe(dev, PTR_ERR(imx8_phy->perst),
                                      "Failed to get PCIE PHY PERST control\n");
        }
 
index 95c6dbb..ce511ad 100644 (file)
@@ -99,6 +99,7 @@ static int can_transceiver_phy_probe(struct platform_device *pdev)
        struct gpio_desc *standby_gpio;
        struct gpio_desc *enable_gpio;
        u32 max_bitrate = 0;
+       int err;
 
        can_transceiver_phy = devm_kzalloc(dev, sizeof(struct can_transceiver_phy), GFP_KERNEL);
        if (!can_transceiver_phy)
@@ -124,8 +125,8 @@ static int can_transceiver_phy_probe(struct platform_device *pdev)
                return PTR_ERR(phy);
        }
 
-       device_property_read_u32(dev, "max-bitrate", &max_bitrate);
-       if (!max_bitrate)
+       err = device_property_read_u32(dev, "max-bitrate", &max_bitrate);
+       if ((err != -EINVAL) && !max_bitrate)
                dev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit\n");
        phy->attrs.max_link_rate = max_bitrate;
 
index 8807e59..a52a9bf 100644 (file)
@@ -401,26 +401,13 @@ static const struct hsphy_init_seq init_seq_femtophy[] = {
        HSPHY_INIT_CFG(0x90, 0x60, 0),
 };
 
-static const struct hsphy_init_seq init_seq_mdm9607[] = {
-       HSPHY_INIT_CFG(0x80, 0x44, 0),
-       HSPHY_INIT_CFG(0x81, 0x38, 0),
-       HSPHY_INIT_CFG(0x82, 0x24, 0),
-       HSPHY_INIT_CFG(0x83, 0x13, 0),
-};
-
 static const struct hsphy_data hsphy_data_femtophy = {
        .init_seq = init_seq_femtophy,
        .init_seq_num = ARRAY_SIZE(init_seq_femtophy),
 };
 
-static const struct hsphy_data hsphy_data_mdm9607 = {
-       .init_seq = init_seq_mdm9607,
-       .init_seq_num = ARRAY_SIZE(init_seq_mdm9607),
-};
-
 static const struct of_device_id qcom_snps_hsphy_match[] = {
        { .compatible = "qcom,usb-hs-28nm-femtophy", .data = &hsphy_data_femtophy, },
-       { .compatible = "qcom,usb-hs-28nm-mdm9607", .data = &hsphy_data_mdm9607, },
        { },
 };
 MODULE_DEVICE_TABLE(of, qcom_snps_hsphy_match);
index ec6594e..e7588a9 100644 (file)
@@ -126,7 +126,7 @@ r8a779f0_eth_serdes_chan_setting(struct r8a779f0_eth_serdes_channel *channel)
                r8a779f0_eth_serdes_write32(channel->addr, 0x0160, 0x180, 0x0007);
                r8a779f0_eth_serdes_write32(channel->addr, 0x01ac, 0x180, 0x0000);
                r8a779f0_eth_serdes_write32(channel->addr, 0x00c4, 0x180, 0x0310);
-               r8a779f0_eth_serdes_write32(channel->addr, 0x00c8, 0x380, 0x0101);
+               r8a779f0_eth_serdes_write32(channel->addr, 0x00c8, 0x180, 0x0101);
                ret = r8a779f0_eth_serdes_reg_wait(channel, 0x00c8, 0x0180, BIT(0), 0);
                if (ret)
                        return ret;
index e6ededc..a0bc10a 100644 (file)
@@ -485,8 +485,10 @@ static int rockchip_usb2phy_power_on(struct phy *phy)
                return ret;
 
        ret = property_enable(base, &rport->port_cfg->phy_sus, false);
-       if (ret)
+       if (ret) {
+               clk_disable_unprepare(rphy->clk480m);
                return ret;
+       }
 
        /* waiting for the utmi_clk to become stable */
        usleep_range(1500, 2000);
index e827b79..56de410 100644 (file)
@@ -254,6 +254,9 @@ static int sp_usb_phy_probe(struct platform_device *pdev)
                return PTR_ERR(usbphy->phy_regs);
 
        usbphy->moon4_res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "moon4");
+       if (!usbphy->moon4_res_mem)
+               return -EINVAL;
+
        usbphy->moon4_regs = devm_ioremap(&pdev->dev, usbphy->moon4_res_mem->start,
                                          resource_size(usbphy->moon4_res_mem));
        if (!usbphy->moon4_regs)
index 15a3bcf..b905902 100644 (file)
@@ -23,7 +23,7 @@ config PHY_DM816X_USB
 
 config PHY_AM654_SERDES
        tristate "TI AM654 SERDES support"
-       depends on OF && ARCH_K3 || COMPILE_TEST
+       depends on OF && (ARCH_K3 || COMPILE_TEST)
        depends on COMMON_CLK
        select GENERIC_PHY
        select MULTIPLEXER
@@ -35,7 +35,7 @@ config PHY_AM654_SERDES
 
 config PHY_J721E_WIZ
        tristate "TI J721E WIZ (SERDES Wrapper) support"
-       depends on OF && ARCH_K3 || COMPILE_TEST
+       depends on OF && (ARCH_K3 || COMPILE_TEST)
        depends on HAS_IOMEM && OF_ADDRESS
        depends on COMMON_CLK
        select GENERIC_PHY
index 3106a21..d7b244d 100644 (file)
@@ -6,9 +6,10 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/gpio/driver.h>
 #include <linux/pinctrl/pinctrl.h>
+
 #include <linux/mfd/abx500/ab8500.h>
+
 #include "pinctrl-abx500.h"
 
 /* All the pins that can be used for GPIO and some other functions */
index b93af1f..45aa958 100644 (file)
@@ -6,9 +6,10 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/gpio/driver.h>
 #include <linux/pinctrl/pinctrl.h>
+
 #include <linux/mfd/abx500/ab8500.h>
+
 #include "pinctrl-abx500.h"
 
 /* All the pins that can be used for GPIO and some other functions */
index 7aa5345..28c3403 100644 (file)
@@ -6,33 +6,37 @@
  *
  * Driver allows to use AxB5xx unused pins to be used as GPIO
  */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/init.h>
+#include <linux/bitops.h>
 #include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
 #include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
-#include <linux/interrupt.h>
-#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
 #include <linux/mfd/abx500.h>
 #include <linux/mfd/abx500/ab8500.h>
-#include <linux/pinctrl/pinctrl.h>
+
 #include <linux/pinctrl/consumer.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/pinconf.h>
-#include <linux/pinctrl/pinconf-generic.h>
 #include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
 
-#include "pinctrl-abx500.h"
 #include "../core.h"
 #include "../pinconf.h"
 #include "../pinctrl-utils.h"
 
+#include "pinctrl-abx500.h"
+
 /*
  * GPIO registers offset
  * Bank: 0x10
index 90bb12f..d675220 100644 (file)
@@ -2,6 +2,10 @@
 #ifndef PINCTRL_PINCTRL_ABx500_H
 #define PINCTRL_PINCTRL_ABx500_H
 
+#include <linux/types.h>
+
+struct pinctrl_pin_desc;
+
 /* Package definitions */
 #define PINCTRL_AB8500 0
 #define PINCTRL_AB8505 1
index 758d21f..490e095 100644 (file)
@@ -1,6 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/kernel.h>
+#include <linux/types.h>
+
 #include <linux/pinctrl/pinctrl.h>
+
 #include "pinctrl-nomadik.h"
 
 /* All the pins that can be used for GPIO and some other functions */
index c0d7c86..1552222 100644 (file)
@@ -1,6 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/kernel.h>
+#include <linux/types.h>
+
 #include <linux/pinctrl/pinctrl.h>
+
 #include "pinctrl-nomadik.h"
 
 /* All the pins that can be used for GPIO and some other functions */
index f7d0251..86a6380 100644 (file)
@@ -7,30 +7,34 @@
  *   Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com>
  * Copyright (C) 2011-2013 Linus Walleij <linus.walleij@linaro.org>
  */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
+#include <linux/bitops.h>
 #include <linux/clk.h>
+#include <linux/device.h>
 #include <linux/err.h>
 #include <linux/gpio/driver.h>
-#include <linux/spinlock.h>
+#include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/of_device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
 #include <linux/of_address.h>
-#include <linux/bitops.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/* Since we request GPIOs from ourself */
+#include <linux/pinctrl/consumer.h>
 #include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/pinconf.h>
-/* Since we request GPIOs from ourself */
-#include <linux/pinctrl/consumer.h>
-#include "pinctrl-nomadik.h"
+
 #include "../core.h"
 #include "../pinctrl-utils.h"
 
+#include "pinctrl-nomadik.h"
+
 /*
  * The GPIO module in the Nomadik family of Systems-on-Chip is an
  * AMBA device, managing 32 pins and alternate functions.  The logic block
@@ -907,8 +911,6 @@ static int nmk_gpio_get_mode(struct nmk_gpio_chip *nmk_chip, int offset)
        return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? NMK_GPIO_ALT_B : 0);
 }
 
-#include <linux/seq_file.h>
-
 static void nmk_gpio_dbg_show_one(struct seq_file *s,
        struct pinctrl_dev *pctldev, struct gpio_chip *chip,
        unsigned offset, unsigned gpio)
index 84e2977..1ef2559 100644 (file)
@@ -2,6 +2,11 @@
 #ifndef PINCTRL_PINCTRL_NOMADIK_H
 #define PINCTRL_PINCTRL_NOMADIK_H
 
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
 /* Package definitions */
 #define PINCTRL_NMK_STN8815    0
 #define PINCTRL_NMK_DB8500     1
index da974ff..5eeac92 100644 (file)
@@ -926,19 +926,19 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
        RK_MUXROUTE_PMU(0, RK_PB5, 4, 0x0110, WRITE_MASK_VAL(3, 2, 1)), /* PWM1 IO mux M1 */
        RK_MUXROUTE_PMU(0, RK_PC1, 1, 0x0110, WRITE_MASK_VAL(5, 4, 0)), /* PWM2 IO mux M0 */
        RK_MUXROUTE_PMU(0, RK_PB6, 4, 0x0110, WRITE_MASK_VAL(5, 4, 1)), /* PWM2 IO mux M1 */
-       RK_MUXROUTE_PMU(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */
+       RK_MUXROUTE_GRF(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */
        RK_MUXROUTE_GRF(2, RK_PA1, 4, 0x0300, WRITE_MASK_VAL(0, 0, 1)), /* CAN0 IO mux M1 */
        RK_MUXROUTE_GRF(1, RK_PA1, 3, 0x0300, WRITE_MASK_VAL(2, 2, 0)), /* CAN1 IO mux M0 */
        RK_MUXROUTE_GRF(4, RK_PC3, 3, 0x0300, WRITE_MASK_VAL(2, 2, 1)), /* CAN1 IO mux M1 */
        RK_MUXROUTE_GRF(4, RK_PB5, 3, 0x0300, WRITE_MASK_VAL(4, 4, 0)), /* CAN2 IO mux M0 */
        RK_MUXROUTE_GRF(2, RK_PB2, 4, 0x0300, WRITE_MASK_VAL(4, 4, 1)), /* CAN2 IO mux M1 */
        RK_MUXROUTE_GRF(4, RK_PC4, 1, 0x0300, WRITE_MASK_VAL(6, 6, 0)), /* HPDIN IO mux M0 */
-       RK_MUXROUTE_PMU(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */
+       RK_MUXROUTE_GRF(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */
        RK_MUXROUTE_GRF(3, RK_PB1, 3, 0x0300, WRITE_MASK_VAL(8, 8, 0)), /* GMAC1 IO mux M0 */
        RK_MUXROUTE_GRF(4, RK_PA7, 3, 0x0300, WRITE_MASK_VAL(8, 8, 1)), /* GMAC1 IO mux M1 */
        RK_MUXROUTE_GRF(4, RK_PD1, 1, 0x0300, WRITE_MASK_VAL(10, 10, 0)), /* HDMITX IO mux M0 */
-       RK_MUXROUTE_PMU(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */
-       RK_MUXROUTE_PMU(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */
+       RK_MUXROUTE_GRF(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */
+       RK_MUXROUTE_GRF(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */
        RK_MUXROUTE_GRF(4, RK_PB4, 1, 0x0300, WRITE_MASK_VAL(14, 14, 1)), /* I2C2 IO mux M1 */
        RK_MUXROUTE_GRF(1, RK_PA0, 1, 0x0304, WRITE_MASK_VAL(0, 0, 0)), /* I2C3 IO mux M0 */
        RK_MUXROUTE_GRF(3, RK_PB6, 4, 0x0304, WRITE_MASK_VAL(0, 0, 1)), /* I2C3 IO mux M1 */
@@ -964,7 +964,7 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
        RK_MUXROUTE_GRF(4, RK_PC3, 1, 0x0308, WRITE_MASK_VAL(12, 12, 1)), /* PWM15 IO mux M1 */
        RK_MUXROUTE_GRF(3, RK_PD2, 3, 0x0308, WRITE_MASK_VAL(14, 14, 0)), /* SDMMC2 IO mux M0 */
        RK_MUXROUTE_GRF(3, RK_PA5, 5, 0x0308, WRITE_MASK_VAL(14, 14, 1)), /* SDMMC2 IO mux M1 */
-       RK_MUXROUTE_PMU(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */
+       RK_MUXROUTE_GRF(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */
        RK_MUXROUTE_GRF(2, RK_PD3, 3, 0x030c, WRITE_MASK_VAL(0, 0, 1)), /* SPI0 IO mux M1 */
        RK_MUXROUTE_GRF(2, RK_PB5, 3, 0x030c, WRITE_MASK_VAL(2, 2, 0)), /* SPI1 IO mux M0 */
        RK_MUXROUTE_GRF(3, RK_PC3, 3, 0x030c, WRITE_MASK_VAL(2, 2, 1)), /* SPI1 IO mux M1 */
@@ -973,8 +973,8 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
        RK_MUXROUTE_GRF(4, RK_PB3, 4, 0x030c, WRITE_MASK_VAL(6, 6, 0)), /* SPI3 IO mux M0 */
        RK_MUXROUTE_GRF(4, RK_PC2, 2, 0x030c, WRITE_MASK_VAL(6, 6, 1)), /* SPI3 IO mux M1 */
        RK_MUXROUTE_GRF(2, RK_PB4, 2, 0x030c, WRITE_MASK_VAL(8, 8, 0)), /* UART1 IO mux M0 */
-       RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */
-       RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */
+       RK_MUXROUTE_GRF(3, RK_PD6, 4, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */
+       RK_MUXROUTE_GRF(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */
        RK_MUXROUTE_GRF(1, RK_PD5, 2, 0x030c, WRITE_MASK_VAL(10, 10, 1)), /* UART2 IO mux M1 */
        RK_MUXROUTE_GRF(1, RK_PA1, 2, 0x030c, WRITE_MASK_VAL(12, 12, 0)), /* UART3 IO mux M0 */
        RK_MUXROUTE_GRF(3, RK_PB7, 4, 0x030c, WRITE_MASK_VAL(12, 12, 1)), /* UART3 IO mux M1 */
@@ -1004,13 +1004,13 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
        RK_MUXROUTE_GRF(3, RK_PD6, 5, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */
        RK_MUXROUTE_GRF(4, RK_PA0, 4, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */
        RK_MUXROUTE_GRF(3, RK_PC4, 5, 0x0314, WRITE_MASK_VAL(1, 0, 2)), /* PDM IO mux M2 */
-       RK_MUXROUTE_PMU(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */
+       RK_MUXROUTE_GRF(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */
        RK_MUXROUTE_GRF(2, RK_PD0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 1)), /* PCIE20 IO mux M1 */
        RK_MUXROUTE_GRF(1, RK_PB0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 2)), /* PCIE20 IO mux M2 */
-       RK_MUXROUTE_PMU(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */
+       RK_MUXROUTE_GRF(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */
        RK_MUXROUTE_GRF(2, RK_PD2, 4, 0x0314, WRITE_MASK_VAL(5, 4, 1)), /* PCIE30X1 IO mux M1 */
        RK_MUXROUTE_GRF(1, RK_PA5, 4, 0x0314, WRITE_MASK_VAL(5, 4, 2)), /* PCIE30X1 IO mux M2 */
-       RK_MUXROUTE_PMU(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */
+       RK_MUXROUTE_GRF(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */
        RK_MUXROUTE_GRF(2, RK_PD4, 4, 0x0314, WRITE_MASK_VAL(7, 6, 1)), /* PCIE30X2 IO mux M1 */
        RK_MUXROUTE_GRF(4, RK_PC2, 4, 0x0314, WRITE_MASK_VAL(7, 6, 2)), /* PCIE30X2 IO mux M2 */
 };
@@ -2436,10 +2436,19 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
        case RK3308:
        case RK3368:
        case RK3399:
+       case RK3568:
        case RK3588:
                pull_type = bank->pull_type[pin_num / 8];
                data >>= bit;
                data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1;
+               /*
+                * In the TRM, pull-up being 1 for everything except the GPIO0_D3-D6,
+                * where that pull up value becomes 3.
+                */
+               if (ctrl->type == RK3568 && bank->bank_num == 0 && pin_num >= 27 && pin_num <= 30) {
+                       if (data == 3)
+                               data = 1;
+               }
 
                return rockchip_pull_list[pull_type][data];
        default:
@@ -2497,7 +2506,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
                        }
                }
                /*
-                * In the TRM, pull-up being 1 for everything except the GPIO0_D0-D6,
+                * In the TRM, pull-up being 1 for everything except the GPIO0_D3-D6,
                 * where that pull up value becomes 3.
                 */
                if (ctrl->type == RK3568 && bank->bank_num == 0 && pin_num >= 27 && pin_num <= 30) {
index 2b3335a..2510129 100644 (file)
@@ -499,7 +499,6 @@ static int sppctl_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
        return 0;
 }
 
-#ifdef CONFIG_DEBUG_FS
 static void sppctl_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 {
        const char *label;
@@ -521,7 +520,6 @@ static void sppctl_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
                seq_puts(s, "\n");
        }
 }
-#endif
 
 static int sppctl_gpio_new(struct platform_device *pdev, struct sppctl_pdata *pctl)
 {
@@ -550,9 +548,8 @@ static int sppctl_gpio_new(struct platform_device *pdev, struct sppctl_pdata *pc
        gchip->get              = sppctl_gpio_get;
        gchip->set              = sppctl_gpio_set;
        gchip->set_config       = sppctl_gpio_set_config;
-#ifdef CONFIG_DEBUG_FS
-       gchip->dbg_show         = sppctl_gpio_dbg_show;
-#endif
+       gchip->dbg_show         = IS_ENABLED(CONFIG_DEBUG_FS) ?
+                                 sppctl_gpio_dbg_show : NULL;
        gchip->base             = -1;
        gchip->ngpio            = sppctl_gpio_list_sz;
        gchip->names            = sppctl_gpio_list_s;
index 43e7651..c6537a1 100644 (file)
@@ -1700,8 +1700,10 @@ int ssam_request_sync(struct ssam_controller *ctrl,
                return status;
 
        status = ssam_request_sync_init(rqst, spec->flags);
-       if (status)
+       if (status) {
+               ssam_request_sync_free(rqst);
                return status;
+       }
 
        ssam_request_sync_set_resp(rqst, rsp);
 
index f556557..6913297 100644 (file)
@@ -916,6 +916,20 @@ static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data)
        if (sshp_parse_command(dev, data, &command, &command_data))
                return;
 
+       /*
+        * Check if the message was intended for us. If not, drop it.
+        *
+        * Note: We will need to change this to handle debug messages. On newer
+        * generation devices, these seem to be sent to tid_out=0x03. We as
+        * host can still receive them as they can be forwarded via an override
+        * option on SAM, but doing so does not change tid_out=0x00.
+        */
+       if (command->tid_out != 0x00) {
+               rtl_warn(rtl, "rtl: dropping message not intended for us (tid = %#04x)\n",
+                        command->tid_out);
+               return;
+       }
+
        if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid)))
                ssh_rtl_rx_event(rtl, command, &command_data);
        else
index 439d282..8d92498 100644 (file)
@@ -932,7 +932,7 @@ static int amd_pmc_probe(struct platform_device *pdev)
        if (enable_stb && (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB)) {
                err = amd_pmc_s2d_init(dev);
                if (err)
-                       return err;
+                       goto err_pci_dev_put;
        }
 
        platform_set_drvdata(pdev, dev);
index c685a70..cb15acd 100644 (file)
@@ -121,6 +121,10 @@ static struct quirk_entry quirk_asus_tablet_mode = {
        .tablet_switch_mode = asus_wmi_lid_flip_rog_devid,
 };
 
+static struct quirk_entry quirk_asus_ignore_fan = {
+       .wmi_ignore_fan = true,
+};
+
 static int dmi_matched(const struct dmi_system_id *dmi)
 {
        pr_info("Identified laptop model '%s'\n", dmi->ident);
@@ -473,6 +477,15 @@ static const struct dmi_system_id asus_quirks[] = {
                },
                .driver_data = &quirk_asus_tablet_mode,
        },
+       {
+               .callback = dmi_matched,
+               .ident = "ASUS VivoBook E410MA",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "E410MA"),
+               },
+               .driver_data = &quirk_asus_ignore_fan,
+       },
        {},
 };
 
@@ -511,6 +524,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
        { KE_KEY, 0x30, { KEY_VOLUMEUP } },
        { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
        { KE_KEY, 0x32, { KEY_MUTE } },
+       { KE_KEY, 0x33, { KEY_SCREENLOCK } },
        { KE_KEY, 0x35, { KEY_SCREENLOCK } },
        { KE_KEY, 0x38, { KEY_PROG3 } }, /* Armoury Crate */
        { KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
@@ -544,6 +558,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
        { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
        { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
        { KE_KEY, 0x82, { KEY_CAMERA } },
+       { KE_KEY, 0x85, { KEY_CAMERA } },
        { KE_KEY, 0x86, { KEY_PROG1 } }, /* MyASUS Key */
        { KE_KEY, 0x88, { KEY_RFKILL  } }, /* Radio Toggle Key */
        { KE_KEY, 0x8A, { KEY_PROG1 } }, /* Color enhancement mode */
index 6f81b28..104188d 100644 (file)
@@ -2243,7 +2243,9 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
        asus->fan_type = FAN_TYPE_NONE;
        asus->agfn_pwm = -1;
 
-       if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL))
+       if (asus->driver->quirks->wmi_ignore_fan)
+               asus->fan_type = FAN_TYPE_NONE;
+       else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL))
                asus->fan_type = FAN_TYPE_SPEC83;
        else if (asus_wmi_has_agfn_fan(asus))
                asus->fan_type = FAN_TYPE_AGFN;
@@ -2436,6 +2438,9 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
 
        *available = false;
 
+       if (asus->fan_type == FAN_TYPE_NONE)
+               return 0;
+
        err = fan_curve_get_factory_default(asus, fan_dev);
        if (err) {
                return 0;
index 6531699..a478ebf 100644 (file)
@@ -38,6 +38,7 @@ struct quirk_entry {
        bool store_backlight_power;
        bool wmi_backlight_set_devstate;
        bool wmi_force_als_set;
+       bool wmi_ignore_fan;
        enum asus_wmi_tablet_switch_mode tablet_switch_mode;
        int wapf;
        /*
index c82b3d6..c517bd4 100644 (file)
@@ -61,7 +61,7 @@ static const struct key_entry dell_wmi_keymap_type_0012[] = {
        /* privacy mic mute */
        { KE_KEY, 0x0001, { KEY_MICMUTE } },
        /* privacy camera mute */
-       { KE_SW,  0x0002, { SW_CAMERA_LENS_COVER } },
+       { KE_VSW, 0x0002, { SW_CAMERA_LENS_COVER } },
        { KE_END, 0},
 };
 
@@ -115,11 +115,15 @@ bool dell_privacy_process_event(int type, int code, int status)
 
        switch (code) {
        case DELL_PRIVACY_AUDIO_EVENT: /* Mic mute */
-       case DELL_PRIVACY_CAMERA_EVENT: /* Camera mute */
                priv->last_status = status;
                sparse_keymap_report_entry(priv->input_dev, key, 1, true);
                ret = true;
                break;
+       case DELL_PRIVACY_CAMERA_EVENT: /* Camera mute */
+               priv->last_status = status;
+               sparse_keymap_report_entry(priv->input_dev, key, !(status & CAMERA_STATUS), false);
+               ret = true;
+               break;
        default:
                dev_dbg(&priv->wdev->dev, "unknown event type 0x%04x 0x%04x\n", type, code);
        }
@@ -292,7 +296,7 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
 {
        struct privacy_wmi_data *priv;
        struct key_entry *keymap;
-       int ret, i;
+       int ret, i, j;
 
        ret = wmi_has_guid(DELL_PRIVACY_GUID);
        if (!ret)
@@ -304,6 +308,11 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
 
        dev_set_drvdata(&wdev->dev, priv);
        priv->wdev = wdev;
+
+       ret = get_current_status(priv->wdev);
+       if (ret)
+               return ret;
+
        /* create evdev passing interface */
        priv->input_dev = devm_input_allocate_device(&wdev->dev);
        if (!priv->input_dev)
@@ -318,9 +327,20 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
        /* remap the keymap code with Dell privacy key type 0x12 as prefix
         * KEY_MICMUTE scancode will be reported as 0x120001
         */
-       for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0012); i++) {
-               keymap[i] = dell_wmi_keymap_type_0012[i];
-               keymap[i].code |= (0x0012 << 16);
+       for (i = 0, j = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0012); i++) {
+               /*
+                * Unlike keys where only presses matter, userspace may act
+                * on switches in both of their positions. Only register
+                * SW_CAMERA_LENS_COVER if it is actually there.
+                */
+               if (dell_wmi_keymap_type_0012[i].type == KE_VSW &&
+                   dell_wmi_keymap_type_0012[i].sw.code == SW_CAMERA_LENS_COVER &&
+                   !(priv->features_present & BIT(DELL_PRIVACY_TYPE_CAMERA)))
+                       continue;
+
+               keymap[j] = dell_wmi_keymap_type_0012[i];
+               keymap[j].code |= (0x0012 << 16);
+               j++;
        }
        ret = sparse_keymap_setup(priv->input_dev, keymap, NULL);
        kfree(keymap);
@@ -331,11 +351,12 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
        priv->input_dev->name = "Dell Privacy Driver";
        priv->input_dev->id.bustype = BUS_HOST;
 
-       ret = input_register_device(priv->input_dev);
-       if (ret)
-               return ret;
+       /* Report initial camera-cover status */
+       if (priv->features_present & BIT(DELL_PRIVACY_TYPE_CAMERA))
+               input_report_switch(priv->input_dev, SW_CAMERA_LENS_COVER,
+                                   !(priv->last_status & CAMERA_STATUS));
 
-       ret = get_current_status(priv->wdev);
+       ret = input_register_device(priv->input_dev);
        if (ret)
                return ret;
 
index 435d2d3..0eb5bfd 100644 (file)
@@ -1621,6 +1621,12 @@ static const struct dmi_system_id set_fn_lock_led_list[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion R7000P2020H"),
                }
        },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion 5 15ARH05"),
+               }
+       },
        {}
 };
 
index b2342b3..74dc2cf 100644 (file)
@@ -181,6 +181,9 @@ int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
                return PTR_ERR(int3472->regulator.gpio);
        }
 
+       /* Ensure the pin is in output mode and non-active state */
+       gpiod_direction_output(int3472->regulator.gpio, 0);
+
        cfg.dev = &int3472->adev->dev;
        cfg.init_data = &init_data;
        cfg.ena_gpiod = int3472->regulator.gpio;
index 974a132..c42c3fa 100644 (file)
@@ -168,6 +168,8 @@ static int skl_int3472_map_gpio_to_clk(struct int3472_discrete_device *int3472,
                        return (PTR_ERR(gpio));
 
                int3472->clock.ena_gpio = gpio;
+               /* Ensure the pin is in output mode and non-active state */
+               gpiod_direction_output(int3472->clock.ena_gpio, 0);
                break;
        case INT3472_GPIO_TYPE_PRIVACY_LED:
                gpio = acpi_get_and_request_gpiod(path, pin, "int3472,privacy-led");
@@ -175,6 +177,8 @@ static int skl_int3472_map_gpio_to_clk(struct int3472_discrete_device *int3472,
                        return (PTR_ERR(gpio));
 
                int3472->clock.led_gpio = gpio;
+               /* Ensure the pin is in output mode and non-active state */
+               gpiod_direction_output(int3472->clock.led_gpio, 0);
                break;
        default:
                dev_err(int3472->dev, "Invalid GPIO type 0x%02x for clock\n", type);
index f1d802f..3a15d32 100644 (file)
@@ -1029,6 +1029,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,          adl_core_init),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,        adl_core_init),
        X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE,          mtl_core_init),
+       X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L,        mtl_core_init),
        {}
 };
 
index ca76076..b362241 100644 (file)
@@ -46,7 +46,8 @@ static struct {
        {SIMATIC_IPC_IPC427D, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_NONE},
        {SIMATIC_IPC_IPC427E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_427E},
        {SIMATIC_IPC_IPC477E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_427E},
-       {SIMATIC_IPC_IPC427G, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
+       {SIMATIC_IPC_IPCBX_39A, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
+       {SIMATIC_IPC_IPCPX_39A, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227G},
 };
 
 static int register_platform_devices(u32 station_id)
index 7156ae2..537d6a2 100644 (file)
@@ -1887,14 +1887,21 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
                break;
        }
 
-       ret = sony_call_snc_handle(handle, probe_base, &result);
-       if (ret)
-               return ret;
+       /*
+        * Only probe if there is a separate probe_base, otherwise the probe call
+        * is equivalent to __sony_nc_kbd_backlight_mode_set(0), resulting in
+        * the keyboard backlight being turned off.
+        */
+       if (probe_base) {
+               ret = sony_call_snc_handle(handle, probe_base, &result);
+               if (ret)
+                       return ret;
 
-       if ((handle == 0x0137 && !(result & 0x02)) ||
-                       !(result & 0x01)) {
-               dprintk("no backlight keyboard found\n");
-               return 0;
+               if ((handle == 0x0137 && !(result & 0x02)) ||
+                               !(result & 0x01)) {
+                       dprintk("no backlight keyboard found\n");
+                       return 0;
+               }
        }
 
        kbdbl_ctl = kzalloc(sizeof(*kbdbl_ctl), GFP_KERNEL);
index 1195293..a959468 100644 (file)
@@ -10311,9 +10311,11 @@ static DEFINE_MUTEX(dytc_mutex);
 static int dytc_capabilities;
 static bool dytc_mmc_get_available;
 
-static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *profile)
+static int convert_dytc_to_profile(int funcmode, int dytcmode,
+               enum platform_profile_option *profile)
 {
-       if (dytc_capabilities & BIT(DYTC_FC_MMC)) {
+       switch (funcmode) {
+       case DYTC_FUNCTION_MMC:
                switch (dytcmode) {
                case DYTC_MODE_MMC_LOWPOWER:
                        *profile = PLATFORM_PROFILE_LOW_POWER;
@@ -10329,8 +10331,7 @@ static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *p
                        return -EINVAL;
                }
                return 0;
-       }
-       if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
+       case DYTC_FUNCTION_PSC:
                switch (dytcmode) {
                case DYTC_MODE_PSC_LOWPOWER:
                        *profile = PLATFORM_PROFILE_LOW_POWER;
@@ -10344,6 +10345,14 @@ static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *p
                default: /* Unknown mode */
                        return -EINVAL;
                }
+               return 0;
+       case DYTC_FUNCTION_AMT:
+               /* For now return balanced. It's the closest we have to 'auto' */
+               *profile =  PLATFORM_PROFILE_BALANCED;
+               return 0;
+       default:
+               /* Unknown function */
+               return -EOPNOTSUPP;
        }
        return 0;
 }
@@ -10492,6 +10501,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
                err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output);
                if (err)
                        goto unlock;
+
                /* system supports AMT, activate it when on balanced */
                if (dytc_capabilities & BIT(DYTC_FC_AMT))
                        dytc_control_amt(profile == PLATFORM_PROFILE_BALANCED);
@@ -10507,7 +10517,7 @@ static void dytc_profile_refresh(void)
 {
        enum platform_profile_option profile;
        int output, err = 0;
-       int perfmode;
+       int perfmode, funcmode;
 
        mutex_lock(&dytc_mutex);
        if (dytc_capabilities & BIT(DYTC_FC_MMC)) {
@@ -10522,8 +10532,9 @@ static void dytc_profile_refresh(void)
        if (err)
                return;
 
+       funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
        perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
-       convert_dytc_to_profile(perfmode, &profile);
+       convert_dytc_to_profile(funcmode, perfmode, &profile);
        if (profile != dytc_current_profile) {
                dytc_current_profile = profile;
                platform_profile_notify();
index baae312..f009953 100644 (file)
@@ -264,6 +264,23 @@ static const struct ts_dmi_data connect_tablet9_data = {
        .properties     = connect_tablet9_props,
 };
 
+static const struct property_entry csl_panther_tab_hd_props[] = {
+       PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+       PROPERTY_ENTRY_U32("touchscreen-min-y", 20),
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1526),
+       PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+       PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-csl-panther-tab-hd.fw"),
+       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+       { }
+};
+
+static const struct ts_dmi_data csl_panther_tab_hd_data = {
+       .acpi_name      = "MSSL1680:00",
+       .properties     = csl_panther_tab_hd_props,
+};
+
 static const struct property_entry cube_iwork8_air_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
        PROPERTY_ENTRY_U32("touchscreen-min-y", 3),
@@ -1125,6 +1142,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                },
        },
        {
+               /* CSL Panther Tab HD */
+               .driver_data = (void *)&csl_panther_tab_hd_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "CSL Computer GmbH & Co. KG"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "CSL Panther Tab HD"),
+               },
+       },
+       {
                /* CUBE iwork8 Air */
                .driver_data = (void *)&cube_iwork8_air_data,
                .matches = {
index de176c2..2a52c99 100644 (file)
@@ -257,7 +257,7 @@ config RESET_SUNXI
 
 config RESET_TI_SCI
        tristate "TI System Control Interface (TI-SCI) reset driver"
-       depends on TI_SCI_PROTOCOL || COMPILE_TEST
+       depends on TI_SCI_PROTOCOL || (COMPILE_TEST && TI_SCI_PROTOCOL=n)
        help
          This enables the reset driver support over TI System Control Interface
          available on some new TI's SoCs. If you wish to use reset resources
index 146fd5d..15abac9 100644 (file)
@@ -47,7 +47,6 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct uniphier_glue_reset_priv *priv;
        struct resource *res;
-       resource_size_t size;
        int i, ret;
 
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -60,7 +59,6 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
                return -EINVAL;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       size = resource_size(res);
        priv->rdata.membase = devm_ioremap_resource(dev, res);
        if (IS_ERR(priv->rdata.membase))
                return PTR_ERR(priv->rdata.membase);
@@ -96,7 +94,7 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
 
        spin_lock_init(&priv->rdata.lock);
        priv->rdata.rcdev.owner = THIS_MODULE;
-       priv->rdata.rcdev.nr_resets = size * BITS_PER_BYTE;
+       priv->rdata.rcdev.nr_resets = resource_size(res) * BITS_PER_BYTE;
        priv->rdata.rcdev.ops = &reset_simple_ops;
        priv->rdata.rcdev.of_node = dev->of_node;
        priv->rdata.active_low = true;
index 49cc18a..29a2865 100644 (file)
@@ -981,6 +981,9 @@ queue_rtpg:
  *
  * Returns true if and only if alua_rtpg_work() will be called asynchronously.
  * That function is responsible for calling @qdata->fn().
+ *
+ * Context: may be called from atomic context (alua_check()) only if the caller
+ *     holds an sdev reference.
  */
 static bool alua_rtpg_queue(struct alua_port_group *pg,
                            struct scsi_device *sdev,
@@ -989,8 +992,6 @@ static bool alua_rtpg_queue(struct alua_port_group *pg,
        int start_queue = 0;
        unsigned long flags;
 
-       might_sleep();
-
        if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
                return false;
 
index e9c2d30..8c038cc 100644 (file)
@@ -704,7 +704,7 @@ static int hisi_sas_init_device(struct domain_device *device)
                int_to_scsilun(0, &lun);
 
                while (retry-- > 0) {
-                       rc = sas_clear_task_set(device, lun.scsi_lun);
+                       rc = sas_abort_task_set(device, lun.scsi_lun);
                        if (rc == TMF_RESP_FUNC_COMPLETE) {
                                hisi_sas_release_task(hisi_hba, device);
                                break;
@@ -1316,7 +1316,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
                                device->linkrate = phy->sas_phy.linkrate;
 
                        hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
-               } else
+               } else if (!port->port_attached)
                        port->id = 0xff;
        }
 }
index 4dbf51e..f6da348 100644 (file)
@@ -5850,7 +5850,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
 {
        struct Scsi_Host *sh;
 
-       sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
+       sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info));
        if (sh == NULL) {
                dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
                return -ENOMEM;
index 1d1cf64..0454d94 100644 (file)
@@ -849,7 +849,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
                                       enum iscsi_host_param param, char *buf)
 {
        struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
-       struct iscsi_session *session = tcp_sw_host->session;
+       struct iscsi_session *session;
        struct iscsi_conn *conn;
        struct iscsi_tcp_conn *tcp_conn;
        struct iscsi_sw_tcp_conn *tcp_sw_conn;
@@ -859,6 +859,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
 
        switch (param) {
        case ISCSI_HOST_PARAM_IPADDRESS:
+               session = tcp_sw_host->session;
                if (!session)
                        return -ENOTCONN;
 
@@ -959,11 +960,13 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
        if (!cls_session)
                goto remove_host;
        session = cls_session->dd_data;
-       tcp_sw_host = iscsi_host_priv(shost);
-       tcp_sw_host->session = session;
 
        if (iscsi_tcp_r2tpool_alloc(session))
                goto remove_session;
+
+       /* We are now fully setup so expose the session to sysfs. */
+       tcp_sw_host = iscsi_host_priv(shost);
+       tcp_sw_host->session = session;
        return cls_session;
 
 remove_session:
@@ -983,10 +986,17 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
        if (WARN_ON_ONCE(session->leadconn))
                return;
 
+       iscsi_session_remove(cls_session);
+       /*
+        * Our get_host_param needs to access the session, so remove the
+        * host from sysfs before freeing the session to make sure userspace
+        * is no longer accessing the callout.
+        */
+       iscsi_host_remove(shost, false);
+
        iscsi_tcp_r2tpool_free(cls_session->dd_data);
-       iscsi_session_teardown(cls_session);
 
-       iscsi_host_remove(shost, false);
+       iscsi_session_free(cls_session);
        iscsi_host_free(shost);
 }
 
index ef2fc86..127f3d7 100644 (file)
@@ -3104,17 +3104,32 @@ dec_session_count:
 }
 EXPORT_SYMBOL_GPL(iscsi_session_setup);
 
-/**
- * iscsi_session_teardown - destroy session, host, and cls_session
- * @cls_session: iscsi session
+/*
+ * issi_session_remove - Remove session from iSCSI class.
  */
-void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+void iscsi_session_remove(struct iscsi_cls_session *cls_session)
 {
        struct iscsi_session *session = cls_session->dd_data;
-       struct module *owner = cls_session->transport->owner;
        struct Scsi_Host *shost = session->host;
 
        iscsi_remove_session(cls_session);
+       /*
+        * host removal only has to wait for its children to be removed from
+        * sysfs, and iscsi_tcp needs to do iscsi_host_remove before freeing
+        * the session, so drop the session count here.
+        */
+       iscsi_host_dec_session_cnt(shost);
+}
+EXPORT_SYMBOL_GPL(iscsi_session_remove);
+
+/**
+ * iscsi_session_free - Free iscsi session and it's resources
+ * @cls_session: iscsi session
+ */
+void iscsi_session_free(struct iscsi_cls_session *cls_session)
+{
+       struct iscsi_session *session = cls_session->dd_data;
+       struct module *owner = cls_session->transport->owner;
 
        iscsi_pool_free(&session->cmdpool);
        kfree(session->password);
@@ -3132,10 +3147,19 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
        kfree(session->discovery_parent_type);
 
        iscsi_free_session(cls_session);
-
-       iscsi_host_dec_session_cnt(shost);
        module_put(owner);
 }
+EXPORT_SYMBOL_GPL(iscsi_session_free);
+
+/**
+ * iscsi_session_teardown - destroy session and cls_session
+ * @cls_session: iscsi session
+ */
+void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+{
+       iscsi_session_remove(cls_session);
+       iscsi_session_free(cls_session);
+}
 EXPORT_SYMBOL_GPL(iscsi_session_teardown);
 
 /**
index 0e3b6ba..0f13853 100644 (file)
@@ -212,7 +212,7 @@ static void imx8mp_hdmi_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
                break;
        case IMX8MP_HDMIBLK_PD_LCDIF:
                regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
-                               BIT(7) | BIT(16) | BIT(17) | BIT(18) |
+                               BIT(16) | BIT(17) | BIT(18) |
                                BIT(19) | BIT(20));
                regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11));
                regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0,
@@ -241,6 +241,7 @@ static void imx8mp_hdmi_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
                regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(1));
                break;
        case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY:
+               regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(7));
                regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24));
                regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
                regmap_clear_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3));
@@ -270,7 +271,7 @@ static void imx8mp_hdmi_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc,
                                  BIT(4) | BIT(5) | BIT(6));
                regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11));
                regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0,
-                                 BIT(7) | BIT(16) | BIT(17) | BIT(18) |
+                                 BIT(16) | BIT(17) | BIT(18) |
                                  BIT(19) | BIT(20));
                break;
        case IMX8MP_HDMIBLK_PD_PAI:
@@ -298,6 +299,7 @@ static void imx8mp_hdmi_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc,
        case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY:
                regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3));
                regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
+               regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(7));
                regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24));
                break;
        case IMX8MP_HDMIBLK_PD_HDCP:
@@ -590,7 +592,6 @@ static int imx8mp_blk_ctrl_probe(struct platform_device *pdev)
                        ret = PTR_ERR(domain->power_dev);
                        goto cleanup_pds;
                }
-               dev_set_name(domain->power_dev, "%s", data->name);
 
                domain->genpd.name = data->name;
                domain->genpd.power_on = imx8mp_blk_ctrl_power_on;
index 28144c6..32ed9dc 100644 (file)
@@ -66,8 +66,8 @@ static u32 __init imx8mq_soc_revision(void)
        ocotp_base = of_iomap(np, 0);
        WARN_ON(!ocotp_base);
        clk = of_clk_get_by_name(np, NULL);
-       if (!clk) {
-               WARN_ON(!clk);
+       if (IS_ERR(clk)) {
+               WARN_ON(IS_ERR(clk));
                return 0;
        }
 
index cd44f17..d51abb4 100644 (file)
@@ -461,9 +461,10 @@ static int apr_add_device(struct device *dev, struct device_node *np,
                goto out;
        }
 
+       /* Protection domain is optional, it does not exist on older platforms */
        ret = of_property_read_string_index(np, "qcom,protection-domain",
                                            1, &adev->service_path);
-       if (ret < 0) {
+       if (ret < 0 && ret != -EINVAL) {
                dev_err(dev, "Failed to read second value of qcom,protection-domain\n");
                goto out;
        }
index e9b854e..144ea68 100644 (file)
@@ -1708,12 +1708,16 @@ static int cpr_probe(struct platform_device *pdev)
 
        ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd);
        if (ret)
-               return ret;
+               goto err_remove_genpd;
 
        platform_set_drvdata(pdev, drv);
        cpr_debugfs_init(drv);
 
        return 0;
+
+err_remove_genpd:
+       pm_genpd_remove(&drv->pd);
+       return ret;
 }
 
 static int cpr_remove(struct platform_device *pdev)
index db1441c..690ab71 100644 (file)
@@ -86,7 +86,7 @@ struct vchiq_service_params_kernel {
 
 struct vchiq_instance;
 
-extern enum vchiq_status vchiq_initialise(struct vchiq_instance **pinstance);
+extern int vchiq_initialise(struct vchiq_instance **pinstance);
 extern enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance);
 extern enum vchiq_status vchiq_connect(struct vchiq_instance *instance);
 extern enum vchiq_status vchiq_open_service(struct vchiq_instance *instance,
index 2851ef6..cd20eb1 100644 (file)
@@ -100,10 +100,10 @@ vchiq_dump_platform_use_state(struct vchiq_state *state);
 extern void
 vchiq_dump_service_use_state(struct vchiq_state *state);
 
-extern enum vchiq_status
+extern int
 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
                   enum USE_TYPE_E use_type);
-extern enum vchiq_status
+extern int
 vchiq_release_internal(struct vchiq_state *state,
                       struct vchiq_service *service);
 
index bac1114..2b95b45 100644 (file)
@@ -73,8 +73,8 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
 {
        struct se_session *sess = se_cmd->se_sess;
 
-       assert_spin_locked(&sess->sess_cmd_lock);
-       WARN_ON_ONCE(!irqs_disabled());
+       lockdep_assert_held(&sess->sess_cmd_lock);
+
        /*
         * If command already reached CMD_T_COMPLETE state within
         * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
index f17ab23..77bd47d 100644 (file)
@@ -909,15 +909,20 @@ __thermal_cooling_device_register(struct device_node *np,
        cdev->devdata = devdata;
 
        ret = cdev->ops->get_max_state(cdev, &cdev->max_state);
-       if (ret)
-               goto out_kfree_type;
+       if (ret) {
+               kfree(cdev->type);
+               goto out_ida_remove;
+       }
 
        thermal_cooling_device_setup_sysfs(cdev);
+
        ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
        if (ret) {
+               kfree(cdev->type);
                thermal_cooling_device_destroy_sysfs(cdev);
-               goto out_kfree_type;
+               goto out_ida_remove;
        }
+
        ret = device_register(&cdev->device);
        if (ret)
                goto out_kfree_type;
@@ -943,6 +948,8 @@ out_kfree_type:
        thermal_cooling_device_destroy_sysfs(cdev);
        kfree(cdev->type);
        put_device(&cdev->device);
+
+       /* thermal_release() takes care of the rest */
        cdev = NULL;
 out_ida_remove:
        ida_free(&thermal_cdev_ida, id);
index 81252e3..56008eb 100644 (file)
@@ -427,13 +427,6 @@ int tb_retimer_scan(struct tb_port *port, bool add)
 {
        u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
        int ret, i, last_idx = 0;
-       struct usb4_port *usb4;
-
-       usb4 = port->usb4;
-       if (!usb4)
-               return 0;
-
-       pm_runtime_get_sync(&usb4->dev);
 
        /*
         * Send broadcast RT to make sure retimer indices facing this
@@ -441,7 +434,7 @@ int tb_retimer_scan(struct tb_port *port, bool add)
         */
        ret = usb4_port_enumerate_retimers(port);
        if (ret)
-               goto out;
+               return ret;
 
        /*
         * Enable sideband channel for each retimer. We can do this
@@ -471,12 +464,11 @@ int tb_retimer_scan(struct tb_port *port, bool add)
                        break;
        }
 
-       if (!last_idx) {
-               ret = 0;
-               goto out;
-       }
+       if (!last_idx)
+               return 0;
 
        /* Add on-board retimers if they do not exist already */
+       ret = 0;
        for (i = 1; i <= last_idx; i++) {
                struct tb_retimer *rt;
 
@@ -490,10 +482,6 @@ int tb_retimer_scan(struct tb_port *port, bool add)
                }
        }
 
-out:
-       pm_runtime_mark_last_busy(&usb4->dev);
-       pm_runtime_put_autosuspend(&usb4->dev);
-
        return ret;
 }
 
index 4628458..3f1ab30 100644 (file)
@@ -628,11 +628,15 @@ static void tb_scan_port(struct tb_port *port)
                         * Downstream switch is reachable through two ports.
                         * Only scan on the primary port (link_nr == 0).
                         */
+
+       if (port->usb4)
+               pm_runtime_get_sync(&port->usb4->dev);
+
        if (tb_wait_for_port(port, false) <= 0)
-               return;
+               goto out_rpm_put;
        if (port->remote) {
                tb_port_dbg(port, "port already has a remote\n");
-               return;
+               goto out_rpm_put;
        }
 
        tb_retimer_scan(port, true);
@@ -647,12 +651,12 @@ static void tb_scan_port(struct tb_port *port)
                 */
                if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
                        tb_scan_xdomain(port);
-               return;
+               goto out_rpm_put;
        }
 
        if (tb_switch_configure(sw)) {
                tb_switch_put(sw);
-               return;
+               goto out_rpm_put;
        }
 
        /*
@@ -681,7 +685,7 @@ static void tb_scan_port(struct tb_port *port)
 
        if (tb_switch_add(sw)) {
                tb_switch_put(sw);
-               return;
+               goto out_rpm_put;
        }
 
        /* Link the switches using both links if available */
@@ -733,6 +737,12 @@ static void tb_scan_port(struct tb_port *port)
 
        tb_add_dp_resources(sw);
        tb_scan_switch(sw);
+
+out_rpm_put:
+       if (port->usb4) {
+               pm_runtime_mark_last_busy(&port->usb4->dev);
+               pm_runtime_put_autosuspend(&port->usb4->dev);
+       }
 }
 
 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
index 2c3cf7f..1fc3c29 100644 (file)
@@ -1275,7 +1275,7 @@ static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
                return;
        } else if (!ret) {
                /* Use maximum link rate if the link valid is not set */
-               ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
+               ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
                if (ret < 0) {
                        tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
                        return;
index cfa8348..3c51e47 100644 (file)
@@ -1419,12 +1419,19 @@ static int tb_xdomain_get_properties(struct tb_xdomain *xd)
         * registered, we notify the userspace that it has changed.
         */
        if (!update) {
-               struct tb_port *port;
+               /*
+                * Now disable lane 1 if bonding was not enabled. Do
+                * this only if bonding was possible at the beginning
+                * (that is we are the connection manager and there are
+                * two lanes).
+                */
+               if (xd->bonding_possible) {
+                       struct tb_port *port;
 
-               /* Now disable lane 1 if bonding was not enabled */
-               port = tb_port_at(xd->route, tb_xdomain_parent(xd));
-               if (!port->bonded)
-                       tb_port_disable(port->dual_link_port);
+                       port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+                       if (!port->bonded)
+                               tb_port_disable(port->dual_link_port);
+               }
 
                if (device_add(&xd->dev)) {
                        dev_err(&xd->dev, "failed to add XDomain device\n");
index 314a05e..64770c6 100644 (file)
 #define PCI_DEVICE_ID_EXAR_XR17V4358           0x4358
 #define PCI_DEVICE_ID_EXAR_XR17V8358           0x8358
 
+#define PCI_DEVICE_ID_SEALEVEL_710xC           0x1001
+#define PCI_DEVICE_ID_SEALEVEL_720xC           0x1002
+#define PCI_DEVICE_ID_SEALEVEL_740xC           0x1004
+#define PCI_DEVICE_ID_SEALEVEL_780xC           0x1008
+#define PCI_DEVICE_ID_SEALEVEL_716xC           0x1010
+
 #define UART_EXAR_INT0         0x80
 #define UART_EXAR_8XMODE       0x88    /* 8X sampling rate select */
 #define UART_EXAR_SLEEP                0x8b    /* Sleep mode */
@@ -638,6 +644,8 @@ exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
                nr_ports = BIT(((pcidev->device & 0x38) >> 3) - 1);
        else if (board->num_ports)
                nr_ports = board->num_ports;
+       else if (pcidev->vendor == PCI_VENDOR_ID_SEALEVEL)
+               nr_ports = pcidev->device & 0xff;
        else
                nr_ports = pcidev->device & 0x0f;
 
@@ -864,6 +872,12 @@ static const struct pci_device_id exar_pci_tbl[] = {
        EXAR_DEVICE(COMMTECH, 4224PCI335, pbn_fastcom335_4),
        EXAR_DEVICE(COMMTECH, 2324PCI335, pbn_fastcom335_4),
        EXAR_DEVICE(COMMTECH, 2328PCI335, pbn_fastcom335_8),
+
+       EXAR_DEVICE(SEALEVEL, 710xC, pbn_exar_XR17V35x),
+       EXAR_DEVICE(SEALEVEL, 720xC, pbn_exar_XR17V35x),
+       EXAR_DEVICE(SEALEVEL, 740xC, pbn_exar_XR17V35x),
+       EXAR_DEVICE(SEALEVEL, 780xC, pbn_exar_XR17V35x),
+       EXAR_DEVICE(SEALEVEL, 716xC, pbn_exar_XR17V35x),
        { 0, }
 };
 MODULE_DEVICE_TABLE(pci, exar_pci_tbl);
index d75c39f..d8c2f34 100644 (file)
@@ -1466,6 +1466,10 @@ static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
        struct circ_buf *xmit = &uap->port.state->xmit;
        int count = uap->fifosize >> 1;
 
+       if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
+           !uap->rs485_tx_started)
+               pl011_rs485_tx_start(uap);
+
        if (uap->port.x_char) {
                if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
                        return true;
@@ -1477,10 +1481,6 @@ static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
                return false;
        }
 
-       if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
-           !uap->rs485_tx_started)
-               pl011_rs485_tx_start(uap);
-
        /* If we are using DMA mode, try to send some characters. */
        if (pl011_dma_tx_irq(uap))
                return true;
index f1c06e1..9cd7479 100644 (file)
@@ -2657,13 +2657,7 @@ static void __init atmel_console_get_options(struct uart_port *port, int *baud,
        else if (mr == ATMEL_US_PAR_ODD)
                *parity = 'o';
 
-       /*
-        * The serial core only rounds down when matching this to a
-        * supported baud rate. Make sure we don't end up slightly
-        * lower than one of those, as it would make us fall through
-        * to a much lower baud rate than we really want.
-        */
-       *baud = port->uartclk / (16 * (quot - 1));
+       *baud = port->uartclk / (16 * quot);
 }
 
 static int __init atmel_console_setup(struct console *co, char *options)
index a3ed9b3..7ce7bb1 100644 (file)
@@ -171,6 +171,7 @@ static int configure_kgdboc(void)
        int err = -ENODEV;
        char *cptr = config;
        struct console *cons;
+       int cookie;
 
        if (!strlen(config) || isspace(config[0])) {
                err = 0;
@@ -189,20 +190,9 @@ static int configure_kgdboc(void)
        if (kgdboc_register_kbd(&cptr))
                goto do_register;
 
-       /*
-        * tty_find_polling_driver() can call uart_set_options()
-        * (via poll_init) to configure the uart. Take the console_list_lock
-        * in order to synchronize against register_console(), which can also
-        * configure the uart via uart_set_options(). This also allows safe
-        * traversal of the console list.
-        */
-       console_list_lock();
-
        p = tty_find_polling_driver(cptr, &tty_line);
-       if (!p) {
-               console_list_unlock();
+       if (!p)
                goto noconfig;
-       }
 
        /*
         * Take console_lock to serialize device() callback with
@@ -211,7 +201,8 @@ static int configure_kgdboc(void)
         */
        console_lock();
 
-       for_each_console(cons) {
+       cookie = console_srcu_read_lock();
+       for_each_console_srcu(cons) {
                int idx;
                if (cons->device && cons->device(cons, &idx) == p &&
                    idx == tty_line) {
@@ -219,11 +210,10 @@ static int configure_kgdboc(void)
                        break;
                }
        }
+       console_srcu_read_unlock(cookie);
 
        console_unlock();
 
-       console_list_unlock();
-
        kgdb_tty_driver = p;
        kgdb_tty_line = tty_line;
 
index 3d54a43..9576ba8 100644 (file)
@@ -749,7 +749,7 @@ static void pch_dma_tx_complete(void *arg)
                uart_xmit_advance(port, sg_dma_len(sg));
 
        async_tx_ack(priv->desc_tx);
-       dma_unmap_sg(port->dev, sg, priv->orig_nent, DMA_TO_DEVICE);
+       dma_unmap_sg(port->dev, priv->sg_tx_p, priv->orig_nent, DMA_TO_DEVICE);
        priv->tx_dma_use = 0;
        priv->nent = 0;
        priv->orig_nent = 0;
index b487823..57f04f8 100644 (file)
@@ -864,9 +864,10 @@ out_unlock:
        return IRQ_HANDLED;
 }
 
-static void get_tx_fifo_size(struct qcom_geni_serial_port *port)
+static int setup_fifos(struct qcom_geni_serial_port *port)
 {
        struct uart_port *uport;
+       u32 old_rx_fifo_depth = port->rx_fifo_depth;
 
        uport = &port->uport;
        port->tx_fifo_depth = geni_se_get_tx_fifo_depth(&port->se);
@@ -874,6 +875,16 @@ static void get_tx_fifo_size(struct qcom_geni_serial_port *port)
        port->rx_fifo_depth = geni_se_get_rx_fifo_depth(&port->se);
        uport->fifosize =
                (port->tx_fifo_depth * port->tx_fifo_width) / BITS_PER_BYTE;
+
+       if (port->rx_fifo && (old_rx_fifo_depth != port->rx_fifo_depth) && port->rx_fifo_depth) {
+               port->rx_fifo = devm_krealloc(uport->dev, port->rx_fifo,
+                                             port->rx_fifo_depth * sizeof(u32),
+                                             GFP_KERNEL);
+               if (!port->rx_fifo)
+                       return -ENOMEM;
+       }
+
+       return 0;
 }
 
 
@@ -888,6 +899,7 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
        u32 rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT;
        u32 proto;
        u32 pin_swap;
+       int ret;
 
        proto = geni_se_read_proto(&port->se);
        if (proto != GENI_SE_UART) {
@@ -897,7 +909,9 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
 
        qcom_geni_serial_stop_rx(uport);
 
-       get_tx_fifo_size(port);
+       ret = setup_fifos(port);
+       if (ret)
+               return ret;
 
        writel(rxstale, uport->membase + SE_UART_RX_STALE_CNT);
 
@@ -1516,7 +1530,7 @@ static int qcom_geni_serial_remove(struct platform_device *pdev)
        return 0;
 }
 
-static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev)
+static int qcom_geni_serial_sys_suspend(struct device *dev)
 {
        struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
        struct uart_port *uport = &port->uport;
@@ -1533,7 +1547,7 @@ static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev)
        return uart_suspend_port(private_data->drv, uport);
 }
 
-static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
+static int qcom_geni_serial_sys_resume(struct device *dev)
 {
        int ret;
        struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
@@ -1581,10 +1595,12 @@ static int qcom_geni_serial_sys_hib_resume(struct device *dev)
 }
 
 static const struct dev_pm_ops qcom_geni_serial_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(qcom_geni_serial_sys_suspend,
-                                       qcom_geni_serial_sys_resume)
-       .restore = qcom_geni_serial_sys_hib_resume,
-       .thaw = qcom_geni_serial_sys_hib_resume,
+       .suspend = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
+       .resume = pm_sleep_ptr(qcom_geni_serial_sys_resume),
+       .freeze = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
+       .poweroff = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
+       .restore = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume),
+       .thaw = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume),
 };
 
 static const struct of_device_id qcom_geni_serial_match_table[] = {
index b9fbbee..ec874f3 100644 (file)
@@ -2212,6 +2212,9 @@ EXPORT_SYMBOL_GPL(uart_parse_options);
  * @parity: parity character - 'n' (none), 'o' (odd), 'e' (even)
  * @bits: number of data bits
  * @flow: flow control character - 'r' (rts)
+ *
+ * Locking: Caller must hold console_list_lock in order to serialize
+ * early initialization of the serial-console lock.
  */
 int
 uart_set_options(struct uart_port *port, struct console *co,
@@ -2619,7 +2622,9 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options)
 
        if (!ret && options) {
                uart_parse_options(options, &baud, &parity, &bits, &flow);
+               console_list_lock();
                ret = uart_set_options(port, NULL, baud, parity, bits, flow);
+               console_list_unlock();
        }
 out:
        mutex_unlock(&tport->mutex);
index bda61be..3a1c4d3 100644 (file)
@@ -1234,12 +1234,14 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
         * clock scaling is in progress
         */
        ufshcd_scsi_block_requests(hba);
+       mutex_lock(&hba->wb_mutex);
        down_write(&hba->clk_scaling_lock);
 
        if (!hba->clk_scaling.is_allowed ||
            ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
                ret = -EBUSY;
                up_write(&hba->clk_scaling_lock);
+               mutex_unlock(&hba->wb_mutex);
                ufshcd_scsi_unblock_requests(hba);
                goto out;
        }
@@ -1251,12 +1253,16 @@ out:
        return ret;
 }
 
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
 {
-       if (writelock)
-               up_write(&hba->clk_scaling_lock);
-       else
-               up_read(&hba->clk_scaling_lock);
+       up_write(&hba->clk_scaling_lock);
+
+       /* Enable Write Booster if we have scaled up else disable it */
+       if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
+               ufshcd_wb_toggle(hba, scale_up);
+
+       mutex_unlock(&hba->wb_mutex);
+
        ufshcd_scsi_unblock_requests(hba);
        ufshcd_release(hba);
 }
@@ -1273,7 +1279,6 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 {
        int ret = 0;
-       bool is_writelock = true;
 
        ret = ufshcd_clock_scaling_prepare(hba);
        if (ret)
@@ -1302,15 +1307,8 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
                }
        }
 
-       /* Enable Write Booster if we have scaled up else disable it */
-       if (ufshcd_enable_wb_if_scaling_up(hba)) {
-               downgrade_write(&hba->clk_scaling_lock);
-               is_writelock = false;
-               ufshcd_wb_toggle(hba, scale_up);
-       }
-
 out_unprepare:
-       ufshcd_clock_scaling_unprepare(hba, is_writelock);
+       ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
        return ret;
 }
 
@@ -6066,9 +6064,11 @@ static void ufshcd_force_error_recovery(struct ufs_hba *hba)
 
 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
 {
+       mutex_lock(&hba->wb_mutex);
        down_write(&hba->clk_scaling_lock);
        hba->clk_scaling.is_allowed = allow;
        up_write(&hba->clk_scaling_lock);
+       mutex_unlock(&hba->wb_mutex);
 }
 
 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
@@ -9793,6 +9793,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        /* Initialize mutex for exception event control */
        mutex_init(&hba->ee_ctrl_mutex);
 
+       mutex_init(&hba->wb_mutex);
        init_rwsem(&hba->clk_scaling_lock);
 
        ufshcd_init_clk_gating(hba);
index 5adcb34..ccfaebc 100644 (file)
@@ -2614,6 +2614,7 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
        u8 req_on_hw_ring = 0;
        unsigned long flags;
        int ret = 0;
+       int val;
 
        if (!ep || !request || !ep->desc)
                return -EINVAL;
@@ -2649,6 +2650,13 @@ found:
 
        /* Update ring only if removed request is on pending_req_list list */
        if (req_on_hw_ring && link_trb) {
+               /* Stop DMA */
+               writel(EP_CMD_DFLUSH, &priv_dev->regs->ep_cmd);
+
+               /* wait for DFLUSH cleared */
+               readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
+                                         !(val & EP_CMD_DFLUSH), 1, 1000);
+
                link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma +
                        ((priv_req->end_trb + 1) * TRB_SIZE)));
                link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) |
@@ -2660,6 +2668,10 @@ found:
 
        cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
 
+       req = cdns3_next_request(&priv_ep->pending_req_list);
+       if (req)
+               cdns3_rearm_transfer(priv_ep, 1);
+
 not_found:
        spin_unlock_irqrestore(&priv_dev->lock, flags);
        return ret;
index 484b1cd..27c6012 100644 (file)
@@ -1294,12 +1294,12 @@ static void ci_extcon_wakeup_int(struct ci_hdrc *ci)
        cable_id = &ci->platdata->id_extcon;
        cable_vbus = &ci->platdata->vbus_extcon;
 
-       if ((!IS_ERR(cable_id->edev) || !IS_ERR(ci->role_switch))
+       if ((!IS_ERR(cable_id->edev) || ci->role_switch)
                && ci->is_otg &&
                (otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS))
                ci_irq(ci);
 
-       if ((!IS_ERR(cable_vbus->edev) || !IS_ERR(ci->role_switch))
+       if ((!IS_ERR(cable_vbus->edev) || ci->role_switch)
                && ci->is_otg &&
                (otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS))
                ci_irq(ci);
index 77e73fc..9eca403 100644 (file)
@@ -44,6 +44,9 @@
 #define USB_PRODUCT_USB5534B                   0x5534
 #define USB_VENDOR_CYPRESS                     0x04b4
 #define USB_PRODUCT_CY7C65632                  0x6570
+#define USB_VENDOR_TEXAS_INSTRUMENTS           0x0451
+#define USB_PRODUCT_TUSB8041_USB3              0x8140
+#define USB_PRODUCT_TUSB8041_USB2              0x8142
 #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND       0x01
 #define HUB_QUIRK_DISABLE_AUTOSUSPEND          0x02
 
@@ -5854,6 +5857,16 @@ static const struct usb_device_id hub_id_table[] = {
       .idVendor = USB_VENDOR_GENESYS_LOGIC,
       .bInterfaceClass = USB_CLASS_HUB,
       .driver_info = HUB_QUIRK_CHECK_PORT_AUTOSUSPEND},
+    { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+                       | USB_DEVICE_ID_MATCH_PRODUCT,
+      .idVendor = USB_VENDOR_TEXAS_INSTRUMENTS,
+      .idProduct = USB_PRODUCT_TUSB8041_USB2,
+      .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
+    { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+                       | USB_DEVICE_ID_MATCH_PRODUCT,
+      .idVendor = USB_VENDOR_TEXAS_INSTRUMENTS,
+      .idProduct = USB_PRODUCT_TUSB8041_USB3,
+      .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
     { .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS,
       .bDeviceClass = USB_CLASS_HUB},
     { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
index 6d93428..533baa8 100644 (file)
@@ -37,6 +37,71 @@ bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
 }
 EXPORT_SYMBOL_GPL(usb_acpi_power_manageable);
 
+#define UUID_USB_CONTROLLER_DSM "ce2ee385-00e6-48cb-9f05-2edb927c4899"
+#define USB_DSM_DISABLE_U1_U2_FOR_PORT 5
+
+/**
+ * usb_acpi_port_lpm_incapable - check if lpm should be disabled for a port.
+ * @hdev: USB device belonging to the usb hub
+ * @index: zero based port index
+ *
+ * Some USB3 ports may not support USB3 link power management U1/U2 states
+ * due to different retimer setup. ACPI provides _DSM method which returns 0x01
+ * if U1 and U2 states should be disabled. Evaluate _DSM with:
+ * Arg0: UUID = ce2ee385-00e6-48cb-9f05-2edb927c4899
+ * Arg1: Revision ID = 0
+ * Arg2: Function Index = 5
+ * Arg3: (empty)
+ *
+ * Return 1 if USB3 port is LPM incapable, negative on error, otherwise 0
+ */
+
+int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index)
+{
+       union acpi_object *obj;
+       acpi_handle port_handle;
+       int port1 = index + 1;
+       guid_t guid;
+       int ret;
+
+       ret = guid_parse(UUID_USB_CONTROLLER_DSM, &guid);
+       if (ret)
+               return ret;
+
+       port_handle = usb_get_hub_port_acpi_handle(hdev, port1);
+       if (!port_handle) {
+               dev_dbg(&hdev->dev, "port-%d no acpi handle\n", port1);
+               return -ENODEV;
+       }
+
+       if (!acpi_check_dsm(port_handle, &guid, 0,
+                           BIT(USB_DSM_DISABLE_U1_U2_FOR_PORT))) {
+               dev_dbg(&hdev->dev, "port-%d no _DSM function %d\n",
+                       port1, USB_DSM_DISABLE_U1_U2_FOR_PORT);
+               return -ENODEV;
+       }
+
+       obj = acpi_evaluate_dsm(port_handle, &guid, 0,
+                               USB_DSM_DISABLE_U1_U2_FOR_PORT, NULL);
+
+       if (!obj)
+               return -ENODEV;
+
+       if (obj->type != ACPI_TYPE_INTEGER) {
+               dev_dbg(&hdev->dev, "evaluate port-%d _DSM failed\n", port1);
+               ACPI_FREE(obj);
+               return -EINVAL;
+       }
+
+       if (obj->integer.value == 0x01)
+               ret = 1;
+
+       ACPI_FREE(obj);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_acpi_port_lpm_incapable);
+
 /**
  * usb_acpi_set_power_state - control usb port's power via acpi power
  * resource
index b2f72b0..be954a9 100644 (file)
@@ -3,6 +3,7 @@
 config USB_DWC3
        tristate "DesignWare USB3 DRD Core Support"
        depends on (USB || USB_GADGET) && HAS_DMA
+       depends on (EXTCON || EXTCON=n)
        select USB_XHCI_PLATFORM if USB_XHCI_HCD
        select USB_ROLE_SWITCH if USB_DWC3_DUAL_ROLE
        help
@@ -44,7 +45,6 @@ config USB_DWC3_GADGET
 config USB_DWC3_DUAL_ROLE
        bool "Dual Role mode"
        depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3))
-       depends on (EXTCON=y || EXTCON=USB_DWC3)
        help
          This is the default mode of working of DWC3 controller where
          both host and gadget features are enabled.
index 96121d1..0853536 100644 (file)
@@ -393,6 +393,7 @@ static void gadget_info_attr_release(struct config_item *item)
        WARN_ON(!list_empty(&gi->string_list));
        WARN_ON(!list_empty(&gi->available_func));
        kfree(gi->composite.gadget_driver.function);
+       kfree(gi->composite.gadget_driver.driver.name);
        kfree(gi);
 }
 
@@ -1572,7 +1573,6 @@ static const struct usb_gadget_driver configfs_driver_template = {
        .max_speed      = USB_SPEED_SUPER_PLUS,
        .driver = {
                .owner          = THIS_MODULE,
-               .name           = "configfs-gadget",
        },
        .match_existing_only = 1,
 };
@@ -1623,13 +1623,21 @@ static struct config_group *gadgets_make(
 
        gi->composite.gadget_driver = configfs_driver_template;
 
+       gi->composite.gadget_driver.driver.name = kasprintf(GFP_KERNEL,
+                                                           "configfs-gadget.%s", name);
+       if (!gi->composite.gadget_driver.driver.name)
+               goto err;
+
        gi->composite.gadget_driver.function = kstrdup(name, GFP_KERNEL);
        gi->composite.name = gi->composite.gadget_driver.function;
 
        if (!gi->composite.gadget_driver.function)
-               goto err;
+               goto out_free_driver_name;
 
        return &gi->group;
+
+out_free_driver_name:
+       kfree(gi->composite.gadget_driver.driver.name);
 err:
        kfree(gi);
        return ERR_PTR(-ENOMEM);
index 73dc10a..523a961 100644 (file)
@@ -279,6 +279,9 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
        struct usb_request *req = ffs->ep0req;
        int ret;
 
+       if (!req)
+               return -EINVAL;
+
        req->zero     = len < le16_to_cpu(ffs->ev.setup.wLength);
 
        spin_unlock_irq(&ffs->ev.waitq.lock);
@@ -1892,10 +1895,14 @@ static void functionfs_unbind(struct ffs_data *ffs)
        ENTER();
 
        if (!WARN_ON(!ffs->gadget)) {
+               /* dequeue before freeing ep0req */
+               usb_ep_dequeue(ffs->gadget->ep0, ffs->ep0req);
+               mutex_lock(&ffs->mutex);
                usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
                ffs->ep0req = NULL;
                ffs->gadget = NULL;
                clear_bit(FFS_FL_BOUND, &ffs->flags);
+               mutex_unlock(&ffs->mutex);
                ffs_data_put(ffs);
        }
 }
index c36bcfa..424bb3b 100644 (file)
@@ -83,7 +83,9 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
 /* peak (theoretical) bulk transfer rate in bits-per-second */
 static inline unsigned ncm_bitrate(struct usb_gadget *g)
 {
-       if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
+       if (!g)
+               return 0;
+       else if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
                return 4250000000U;
        else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
                return 3750000000U;
index 01c3ead..d605bc2 100644 (file)
@@ -229,6 +229,7 @@ static void put_ep (struct ep_data *data)
  */
 
 static const char *CHIP;
+static DEFINE_MUTEX(sb_mutex);         /* Serialize superblock operations */
 
 /*----------------------------------------------------------------------*/
 
@@ -2010,13 +2011,20 @@ gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
 {
        struct inode    *inode;
        struct dev_data *dev;
+       int             rc;
 
-       if (the_device)
-               return -ESRCH;
+       mutex_lock(&sb_mutex);
+
+       if (the_device) {
+               rc = -ESRCH;
+               goto Done;
+       }
 
        CHIP = usb_get_gadget_udc_name();
-       if (!CHIP)
-               return -ENODEV;
+       if (!CHIP) {
+               rc = -ENODEV;
+               goto Done;
+       }
 
        /* superblock */
        sb->s_blocksize = PAGE_SIZE;
@@ -2053,13 +2061,17 @@ gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
         * from binding to a controller.
         */
        the_device = dev;
-       return 0;
+       rc = 0;
+       goto Done;
 
-Enomem:
+ Enomem:
        kfree(CHIP);
        CHIP = NULL;
+       rc = -ENOMEM;
 
-       return -ENOMEM;
+ Done:
+       mutex_unlock(&sb_mutex);
+       return rc;
 }
 
 /* "mount -t gadgetfs path /dev/gadget" ends up here */
@@ -2081,6 +2093,7 @@ static int gadgetfs_init_fs_context(struct fs_context *fc)
 static void
 gadgetfs_kill_sb (struct super_block *sb)
 {
+       mutex_lock(&sb_mutex);
        kill_litter_super (sb);
        if (the_device) {
                put_dev (the_device);
@@ -2088,6 +2101,7 @@ gadgetfs_kill_sb (struct super_block *sb)
        }
        kfree(CHIP);
        CHIP = NULL;
+       mutex_unlock(&sb_mutex);
 }
 
 /*----------------------------------------------------------------------*/
index 53e38f8..c06dd1a 100644 (file)
@@ -293,6 +293,7 @@ static const struct uvc_descriptor_header * const uvc_fs_streaming_cls[] = {
        (const struct uvc_descriptor_header *) &uvc_format_yuv,
        (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
        (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+       (const struct uvc_descriptor_header *) &uvc_color_matching,
        (const struct uvc_descriptor_header *) &uvc_format_mjpg,
        (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
        (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
@@ -305,6 +306,7 @@ static const struct uvc_descriptor_header * const uvc_hs_streaming_cls[] = {
        (const struct uvc_descriptor_header *) &uvc_format_yuv,
        (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
        (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+       (const struct uvc_descriptor_header *) &uvc_color_matching,
        (const struct uvc_descriptor_header *) &uvc_format_mjpg,
        (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
        (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
@@ -317,6 +319,7 @@ static const struct uvc_descriptor_header * const uvc_ss_streaming_cls[] = {
        (const struct uvc_descriptor_header *) &uvc_format_yuv,
        (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
        (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+       (const struct uvc_descriptor_header *) &uvc_color_matching,
        (const struct uvc_descriptor_header *) &uvc_format_mjpg,
        (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
        (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
index 9cea785..38d06e5 100644 (file)
@@ -29,7 +29,7 @@
 #include "ehci-fsl.h"
 
 #define DRIVER_DESC "Freescale EHCI Host controller driver"
-#define DRV_NAME "ehci-fsl"
+#define DRV_NAME "fsl-ehci"
 
 static struct hc_driver __read_mostly fsl_ehci_hc_driver;
 
index 79d679b..fb988e4 100644 (file)
@@ -78,9 +78,12 @@ static const char hcd_name[] = "xhci_hcd";
 static struct hc_driver __read_mostly xhci_pci_hc_driver;
 
 static int xhci_pci_setup(struct usb_hcd *hcd);
+static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+                                     struct usb_tt *tt, gfp_t mem_flags);
 
 static const struct xhci_driver_overrides xhci_pci_overrides __initconst = {
        .reset = xhci_pci_setup,
+       .update_hub_device = xhci_pci_update_hub_device,
 };
 
 /* called after powerup, by probe or system-pm "wakeup" */
@@ -352,8 +355,38 @@ static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
                                NULL);
        ACPI_FREE(obj);
 }
+
+static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       struct xhci_hub *rhub = &xhci->usb3_rhub;
+       int ret;
+       int i;
+
+       /* This is not the usb3 roothub we are looking for */
+       if (hcd != rhub->hcd)
+               return;
+
+       if (hdev->maxchild > rhub->num_ports) {
+               dev_err(&hdev->dev, "USB3 roothub port number mismatch\n");
+               return;
+       }
+
+       for (i = 0; i < hdev->maxchild; i++) {
+               ret = usb_acpi_port_lpm_incapable(hdev, i);
+
+               dev_dbg(&hdev->dev, "port-%d disable U1/U2 _DSM: %d\n", i + 1, ret);
+
+               if (ret >= 0) {
+                       rhub->ports[i]->lpm_incapable = ret;
+                       continue;
+               }
+       }
+}
+
 #else
 static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
+static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev) { }
 #endif /* CONFIG_ACPI */
 
 /* called during probe() after chip reset completes */
@@ -386,6 +419,16 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
        return xhci_pci_reinit(xhci, pdev);
 }
 
+static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+                                     struct usb_tt *tt, gfp_t mem_flags)
+{
+       /* Check if acpi claims some USB3 roothub ports are lpm incapable */
+       if (!hdev->parent)
+               xhci_find_lpm_incapable_ports(hcd, hdev);
+
+       return xhci_update_hub_device(hcd, hdev, tt, mem_flags);
+}
+
 /*
  * We need to register our own PCI probe function (instead of the USB core's
  * function) in order to create a second roothub under xHCI.
@@ -455,6 +498,8 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
        if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
                pm_runtime_allow(&dev->dev);
 
+       dma_set_max_seg_size(&dev->dev, UINT_MAX);
+
        return 0;
 
 put_usb3_hcd:
index ddc3003..f5b0e1c 100644 (file)
@@ -1169,7 +1169,10 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
        struct xhci_virt_ep *ep;
        struct xhci_ring *ring;
 
-       ep = &xhci->devs[slot_id]->eps[ep_index];
+       ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+       if (!ep)
+               return;
+
        if ((ep->ep_state & EP_HAS_STREAMS) ||
                        (ep->ep_state & EP_GETTING_NO_STREAMS)) {
                int stream_id;
index 79d7931..2b280be 100644 (file)
@@ -3974,6 +3974,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct xhci_virt_device *virt_dev;
        struct xhci_slot_ctx *slot_ctx;
+       unsigned long flags;
        int i, ret;
 
        /*
@@ -4000,7 +4001,11 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
                virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
        virt_dev->udev = NULL;
        xhci_disable_slot(xhci, udev->slot_id);
+
+       spin_lock_irqsave(&xhci->lock, flags);
        xhci_free_virt_device(xhci, udev->slot_id);
+       spin_unlock_irqrestore(&xhci->lock, flags);
+
 }
 
 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
@@ -5044,6 +5049,7 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
                        struct usb_device *udev, enum usb3_link_state state)
 {
        struct xhci_hcd *xhci;
+       struct xhci_port *port;
        u16 hub_encoded_timeout;
        int mel;
        int ret;
@@ -5060,6 +5066,13 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
        if (xhci_check_tier_policy(xhci, udev, state) < 0)
                return USB3_LPM_DISABLED;
 
+       /* If connected to root port then check port can handle lpm */
+       if (udev->parent && !udev->parent->parent) {
+               port = xhci->usb3_rhub.ports[udev->portnum - 1];
+               if (port->lpm_incapable)
+                       return USB3_LPM_DISABLED;
+       }
+
        hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
        mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
        if (mel < 0) {
@@ -5119,7 +5132,7 @@ static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
 /* Once a hub descriptor is fetched for a device, we need to update the xHC's
  * internal data structures for the device.
  */
-static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
                        struct usb_tt *tt, gfp_t mem_flags)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -5219,6 +5232,7 @@ static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
        xhci_free_command(xhci, config_cmd);
        return ret;
 }
+EXPORT_SYMBOL_GPL(xhci_update_hub_device);
 
 static int xhci_get_frame(struct usb_hcd *hcd)
 {
@@ -5502,6 +5516,8 @@ void xhci_init_driver(struct hc_driver *drv,
                        drv->check_bandwidth = over->check_bandwidth;
                if (over->reset_bandwidth)
                        drv->reset_bandwidth = over->reset_bandwidth;
+               if (over->update_hub_device)
+                       drv->update_hub_device = over->update_hub_device;
        }
 }
 EXPORT_SYMBOL_GPL(xhci_init_driver);
index c9f06c5..dcee7f3 100644 (file)
@@ -1735,6 +1735,7 @@ struct xhci_port {
        int                     hcd_portnum;
        struct xhci_hub         *rhub;
        struct xhci_port_cap    *port_cap;
+       unsigned int            lpm_incapable:1;
 };
 
 struct xhci_hub {
@@ -1943,6 +1944,8 @@ struct xhci_driver_overrides {
                             struct usb_host_endpoint *ep);
        int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
        void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
+       int (*update_hub_device)(struct usb_hcd *hcd, struct usb_device *hdev,
+                           struct usb_tt *tt, gfp_t mem_flags);
 };
 
 #define        XHCI_CFC_DELAY          10
@@ -2122,6 +2125,8 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                       struct usb_host_endpoint *ep);
 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+                          struct usb_tt *tt, gfp_t mem_flags);
 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
 int xhci_ext_cap_init(struct xhci_hcd *xhci);
 
index f9427a6..1e3df27 100644 (file)
@@ -814,7 +814,7 @@ static int iowarrior_probe(struct usb_interface *interface,
                        break;
 
                case USB_DEVICE_ID_CODEMERCS_IOW100:
-                       dev->report_size = 13;
+                       dev->report_size = 12;
                        break;
                }
        }
index 94e7966..969c4c4 100644 (file)
 
 #include "onboard_usb_hub.h"
 
+static void onboard_hub_attach_usb_driver(struct work_struct *work);
+
 static struct usb_device_driver onboard_hub_usbdev_driver;
+static DECLARE_WORK(attach_usb_driver_work, onboard_hub_attach_usb_driver);
 
 /************************** Platform driver **************************/
 
@@ -45,7 +48,6 @@ struct onboard_hub {
        bool is_powered_on;
        bool going_away;
        struct list_head udev_list;
-       struct work_struct attach_usb_driver_work;
        struct mutex lock;
 };
 
@@ -271,8 +273,7 @@ static int onboard_hub_probe(struct platform_device *pdev)
         * This needs to be done deferred to avoid self-deadlocks on systems
         * with nested onboard hubs.
         */
-       INIT_WORK(&hub->attach_usb_driver_work, onboard_hub_attach_usb_driver);
-       schedule_work(&hub->attach_usb_driver_work);
+       schedule_work(&attach_usb_driver_work);
 
        return 0;
 }
@@ -285,9 +286,6 @@ static int onboard_hub_remove(struct platform_device *pdev)
 
        hub->going_away = true;
 
-       if (&hub->attach_usb_driver_work != current_work())
-               cancel_work_sync(&hub->attach_usb_driver_work);
-
        mutex_lock(&hub->lock);
 
        /* unbind the USB devices to avoid dangling references to this device */
@@ -433,13 +431,13 @@ static int __init onboard_hub_init(void)
 {
        int ret;
 
-       ret = platform_driver_register(&onboard_hub_driver);
+       ret = usb_register_device_driver(&onboard_hub_usbdev_driver, THIS_MODULE);
        if (ret)
                return ret;
 
-       ret = usb_register_device_driver(&onboard_hub_usbdev_driver, THIS_MODULE);
+       ret = platform_driver_register(&onboard_hub_driver);
        if (ret)
-               platform_driver_unregister(&onboard_hub_driver);
+               usb_deregister_device_driver(&onboard_hub_usbdev_driver);
 
        return ret;
 }
@@ -449,6 +447,8 @@ static void __exit onboard_hub_exit(void)
 {
        usb_deregister_device_driver(&onboard_hub_usbdev_driver);
        platform_driver_unregister(&onboard_hub_driver);
+
+       cancel_work_sync(&attach_usb_driver_work);
 }
 module_exit(onboard_hub_exit);
 
index 476f55d..44a21ec 100644 (file)
@@ -411,8 +411,10 @@ static int omap2430_probe(struct platform_device *pdev)
                memset(musb_res, 0, sizeof(*musb_res) * ARRAY_SIZE(musb_res));
 
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-               if (!res)
+               if (!res) {
+                       ret = -EINVAL;
                        goto err2;
+               }
 
                musb_res[i].start = res->start;
                musb_res[i].end = res->end;
index 67372ac..832ad59 100644 (file)
@@ -60,6 +60,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
        { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
        { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+       { USB_DEVICE(0x0908, 0x0070) }, /* Siemens SCALANCE LPE-9000 USB Serial Console */
        { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
        { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
        { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
index dee79c7..ee5ac4e 100644 (file)
@@ -255,10 +255,16 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_EP06                   0x0306
 #define QUECTEL_PRODUCT_EM05G                  0x030a
 #define QUECTEL_PRODUCT_EM060K                 0x030b
+#define QUECTEL_PRODUCT_EM05G_CS               0x030c
+#define QUECTEL_PRODUCT_EM05CN_SG              0x0310
 #define QUECTEL_PRODUCT_EM05G_SG               0x0311
+#define QUECTEL_PRODUCT_EM05CN                 0x0312
+#define QUECTEL_PRODUCT_EM05G_GR               0x0313
+#define QUECTEL_PRODUCT_EM05G_RS               0x0314
 #define QUECTEL_PRODUCT_EM12                   0x0512
 #define QUECTEL_PRODUCT_RM500Q                 0x0800
 #define QUECTEL_PRODUCT_RM520N                 0x0801
+#define QUECTEL_PRODUCT_EC200U                 0x0901
 #define QUECTEL_PRODUCT_EC200S_CN              0x6002
 #define QUECTEL_PRODUCT_EC200T                 0x6026
 #define QUECTEL_PRODUCT_RM500K                 0x7001
@@ -1159,8 +1165,18 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
+       { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05CN, 0xff),
+         .driver_info = RSVD(6) | ZLP },
+       { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05CN_SG, 0xff),
+         .driver_info = RSVD(6) | ZLP },
        { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
          .driver_info = RSVD(6) | ZLP },
+       { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_CS, 0xff),
+         .driver_info = RSVD(6) | ZLP },
+       { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_GR, 0xff),
+         .driver_info = RSVD(6) | ZLP },
+       { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_RS, 0xff),
+         .driver_info = RSVD(6) | ZLP },
        { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_SG, 0xff),
          .driver_info = RSVD(6) | ZLP },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
@@ -1180,6 +1196,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
index 3f720fa..d73282c 100644 (file)
@@ -116,6 +116,19 @@ static int uas_use_uas_driver(struct usb_interface *intf,
        if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
                flags |= US_FL_NO_ATA_1X;
 
+       /*
+        * RTL9210-based enclosure from HIKSEMI, MD202 reportedly have issues
+        * with UAS.  This isn't distinguishable with just idVendor and
+        * idProduct, use manufacturer and product too.
+        *
+        * Reported-by: Hongling Zeng <zenghongling@kylinos.cn>
+        */
+       if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bda &&
+                       le16_to_cpu(udev->descriptor.idProduct) == 0x9210 &&
+                       (udev->manufacturer && !strcmp(udev->manufacturer, "HIKSEMI")) &&
+                       (udev->product && !strcmp(udev->product, "MD202")))
+               flags |= US_FL_IGNORE_UAS;
+
        usb_stor_adjust_quirks(udev, &flags);
 
        if (flags & US_FL_IGNORE_UAS) {
index 251778d..c7b763d 100644 (file)
@@ -83,13 +83,6 @@ UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_REPORT_LUNS),
 
-/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
-UNUSUAL_DEV(0x0bda, 0x9210, 0x0000, 0x9999,
-               "Hiksemi",
-               "External HDD",
-               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
-               US_FL_IGNORE_UAS),
-
 /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
 UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
                "Initio Corporation",
index de66a29..9a68602 100644 (file)
@@ -419,6 +419,18 @@ static const char * const pin_assignments[] = {
        [DP_PIN_ASSIGN_F] = "F",
 };
 
+/*
+ * Helper function to extract a peripheral's currently supported
+ * Pin Assignments from its DisplayPort alternate mode state.
+ */
+static u8 get_current_pin_assignments(struct dp_altmode *dp)
+{
+       if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_UFP_U_AS_DFP_D)
+               return DP_CAP_PIN_ASSIGN_DFP_D(dp->alt->vdo);
+       else
+               return DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo);
+}
+
 static ssize_t
 pin_assignment_store(struct device *dev, struct device_attribute *attr,
                     const char *buf, size_t size)
@@ -445,10 +457,7 @@ pin_assignment_store(struct device *dev, struct device_attribute *attr,
                goto out_unlock;
        }
 
-       if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D)
-               assignments = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo);
-       else
-               assignments = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo);
+       assignments = get_current_pin_assignments(dp);
 
        if (!(DP_CONF_GET_PIN_ASSIGN(conf) & assignments)) {
                ret = -EINVAL;
@@ -485,10 +494,7 @@ static ssize_t pin_assignment_show(struct device *dev,
 
        cur = get_count_order(DP_CONF_GET_PIN_ASSIGN(dp->data.conf));
 
-       if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D)
-               assignments = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo);
-       else
-               assignments = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo);
+       assignments = get_current_pin_assignments(dp);
 
        for (i = 0; assignments; assignments >>= 1, i++) {
                if (assignments & 1) {
index 904c7b4..59b366b 100644 (file)
@@ -4594,14 +4594,13 @@ static void run_state_machine(struct tcpm_port *port)
                tcpm_set_state(port, ready_state(port), 0);
                break;
        case DR_SWAP_CHANGE_DR:
-               if (port->data_role == TYPEC_HOST) {
-                       tcpm_unregister_altmodes(port);
+               tcpm_unregister_altmodes(port);
+               if (port->data_role == TYPEC_HOST)
                        tcpm_set_roles(port, true, port->pwr_role,
                                       TYPEC_DEVICE);
-               } else {
+               else
                        tcpm_set_roles(port, true, port->pwr_role,
                                       TYPEC_HOST);
-               }
                tcpm_ams_finish(port);
                tcpm_set_state(port, ready_state(port), 0);
                break;
index eabe519..1292241 100644 (file)
@@ -187,6 +187,7 @@ EXPORT_SYMBOL_GPL(ucsi_send_command);
 
 struct ucsi_work {
        struct delayed_work work;
+       struct list_head node;
        unsigned long delay;
        unsigned int count;
        struct ucsi_connector *con;
@@ -202,6 +203,7 @@ static void ucsi_poll_worker(struct work_struct *work)
        mutex_lock(&con->lock);
 
        if (!con->partner) {
+               list_del(&uwork->node);
                mutex_unlock(&con->lock);
                kfree(uwork);
                return;
@@ -209,10 +211,12 @@ static void ucsi_poll_worker(struct work_struct *work)
 
        ret = uwork->cb(con);
 
-       if (uwork->count-- && (ret == -EBUSY || ret == -ETIMEDOUT))
+       if (uwork->count-- && (ret == -EBUSY || ret == -ETIMEDOUT)) {
                queue_delayed_work(con->wq, &uwork->work, uwork->delay);
-       else
+       } else {
+               list_del(&uwork->node);
                kfree(uwork);
+       }
 
        mutex_unlock(&con->lock);
 }
@@ -236,6 +240,7 @@ static int ucsi_partner_task(struct ucsi_connector *con,
        uwork->con = con;
        uwork->cb = cb;
 
+       list_add_tail(&uwork->node, &con->partner_tasks);
        queue_delayed_work(con->wq, &uwork->work, delay);
 
        return 0;
@@ -1056,6 +1061,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
        INIT_WORK(&con->work, ucsi_handle_connector_change);
        init_completion(&con->complete);
        mutex_init(&con->lock);
+       INIT_LIST_HEAD(&con->partner_tasks);
        con->num = index + 1;
        con->ucsi = ucsi;
 
@@ -1420,8 +1426,20 @@ void ucsi_unregister(struct ucsi *ucsi)
                ucsi_unregister_altmodes(&ucsi->connector[i],
                                         UCSI_RECIPIENT_CON);
                ucsi_unregister_port_psy(&ucsi->connector[i]);
-               if (ucsi->connector[i].wq)
+
+               if (ucsi->connector[i].wq) {
+                       struct ucsi_work *uwork;
+
+                       mutex_lock(&ucsi->connector[i].lock);
+                       /*
+                        * queue delayed items immediately so they can execute
+                        * and free themselves before the wq is destroyed
+                        */
+                       list_for_each_entry(uwork, &ucsi->connector[i].partner_tasks, node)
+                               mod_delayed_work(ucsi->connector[i].wq, &uwork->work, 0);
+                       mutex_unlock(&ucsi->connector[i].lock);
                        destroy_workqueue(ucsi->connector[i].wq);
+               }
                typec_unregister_port(ucsi->connector[i].port);
        }
 
index c968474..60ce9fb 100644 (file)
@@ -322,6 +322,7 @@ struct ucsi_connector {
        struct work_struct work;
        struct completion complete;
        struct workqueue_struct *wq;
+       struct list_head partner_tasks;
 
        struct typec_port *port;
        struct typec_partner *partner;
index 23c24fe..2209372 100644 (file)
@@ -1856,24 +1856,33 @@ unwind:
  * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
  * hugetlbfs is in use.
  */
-static void vfio_test_domain_fgsp(struct vfio_domain *domain)
+static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
 {
-       struct page *pages;
        int ret, order = get_order(PAGE_SIZE * 2);
+       struct vfio_iova *region;
+       struct page *pages;
+       dma_addr_t start;
 
        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!pages)
                return;
 
-       ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
-                       IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
-       if (!ret) {
-               size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
+       list_for_each_entry(region, regions, list) {
+               start = ALIGN(region->start, PAGE_SIZE * 2);
+               if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
+                       continue;
 
-               if (unmapped == PAGE_SIZE)
-                       iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
-               else
-                       domain->fgsp = true;
+               ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
+                               IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
+               if (!ret) {
+                       size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
+
+                       if (unmapped == PAGE_SIZE)
+                               iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
+                       else
+                               domain->fgsp = true;
+               }
+               break;
        }
 
        __free_pages(pages, order);
@@ -2326,7 +2335,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
                }
        }
 
-       vfio_test_domain_fgsp(domain);
+       vfio_test_domain_fgsp(domain, &iova_copy);
 
        /* replay mappings on new domains */
        ret = vfio_iommu_replay(iommu, domain);
index f2ae2e5..4a2ddf7 100644 (file)
@@ -1166,6 +1166,8 @@ int w1_process(void *data)
        /* remainder if it woke up early */
        unsigned long jremain = 0;
 
+       atomic_inc(&dev->refcnt);
+
        for (;;) {
 
                if (!jremain && dev->search_count) {
@@ -1193,8 +1195,10 @@ int w1_process(void *data)
                 */
                mutex_unlock(&dev->list_mutex);
 
-               if (kthread_should_stop())
+               if (kthread_should_stop()) {
+                       __set_current_state(TASK_RUNNING);
                        break;
+               }
 
                /* Only sleep when the search is active. */
                if (dev->search_count) {
index b3e1792..3a71c5e 100644 (file)
@@ -51,10 +51,9 @@ static struct w1_master *w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
        dev->search_count       = w1_search_count;
        dev->enable_pullup      = w1_enable_pullup;
 
-       /* 1 for w1_process to decrement
-        * 1 for __w1_remove_master_device to decrement
+       /* For __w1_remove_master_device to decrement
         */
-       atomic_set(&dev->refcnt, 2);
+       atomic_set(&dev->refcnt, 1);
 
        INIT_LIST_HEAD(&dev->slist);
        INIT_LIST_HEAD(&dev->async_list);
index cefa222..8daeed3 100644 (file)
@@ -880,7 +880,7 @@ affs_truncate(struct inode *inode)
        if (inode->i_size > AFFS_I(inode)->mmu_private) {
                struct address_space *mapping = inode->i_mapping;
                struct page *page;
-               void *fsdata;
+               void *fsdata = NULL;
                loff_t isize = inode->i_size;
                int res;
 
index de63572..9a780fa 100644 (file)
@@ -2034,7 +2034,7 @@ static int elf_core_dump(struct coredump_params *cprm)
         * The number of segs are recored into ELF header as 16bit value.
         * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
         */
-       segs = cprm->vma_count + elf_core_extra_phdrs();
+       segs = cprm->vma_count + elf_core_extra_phdrs(cprm);
 
        /* for notes section */
        segs++;
@@ -2074,7 +2074,7 @@ static int elf_core_dump(struct coredump_params *cprm)
        dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
 
        offset += cprm->vma_data_size;
-       offset += elf_core_extra_data_size();
+       offset += elf_core_extra_data_size(cprm);
        e_shoff = offset;
 
        if (e_phnum == PN_XNUM) {
index 096e352..a05eafc 100644 (file)
@@ -1509,7 +1509,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
        tmp->next = thread_list;
        thread_list = tmp;
 
-       segs = cprm->vma_count + elf_core_extra_phdrs();
+       segs = cprm->vma_count + elf_core_extra_phdrs(cprm);
 
        /* for notes section */
        segs++;
@@ -1555,7 +1555,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
        dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
 
        offset += cprm->vma_data_size;
-       offset += elf_core_extra_data_size();
+       offset += elf_core_extra_data_size(cprm);
        e_shoff = offset;
 
        if (e_phnum == PN_XNUM) {
index 8aeaada..3aa0422 100644 (file)
@@ -367,7 +367,14 @@ error:
        btrfs_print_tree(eb, 0);
        btrfs_err(fs_info, "block=%llu write time tree block corruption detected",
                  eb->start);
-       WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+       /*
+        * Be noisy if this is an extent buffer from a log tree. We don't abort
+        * a transaction in case there's a bad log tree extent buffer, we just
+        * fallback to a transaction commit. Still we want to know when there is
+        * a bad log tree extent buffer, as that may signal a bug somewhere.
+        */
+       WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
+               btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID);
        return ret;
 }
 
index 834bbcb..af046d2 100644 (file)
@@ -3541,6 +3541,7 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
                struct extent_buffer *leaf = path->nodes[0];
                struct btrfs_file_extent_item *extent;
                u64 extent_end;
+               u8 type;
 
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
@@ -3596,10 +3597,16 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
 
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_file_extent_item);
+               type = btrfs_file_extent_type(leaf, extent);
 
-               if (btrfs_file_extent_disk_bytenr(leaf, extent) == 0 ||
-                   btrfs_file_extent_type(leaf, extent) ==
-                   BTRFS_FILE_EXTENT_PREALLOC) {
+               /*
+                * Can't access the extent's disk_bytenr field if this is an
+                * inline extent, since at that offset, it's where the extent
+                * data starts.
+                */
+               if (type == BTRFS_FILE_EXTENT_PREALLOC ||
+                   (type == BTRFS_FILE_EXTENT_REG &&
+                    btrfs_file_extent_disk_bytenr(leaf, extent) == 0)) {
                        /*
                         * Explicit hole or prealloc extent, search for delalloc.
                         * A prealloc extent is treated like a hole.
index a749367..37b86ac 100644 (file)
@@ -119,6 +119,12 @@ enum {
        /* Indicate that we want to commit the transaction. */
        BTRFS_FS_NEED_TRANS_COMMIT,
 
+       /*
+        * Indicate metadata over-commit is disabled. This is set when active
+        * zone tracking is needed.
+        */
+       BTRFS_FS_NO_OVERCOMMIT,
+
 #if BITS_PER_LONG == 32
        /* Indicate if we have error/warn message printed on 32bit systems */
        BTRFS_FS_32BIT_ERROR,
index d275bf2..af97413 100644 (file)
@@ -2765,9 +2765,19 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
 
                        /*
                         * Old roots should be searched when inserting qgroup
-                        * extent record
+                        * extent record.
+                        *
+                        * But for INCONSISTENT (NO_ACCOUNTING) -> rescan case,
+                        * we may have some record inserted during
+                        * NO_ACCOUNTING (thus no old_roots populated), but
+                        * later we start rescan, which clears NO_ACCOUNTING,
+                        * leaving some inserted records without old_roots
+                        * populated.
+                        *
+                        * Those cases are rare and should not cause too much
+                        * time spent during commit_transaction().
                         */
-                       if (WARN_ON(!record->old_roots)) {
+                       if (!record->old_roots) {
                                /* Search commit root to find old_roots */
                                ret = btrfs_find_all_roots(&ctx, false);
                                if (ret < 0)
@@ -3357,6 +3367,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
        int err = -ENOMEM;
        int ret = 0;
        bool stopped = false;
+       bool did_leaf_rescans = false;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -3377,6 +3388,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
                }
 
                err = qgroup_rescan_leaf(trans, path);
+               did_leaf_rescans = true;
 
                if (err > 0)
                        btrfs_commit_transaction(trans);
@@ -3397,16 +3409,23 @@ out:
        mutex_unlock(&fs_info->qgroup_rescan_lock);
 
        /*
-        * only update status, since the previous part has already updated the
-        * qgroup info.
+        * Only update status, since the previous part has already updated the
+        * qgroup info, and only if we did any actual work. This also prevents
+        * race with a concurrent quota disable, which has already set
+        * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at
+        * btrfs_quota_disable().
         */
-       trans = btrfs_start_transaction(fs_info->quota_root, 1);
-       if (IS_ERR(trans)) {
-               err = PTR_ERR(trans);
+       if (did_leaf_rescans) {
+               trans = btrfs_start_transaction(fs_info->quota_root, 1);
+               if (IS_ERR(trans)) {
+                       err = PTR_ERR(trans);
+                       trans = NULL;
+                       btrfs_err(fs_info,
+                                 "fail to start transaction for status update: %d",
+                                 err);
+               }
+       } else {
                trans = NULL;
-               btrfs_err(fs_info,
-                         "fail to start transaction for status update: %d",
-                         err);
        }
 
        mutex_lock(&fs_info->qgroup_rescan_lock);
index d28ee4e..69c0950 100644 (file)
@@ -407,7 +407,8 @@ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
                return 0;
 
        used = btrfs_space_info_used(space_info, true);
-       if (btrfs_is_zoned(fs_info) && (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
+       if (test_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags) &&
+           (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
                avail = 0;
        else
                avail = calc_available_free_space(fs_info, space_info, flush);
index fb52aa0..d432615 100644 (file)
@@ -2980,7 +2980,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
                ret = 0;
        if (ret) {
                blk_finish_plug(&plug);
-               btrfs_abort_transaction(trans, ret);
                btrfs_set_log_full_commit(trans);
                mutex_unlock(&root->log_mutex);
                goto out;
@@ -3045,15 +3044,12 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 
                blk_finish_plug(&plug);
                btrfs_set_log_full_commit(trans);
-
-               if (ret != -ENOSPC) {
-                       btrfs_abort_transaction(trans, ret);
-                       mutex_unlock(&log_root_tree->log_mutex);
-                       goto out;
-               }
+               if (ret != -ENOSPC)
+                       btrfs_err(fs_info,
+                                 "failed to update log for root %llu ret %d",
+                                 root->root_key.objectid, ret);
                btrfs_wait_tree_log_extents(log, mark);
                mutex_unlock(&log_root_tree->log_mutex);
-               ret = BTRFS_LOG_FORCE_COMMIT;
                goto out;
        }
 
@@ -3112,7 +3108,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
                goto out_wake_log_root;
        } else if (ret) {
                btrfs_set_log_full_commit(trans);
-               btrfs_abort_transaction(trans, ret);
                mutex_unlock(&log_root_tree->log_mutex);
                goto out_wake_log_root;
        }
@@ -3826,7 +3821,10 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
                                              path->slots[0]);
                        if (tmp.type == BTRFS_DIR_INDEX_KEY)
                                last_old_dentry_offset = tmp.offset;
+               } else if (ret < 0) {
+                       err = ret;
                }
+
                goto done;
        }
 
@@ -3846,19 +3844,34 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
                 */
                if (tmp.type == BTRFS_DIR_INDEX_KEY)
                        last_old_dentry_offset = tmp.offset;
+       } else if (ret < 0) {
+               err = ret;
+               goto done;
        }
+
        btrfs_release_path(path);
 
        /*
-        * Find the first key from this transaction again.  See the note for
-        * log_new_dir_dentries, if we're logging a directory recursively we
-        * won't be holding its i_mutex, which means we can modify the directory
-        * while we're logging it.  If we remove an entry between our first
-        * search and this search we'll not find the key again and can just
-        * bail.
+        * Find the first key from this transaction again or the one we were at
+        * in the loop below in case we had to reschedule. We may be logging the
+        * directory without holding its VFS lock, which happen when logging new
+        * dentries (through log_new_dir_dentries()) or in some cases when we
+        * need to log the parent directory of an inode. This means a dir index
+        * key might be deleted from the inode's root, and therefore we may not
+        * find it anymore. If we can't find it, just move to the next key. We
+        * can not bail out and ignore, because if we do that we will simply
+        * not log dir index keys that come after the one that was just deleted
+        * and we can end up logging a dir index range that ends at (u64)-1
+        * (@last_offset is initialized to that), resulting in removing dir
+        * entries we should not remove at log replay time.
         */
 search:
        ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
+       if (ret > 0)
+               ret = btrfs_next_item(root, path);
+       if (ret < 0)
+               err = ret;
+       /* If ret is 1, there are no more keys in the inode's root. */
        if (ret != 0)
                goto done;
 
@@ -5580,8 +5593,10 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans,
         * LOG_INODE_EXISTS mode) and slow down other fsyncs or transaction
         * commits.
         */
-       if (ctx->num_conflict_inodes >= MAX_CONFLICT_INODES)
+       if (ctx->num_conflict_inodes >= MAX_CONFLICT_INODES) {
+               btrfs_set_log_full_commit(trans);
                return BTRFS_LOG_FORCE_COMMIT;
+       }
 
        inode = btrfs_iget(root->fs_info->sb, ino, root);
        /*
index aa25fa3..bcfef75 100644 (file)
@@ -768,8 +768,11 @@ static noinline struct btrfs_device *device_list_add(const char *path,
                                        BTRFS_SUPER_FLAG_CHANGING_FSID_V2);
 
        error = lookup_bdev(path, &path_devt);
-       if (error)
+       if (error) {
+               btrfs_err(NULL, "failed to lookup block device for path %s: %d",
+                         path, error);
                return ERR_PTR(error);
+       }
 
        if (fsid_change_in_progress) {
                if (!has_metadata_uuid)
@@ -836,6 +839,9 @@ static noinline struct btrfs_device *device_list_add(const char *path,
                unsigned int nofs_flag;
 
                if (fs_devices->opened) {
+                       btrfs_err(NULL,
+               "device %s belongs to fsid %pU, and the fs is already mounted",
+                                 path, fs_devices->fsid);
                        mutex_unlock(&fs_devices->device_list_mutex);
                        return ERR_PTR(-EBUSY);
                }
@@ -905,6 +911,9 @@ static noinline struct btrfs_device *device_list_add(const char *path,
                         * generation are equal.
                         */
                        mutex_unlock(&fs_devices->device_list_mutex);
+                       btrfs_err(NULL,
+"device %s already registered with a higher generation, found %llu expect %llu",
+                                 path, found_transid, device->generation);
                        return ERR_PTR(-EEXIST);
                }
 
@@ -2005,42 +2014,42 @@ static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
        return num_devices;
 }
 
+static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info,
+                                    struct block_device *bdev, int copy_num)
+{
+       struct btrfs_super_block *disk_super;
+       const size_t len = sizeof(disk_super->magic);
+       const u64 bytenr = btrfs_sb_offset(copy_num);
+       int ret;
+
+       disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr);
+       if (IS_ERR(disk_super))
+               return;
+
+       memset(&disk_super->magic, 0, len);
+       folio_mark_dirty(virt_to_folio(disk_super));
+       btrfs_release_disk_super(disk_super);
+
+       ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1);
+       if (ret)
+               btrfs_warn(fs_info, "error clearing superblock number %d (%d)",
+                       copy_num, ret);
+}
+
 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
                               struct block_device *bdev,
                               const char *device_path)
 {
-       struct btrfs_super_block *disk_super;
        int copy_num;
 
        if (!bdev)
                return;
 
        for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
-               struct page *page;
-               int ret;
-
-               disk_super = btrfs_read_dev_one_super(bdev, copy_num, false);
-               if (IS_ERR(disk_super))
-                       continue;
-
-               if (bdev_is_zoned(bdev)) {
+               if (bdev_is_zoned(bdev))
                        btrfs_reset_sb_log_zones(bdev, copy_num);
-                       continue;
-               }
-
-               memset(&disk_super->magic, 0, sizeof(disk_super->magic));
-
-               page = virt_to_page(disk_super);
-               set_page_dirty(page);
-               lock_page(page);
-               /* write_on_page() unlocks the page */
-               ret = write_one_page(page);
-               if (ret)
-                       btrfs_warn(fs_info,
-                               "error clearing superblock number %d (%d)",
-                               copy_num, ret);
-               btrfs_release_disk_super(disk_super);
-
+               else
+                       btrfs_scratch_superblock(fs_info, bdev, copy_num);
        }
 
        /* Notify udev that device has changed */
index a759668..1f503e8 100644 (file)
@@ -539,6 +539,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
                }
                atomic_set(&zone_info->active_zones_left,
                           max_active_zones - nactive);
+               /* Overcommit does not work well with active zone tacking. */
+               set_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags);
        }
 
        /* Validate superblock log */
index 5db73c0..cbc18b4 100644 (file)
@@ -278,6 +278,7 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
         * ( for NTLMSSP_AV_NB_DOMAIN_NAME followed by NTLMSSP_AV_EOL ) +
         * unicode length of a netbios domain name
         */
+       kfree_sensitive(ses->auth_key.response);
        ses->auth_key.len = size + 2 * dlen;
        ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL);
        if (!ses->auth_key.response) {
index d371259..b2a04b4 100644 (file)
@@ -2606,11 +2606,14 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
        INIT_LIST_HEAD(&tcon->pending_opens);
        tcon->status = TID_GOOD;
 
-       /* schedule query interfaces poll */
        INIT_DELAYED_WORK(&tcon->query_interfaces,
                          smb2_query_server_interfaces);
-       queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
-                          (SMB_INTERFACE_POLL_INTERVAL * HZ));
+       if (ses->server->dialect >= SMB30_PROT_ID &&
+           (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+               /* schedule query interfaces poll */
+               queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+                                  (SMB_INTERFACE_POLL_INTERVAL * HZ));
+       }
 
        spin_lock(&cifs_tcp_ses_lock);
        list_add(&tcon->tcon_list, &ses->tcon_list);
index 43ad117..ac86bd0 100644 (file)
@@ -269,7 +269,7 @@ static int dfscache_proc_show(struct seq_file *m, void *v)
                        list_for_each_entry(t, &ce->tlist, list) {
                                seq_printf(m, "  %s%s\n",
                                           t->name,
-                                          ce->tgthint == t ? " (target hint)" : "");
+                                          READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
                        }
                }
        }
@@ -321,7 +321,7 @@ static inline void dump_tgts(const struct cache_entry *ce)
        cifs_dbg(FYI, "target list:\n");
        list_for_each_entry(t, &ce->tlist, list) {
                cifs_dbg(FYI, "  %s%s\n", t->name,
-                        ce->tgthint == t ? " (target hint)" : "");
+                        READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
        }
 }
 
@@ -427,7 +427,7 @@ static int cache_entry_hash(const void *data, int size, unsigned int *hash)
 /* Return target hint of a DFS cache entry */
 static inline char *get_tgt_name(const struct cache_entry *ce)
 {
-       struct cache_dfs_tgt *t = ce->tgthint;
+       struct cache_dfs_tgt *t = READ_ONCE(ce->tgthint);
 
        return t ? t->name : ERR_PTR(-ENOENT);
 }
@@ -470,6 +470,7 @@ static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
 static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
                         struct cache_entry *ce, const char *tgthint)
 {
+       struct cache_dfs_tgt *target;
        int i;
 
        ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
@@ -496,8 +497,9 @@ static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
                ce->numtgts++;
        }
 
-       ce->tgthint = list_first_entry_or_null(&ce->tlist,
-                                              struct cache_dfs_tgt, list);
+       target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt,
+                                         list);
+       WRITE_ONCE(ce->tgthint, target);
 
        return 0;
 }
@@ -558,7 +560,8 @@ static void remove_oldest_entry_locked(void)
 }
 
 /* Add a new DFS cache entry */
-static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs)
+static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
+                                                 int numrefs)
 {
        int rc;
        struct cache_entry *ce;
@@ -573,11 +576,11 @@ static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs)
 
        rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
        if (rc)
-               return rc;
+               return ERR_PTR(rc);
 
        ce = alloc_cache_entry(refs, numrefs);
        if (IS_ERR(ce))
-               return PTR_ERR(ce);
+               return ce;
 
        spin_lock(&cache_ttl_lock);
        if (!cache_ttl) {
@@ -594,7 +597,7 @@ static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs)
 
        atomic_inc(&cache_count);
 
-       return 0;
+       return ce;
 }
 
 /* Check if two DFS paths are equal.  @s1 and @s2 are expected to be in @cache_cp's charset */
@@ -641,7 +644,9 @@ static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int h
  *
  * Use whole path components in the match.  Must be called with htable_rw_lock held.
  *
+ * Return cached entry if successful.
  * Return ERR_PTR(-ENOENT) if the entry is not found.
+ * Return error ptr otherwise.
  */
 static struct cache_entry *lookup_cache_entry(const char *path)
 {
@@ -711,14 +716,15 @@ void dfs_cache_destroy(void)
 static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
                                     int numrefs)
 {
+       struct cache_dfs_tgt *target;
+       char *th = NULL;
        int rc;
-       char *s, *th = NULL;
 
        WARN_ON(!rwsem_is_locked(&htable_rw_lock));
 
-       if (ce->tgthint) {
-               s = ce->tgthint->name;
-               th = kstrdup(s, GFP_ATOMIC);
+       target = READ_ONCE(ce->tgthint);
+       if (target) {
+               th = kstrdup(target->name, GFP_ATOMIC);
                if (!th)
                        return -ENOMEM;
        }
@@ -767,51 +773,75 @@ static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const
  *
  * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
  * handle them properly.
+ *
+ * On success, return entry with acquired lock for reading, otherwise error ptr.
  */
-static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, const char *path)
+static struct cache_entry *cache_refresh_path(const unsigned int xid,
+                                             struct cifs_ses *ses,
+                                             const char *path,
+                                             bool force_refresh)
 {
-       int rc;
-       struct cache_entry *ce;
        struct dfs_info3_param *refs = NULL;
+       struct cache_entry *ce;
        int numrefs = 0;
-       bool newent = false;
+       int rc;
 
        cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
 
-       down_write(&htable_rw_lock);
+       down_read(&htable_rw_lock);
 
        ce = lookup_cache_entry(path);
        if (!IS_ERR(ce)) {
-               if (!cache_entry_expired(ce)) {
-                       dump_ce(ce);
-                       up_write(&htable_rw_lock);
-                       return 0;
-               }
-       } else {
-               newent = true;
+               if (!force_refresh && !cache_entry_expired(ce))
+                       return ce;
+       } else if (PTR_ERR(ce) != -ENOENT) {
+               up_read(&htable_rw_lock);
+               return ce;
        }
 
        /*
-        * Either the entry was not found, or it is expired.
+        * Unlock shared access as we don't want to hold any locks while getting
+        * a new referral.  The @ses used for performing the I/O could be
+        * reconnecting and it acquires @htable_rw_lock to look up the dfs cache
+        * in order to failover -- if necessary.
+        */
+       up_read(&htable_rw_lock);
+
+       /*
+        * Either the entry was not found, or it is expired, or it is a forced
+        * refresh.
         * Request a new DFS referral in order to create or update a cache entry.
         */
        rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
-       if (rc)
-               goto out_unlock;
+       if (rc) {
+               ce = ERR_PTR(rc);
+               goto out;
+       }
 
        dump_refs(refs, numrefs);
 
-       if (!newent) {
-               rc = update_cache_entry_locked(ce, refs, numrefs);
-               goto out_unlock;
+       down_write(&htable_rw_lock);
+       /* Re-check as another task might have it added or refreshed already */
+       ce = lookup_cache_entry(path);
+       if (!IS_ERR(ce)) {
+               if (force_refresh || cache_entry_expired(ce)) {
+                       rc = update_cache_entry_locked(ce, refs, numrefs);
+                       if (rc)
+                               ce = ERR_PTR(rc);
+               }
+       } else if (PTR_ERR(ce) == -ENOENT) {
+               ce = add_cache_entry_locked(refs, numrefs);
        }
 
-       rc = add_cache_entry_locked(refs, numrefs);
+       if (IS_ERR(ce)) {
+               up_write(&htable_rw_lock);
+               goto out;
+       }
 
-out_unlock:
-       up_write(&htable_rw_lock);
+       downgrade_write(&htable_rw_lock);
+out:
        free_dfs_info_array(refs, numrefs);
-       return rc;
+       return ce;
 }
 
 /*
@@ -878,7 +908,7 @@ static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
                }
                it->it_path_consumed = t->path_consumed;
 
-               if (ce->tgthint == t)
+               if (READ_ONCE(ce->tgthint) == t)
                        list_add(&it->it_list, head);
                else
                        list_add_tail(&it->it_list, head);
@@ -931,15 +961,8 @@ int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nl
        if (IS_ERR(npath))
                return PTR_ERR(npath);
 
-       rc = cache_refresh_path(xid, ses, npath);
-       if (rc)
-               goto out_free_path;
-
-       down_read(&htable_rw_lock);
-
-       ce = lookup_cache_entry(npath);
+       ce = cache_refresh_path(xid, ses, npath, false);
        if (IS_ERR(ce)) {
-               up_read(&htable_rw_lock);
                rc = PTR_ERR(ce);
                goto out_free_path;
        }
@@ -1003,72 +1026,6 @@ out_unlock:
 }
 
 /**
- * dfs_cache_update_tgthint - update target hint of a DFS cache entry
- *
- * If it doesn't find the cache entry, then it will get a DFS referral for @path
- * and create a new entry.
- *
- * In case the cache entry exists but expired, it will get a DFS referral
- * for @path and then update the respective cache entry.
- *
- * @xid: syscall id
- * @ses: smb session
- * @cp: codepage
- * @remap: type of character remapping for paths
- * @path: path to lookup in DFS referral cache
- * @it: DFS target iterator
- *
- * Return zero if the target hint was updated successfully, otherwise non-zero.
- */
-int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
-                            const struct nls_table *cp, int remap, const char *path,
-                            const struct dfs_cache_tgt_iterator *it)
-{
-       int rc;
-       const char *npath;
-       struct cache_entry *ce;
-       struct cache_dfs_tgt *t;
-
-       npath = dfs_cache_canonical_path(path, cp, remap);
-       if (IS_ERR(npath))
-               return PTR_ERR(npath);
-
-       cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);
-
-       rc = cache_refresh_path(xid, ses, npath);
-       if (rc)
-               goto out_free_path;
-
-       down_write(&htable_rw_lock);
-
-       ce = lookup_cache_entry(npath);
-       if (IS_ERR(ce)) {
-               rc = PTR_ERR(ce);
-               goto out_unlock;
-       }
-
-       t = ce->tgthint;
-
-       if (likely(!strcasecmp(it->it_name, t->name)))
-               goto out_unlock;
-
-       list_for_each_entry(t, &ce->tlist, list) {
-               if (!strcasecmp(t->name, it->it_name)) {
-                       ce->tgthint = t;
-                       cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
-                                it->it_name);
-                       break;
-               }
-       }
-
-out_unlock:
-       up_write(&htable_rw_lock);
-out_free_path:
-       kfree(npath);
-       return rc;
-}
-
-/**
  * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
  * without sending any requests to the currently connected server.
  *
@@ -1092,21 +1049,20 @@ void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt
 
        cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
 
-       if (!down_write_trylock(&htable_rw_lock))
-               return;
+       down_read(&htable_rw_lock);
 
        ce = lookup_cache_entry(path);
        if (IS_ERR(ce))
                goto out_unlock;
 
-       t = ce->tgthint;
+       t = READ_ONCE(ce->tgthint);
 
        if (unlikely(!strcasecmp(it->it_name, t->name)))
                goto out_unlock;
 
        list_for_each_entry(t, &ce->tlist, list) {
                if (!strcasecmp(t->name, it->it_name)) {
-                       ce->tgthint = t;
+                       WRITE_ONCE(ce->tgthint, t);
                        cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
                                 it->it_name);
                        break;
@@ -1114,7 +1070,7 @@ void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt
        }
 
 out_unlock:
-       up_write(&htable_rw_lock);
+       up_read(&htable_rw_lock);
 }
 
 /**
@@ -1299,7 +1255,6 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
         * Resolve share's hostname and check if server address matches.  Otherwise just ignore it
         * as we could not have upcall to resolve hostname or failed to convert ip address.
         */
-       match = true;
        extract_unc_hostname(s1, &host, &hostlen);
        scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);
 
@@ -1321,35 +1276,37 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
  * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
  * target shares in @refs.
  */
-static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cache_tgt_list *tl,
-                                        const struct dfs_info3_param *refs, int numrefs)
+static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
+                                        struct dfs_cache_tgt_list *old_tl,
+                                        struct dfs_cache_tgt_list *new_tl)
 {
-       struct dfs_cache_tgt_iterator *it;
-       int i;
-
-       for (it = dfs_cache_get_tgt_iterator(tl); it; it = dfs_cache_get_next_tgt(tl, it)) {
-               for (i = 0; i < numrefs; i++) {
-                       if (target_share_equal(tcon->ses->server, dfs_cache_get_tgt_name(it),
-                                              refs[i].node_name))
+       struct dfs_cache_tgt_iterator *oit, *nit;
+
+       for (oit = dfs_cache_get_tgt_iterator(old_tl); oit;
+            oit = dfs_cache_get_next_tgt(old_tl, oit)) {
+               for (nit = dfs_cache_get_tgt_iterator(new_tl); nit;
+                    nit = dfs_cache_get_next_tgt(new_tl, nit)) {
+                       if (target_share_equal(server,
+                                              dfs_cache_get_tgt_name(oit),
+                                              dfs_cache_get_tgt_name(nit)))
                                return;
                }
        }
 
        cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
-       cifs_signal_cifsd_for_reconnect(tcon->ses->server, true);
+       cifs_signal_cifsd_for_reconnect(server, true);
 }
 
 /* Refresh dfs referral of tcon and mark it for reconnect if needed */
 static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
 {
-       struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+       struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
+       struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
        struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
        struct cifs_tcon *ipc = ses->tcon_ipc;
-       struct dfs_info3_param *refs = NULL;
        bool needs_refresh = false;
        struct cache_entry *ce;
        unsigned int xid;
-       int numrefs = 0;
        int rc = 0;
 
        xid = get_xid();
@@ -1358,9 +1315,8 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
        ce = lookup_cache_entry(path);
        needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
        if (!IS_ERR(ce)) {
-               rc = get_targets(ce, &tl);
-               if (rc)
-                       cifs_dbg(FYI, "%s: could not get dfs targets: %d\n", __func__, rc);
+               rc = get_targets(ce, &old_tl);
+               cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
        }
        up_read(&htable_rw_lock);
 
@@ -1377,26 +1333,18 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
        }
        spin_unlock(&ipc->tc_lock);
 
-       rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
-       if (!rc) {
-               /* Create or update a cache entry with the new referral */
-               dump_refs(refs, numrefs);
-
-               down_write(&htable_rw_lock);
-               ce = lookup_cache_entry(path);
-               if (IS_ERR(ce))
-                       add_cache_entry_locked(refs, numrefs);
-               else if (force_refresh || cache_entry_expired(ce))
-                       update_cache_entry_locked(ce, refs, numrefs);
-               up_write(&htable_rw_lock);
-
-               mark_for_reconnect_if_needed(tcon, &tl, refs, numrefs);
+       ce = cache_refresh_path(xid, ses, path, true);
+       if (!IS_ERR(ce)) {
+               rc = get_targets(ce, &new_tl);
+               up_read(&htable_rw_lock);
+               cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
+               mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
        }
 
 out:
        free_xid(xid);
-       dfs_cache_free_tgts(&tl);
-       free_dfs_info_array(refs, numrefs);
+       dfs_cache_free_tgts(&old_tl);
+       dfs_cache_free_tgts(&new_tl);
        return rc;
 }
 
index f7cff0b..be3b5a4 100644 (file)
@@ -35,9 +35,6 @@ int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nl
                   struct dfs_cache_tgt_list *tgt_list);
 int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
                         struct dfs_cache_tgt_list *tgt_list);
-int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
-                            const struct nls_table *cp, int remap, const char *path,
-                            const struct dfs_cache_tgt_iterator *it);
 void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it);
 int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
                               struct dfs_info3_param *ref);
index bd374fe..a5a097a 100644 (file)
@@ -428,6 +428,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
        oparms.disposition = FILE_CREATE;
        oparms.fid = &fid;
        oparms.reconnect = false;
+       oparms.mode = 0644;
 
        rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
                       NULL, NULL);
index 0b842a0..c47b254 100644 (file)
@@ -815,6 +815,7 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
                return -EINVAL;
        }
        if (tilen) {
+               kfree_sensitive(ses->auth_key.response);
                ses->auth_key.response = kmemdup(bcc_ptr + tioffset, tilen,
                                                 GFP_KERNEL);
                if (!ses->auth_key.response) {
@@ -1428,6 +1429,7 @@ sess_auth_kerberos(struct sess_data *sess_data)
                goto out_put_spnego_key;
        }
 
+       kfree_sensitive(ses->auth_key.response);
        ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
                                         GFP_KERNEL);
        if (!ses->auth_key.response) {
index 5048075..4cb3644 100644 (file)
@@ -562,17 +562,20 @@ static int cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
        if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
                rc = SMBQueryInformation(xid, tcon, full_path, &fi, cifs_sb->local_nls,
                                         cifs_remap(cifs_sb));
-               if (!rc)
-                       move_cifs_info_to_smb2(&data->fi, &fi);
                *adjustTZ = true;
        }
 
-       if (!rc && (le32_to_cpu(fi.Attributes) & ATTR_REPARSE)) {
+       if (!rc) {
                int tmprc;
                int oplock = 0;
                struct cifs_fid fid;
                struct cifs_open_parms oparms;
 
+               move_cifs_info_to_smb2(&data->fi, &fi);
+
+               if (!(le32_to_cpu(fi.Attributes) & ATTR_REPARSE))
+                       return 0;
+
                oparms.tcon = tcon;
                oparms.cifs_sb = cifs_sb;
                oparms.desired_access = FILE_READ_ATTRIBUTES;
@@ -716,17 +719,25 @@ cifs_mkdir_setinfo(struct inode *inode, const char *full_path,
 static int cifs_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock,
                          void *buf)
 {
-       FILE_ALL_INFO *fi = buf;
+       struct cifs_open_info_data *data = buf;
+       FILE_ALL_INFO fi = {};
+       int rc;
 
        if (!(oparms->tcon->ses->capabilities & CAP_NT_SMBS))
-               return SMBLegacyOpen(xid, oparms->tcon, oparms->path,
-                                    oparms->disposition,
-                                    oparms->desired_access,
-                                    oparms->create_options,
-                                    &oparms->fid->netfid, oplock, fi,
-                                    oparms->cifs_sb->local_nls,
-                                    cifs_remap(oparms->cifs_sb));
-       return CIFS_open(xid, oparms, oplock, fi);
+               rc = SMBLegacyOpen(xid, oparms->tcon, oparms->path,
+                                  oparms->disposition,
+                                  oparms->desired_access,
+                                  oparms->create_options,
+                                  &oparms->fid->netfid, oplock, &fi,
+                                  oparms->cifs_sb->local_nls,
+                                  cifs_remap(oparms->cifs_sb));
+       else
+               rc = CIFS_open(xid, oparms, oplock, &fi);
+
+       if (!rc && data)
+               move_cifs_info_to_smb2(&data->fi, &fi);
+
+       return rc;
 }
 
 static void
@@ -1050,7 +1061,7 @@ cifs_make_node(unsigned int xid, struct inode *inode,
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct inode *newinode = NULL;
        int rc = -EPERM;
-       FILE_ALL_INFO *buf = NULL;
+       struct cifs_open_info_data buf = {};
        struct cifs_io_parms io_parms;
        __u32 oplock = 0;
        struct cifs_fid fid;
@@ -1082,14 +1093,14 @@ cifs_make_node(unsigned int xid, struct inode *inode,
                                            cifs_sb->local_nls,
                                            cifs_remap(cifs_sb));
                if (rc)
-                       goto out;
+                       return rc;
 
                rc = cifs_get_inode_info_unix(&newinode, full_path,
                                              inode->i_sb, xid);
 
                if (rc == 0)
                        d_instantiate(dentry, newinode);
-               goto out;
+               return rc;
        }
 
        /*
@@ -1097,19 +1108,13 @@ cifs_make_node(unsigned int xid, struct inode *inode,
         * support block and char device (no socket & fifo)
         */
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
-               goto out;
+               return rc;
 
        if (!S_ISCHR(mode) && !S_ISBLK(mode))
-               goto out;
+               return rc;
 
        cifs_dbg(FYI, "sfu compat create special file\n");
 
-       buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
-       if (buf == NULL) {
-               rc = -ENOMEM;
-               goto out;
-       }
-
        oparms.tcon = tcon;
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = GENERIC_WRITE;
@@ -1124,21 +1129,21 @@ cifs_make_node(unsigned int xid, struct inode *inode,
                oplock = REQ_OPLOCK;
        else
                oplock = 0;
-       rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
+       rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &buf);
        if (rc)
-               goto out;
+               return rc;
 
        /*
         * BB Do not bother to decode buf since no local inode yet to put
         * timestamps in, but we can reuse it safely.
         */
 
-       pdev = (struct win_dev *)buf;
+       pdev = (struct win_dev *)&buf.fi;
        io_parms.pid = current->tgid;
        io_parms.tcon = tcon;
        io_parms.offset = 0;
        io_parms.length = sizeof(struct win_dev);
-       iov[1].iov_base = buf;
+       iov[1].iov_base = &buf.fi;
        iov[1].iov_len = sizeof(struct win_dev);
        if (S_ISCHR(mode)) {
                memcpy(pdev->type, "IntxCHR", 8);
@@ -1157,8 +1162,8 @@ cifs_make_node(unsigned int xid, struct inode *inode,
        d_drop(dentry);
 
        /* FIXME: add code here to set EAs */
-out:
-       kfree(buf);
+
+       cifs_free_open_info(&buf);
        return rc;
 }
 
index 2c484d4..2c9ffa9 100644 (file)
@@ -1453,6 +1453,7 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
 
        /* keep session key if binding */
        if (!is_binding) {
+               kfree_sensitive(ses->auth_key.response);
                ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
                                                 GFP_KERNEL);
                if (!ses->auth_key.response) {
@@ -1482,8 +1483,11 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
 out_put_spnego_key:
        key_invalidate(spnego_key);
        key_put(spnego_key);
-       if (rc)
+       if (rc) {
                kfree_sensitive(ses->auth_key.response);
+               ses->auth_key.response = NULL;
+               ses->auth_key.len = 0;
+       }
 out:
        sess_data->result = rc;
        sess_data->func = NULL;
@@ -4159,12 +4163,15 @@ smb2_readv_callback(struct mid_q_entry *mid)
                                (struct smb2_hdr *)rdata->iov[0].iov_base;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        struct smb_rqst rqst = { .rq_iov = &rdata->iov[1],
-                                .rq_nvec = 1,
-                                .rq_pages = rdata->pages,
-                                .rq_offset = rdata->page_offset,
-                                .rq_npages = rdata->nr_pages,
-                                .rq_pagesz = rdata->pagesz,
-                                .rq_tailsz = rdata->tailsz };
+                                .rq_nvec = 1, };
+
+       if (rdata->got_bytes) {
+               rqst.rq_pages = rdata->pages;
+               rqst.rq_offset = rdata->page_offset;
+               rqst.rq_npages = rdata->nr_pages;
+               rqst.rq_pagesz = rdata->pagesz;
+               rqst.rq_tailsz = rdata->tailsz;
+       }
 
        WARN_ONCE(rdata->server != mid->server,
                  "rdata server %p != mid server %p",
index 481788c..626a615 100644 (file)
@@ -577,26 +577,25 @@ static int erofs_fc_parse_param(struct fs_context *fc,
                }
                ++ctx->devs->extra_devices;
                break;
-       case Opt_fsid:
 #ifdef CONFIG_EROFS_FS_ONDEMAND
+       case Opt_fsid:
                kfree(ctx->fsid);
                ctx->fsid = kstrdup(param->string, GFP_KERNEL);
                if (!ctx->fsid)
                        return -ENOMEM;
-#else
-               errorfc(fc, "fsid option not supported");
-#endif
                break;
        case Opt_domain_id:
-#ifdef CONFIG_EROFS_FS_ONDEMAND
                kfree(ctx->domain_id);
                ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
                if (!ctx->domain_id)
                        return -ENOMEM;
+               break;
 #else
-               errorfc(fc, "domain_id option not supported");
-#endif
+       case Opt_fsid:
+       case Opt_domain_id:
+               errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
                break;
+#endif
        default:
                return -ENOPARAM;
        }
index ccf7c55..5200bb8 100644 (file)
@@ -1032,12 +1032,12 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 
        if (!be->decompressed_pages)
                be->decompressed_pages =
-                       kvcalloc(be->nr_pages, sizeof(struct page *),
-                                GFP_KERNEL | __GFP_NOFAIL);
+                       kcalloc(be->nr_pages, sizeof(struct page *),
+                               GFP_KERNEL | __GFP_NOFAIL);
        if (!be->compressed_pages)
                be->compressed_pages =
-                       kvcalloc(pclusterpages, sizeof(struct page *),
-                                GFP_KERNEL | __GFP_NOFAIL);
+                       kcalloc(pclusterpages, sizeof(struct page *),
+                               GFP_KERNEL | __GFP_NOFAIL);
 
        z_erofs_parse_out_bvecs(be);
        err2 = z_erofs_parse_in_bvecs(be, &overlapped);
@@ -1085,7 +1085,7 @@ out:
        }
        if (be->compressed_pages < be->onstack_pages ||
            be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
-               kvfree(be->compressed_pages);
+               kfree(be->compressed_pages);
        z_erofs_fill_other_copies(be, err);
 
        for (i = 0; i < be->nr_pages; ++i) {
@@ -1104,7 +1104,7 @@ out:
        }
 
        if (be->decompressed_pages != be->onstack_pages)
-               kvfree(be->decompressed_pages);
+               kfree(be->decompressed_pages);
 
        pcl->length = 0;
        pcl->partial = true;
index 0150570..98fb90b 100644 (file)
@@ -793,12 +793,16 @@ static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
                iomap->type = IOMAP_HOLE;
                iomap->addr = IOMAP_NULL_ADDR;
                /*
-                * No strict rule how to describe extents for post EOF, yet
-                * we need do like below. Otherwise, iomap itself will get
+                * No strict rule on how to describe extents for post EOF, yet
+                * we need to do like below. Otherwise, iomap itself will get
                 * into an endless loop on post EOF.
+                *
+                * Calculate the effective offset by subtracting extent start
+                * (map.m_la) from the requested offset, and add it to length.
+                * (NB: offset >= map.m_la always)
                 */
                if (iomap->offset >= inode->i_size)
-                       iomap->length = length + map.m_la - offset;
+                       iomap->length = length + offset - map.m_la;
        }
        iomap->flags = 0;
        return 0;
index 7decaaf..a2f04a3 100644 (file)
@@ -81,6 +81,8 @@ ext4_xattr_block_cache_find(struct inode *, struct ext4_xattr_header *,
                            struct mb_cache_entry **);
 static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
                                    size_t value_count);
+static __le32 ext4_xattr_hash_entry_signed(char *name, size_t name_len, __le32 *value,
+                                   size_t value_count);
 static void ext4_xattr_rehash(struct ext4_xattr_header *);
 
 static const struct xattr_handler * const ext4_xattr_handler_map[] = {
@@ -470,8 +472,22 @@ ext4_xattr_inode_verify_hashes(struct inode *ea_inode,
                tmp_data = cpu_to_le32(hash);
                e_hash = ext4_xattr_hash_entry(entry->e_name, entry->e_name_len,
                                               &tmp_data, 1);
+               /* All good? */
+               if (e_hash == entry->e_hash)
+                       return 0;
+
+               /*
+                * Not good. Maybe the entry hash was calculated
+                * using the buggy signed char version?
+                */
+               e_hash = ext4_xattr_hash_entry_signed(entry->e_name, entry->e_name_len,
+                                                       &tmp_data, 1);
+               /* Still no match - bad */
                if (e_hash != entry->e_hash)
                        return -EFSCORRUPTED;
+
+               /* Let people know about old hash */
+               pr_warn_once("ext4: filesystem with signed xattr name hash");
        }
        return 0;
 }
@@ -3081,7 +3097,29 @@ static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
        while (name_len--) {
                hash = (hash << NAME_HASH_SHIFT) ^
                       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
-                      *name++;
+                      (unsigned char)*name++;
+       }
+       while (value_count--) {
+               hash = (hash << VALUE_HASH_SHIFT) ^
+                      (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
+                      le32_to_cpu(*value++);
+       }
+       return cpu_to_le32(hash);
+}
+
+/*
+ * ext4_xattr_hash_entry_signed()
+ *
+ * Compute the hash of an extended attribute incorrectly.
+ */
+static __le32 ext4_xattr_hash_entry_signed(char *name, size_t name_len, __le32 *value, size_t value_count)
+{
+       __u32 hash = 0;
+
+       while (name_len--) {
+               hash = (hash << NAME_HASH_SHIFT) ^
+                      (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
+                      (signed char)*name++;
        }
        while (value_count--) {
                hash = (hash << VALUE_HASH_SHIFT) ^
index a4850ae..ad67036 100644 (file)
 #include <linux/posix_acl.h>
 #include <linux/posix_acl_xattr.h>
 
-struct posix_acl *fuse_get_acl(struct inode *inode, int type, bool rcu)
+static struct posix_acl *__fuse_get_acl(struct fuse_conn *fc,
+                                       struct user_namespace *mnt_userns,
+                                       struct inode *inode, int type, bool rcu)
 {
-       struct fuse_conn *fc = get_fuse_conn(inode);
        int size;
        const char *name;
        void *value = NULL;
@@ -25,7 +26,7 @@ struct posix_acl *fuse_get_acl(struct inode *inode, int type, bool rcu)
        if (fuse_is_bad(inode))
                return ERR_PTR(-EIO);
 
-       if (!fc->posix_acl || fc->no_getxattr)
+       if (fc->no_getxattr)
                return NULL;
 
        if (type == ACL_TYPE_ACCESS)
@@ -53,6 +54,46 @@ struct posix_acl *fuse_get_acl(struct inode *inode, int type, bool rcu)
        return acl;
 }
 
+static inline bool fuse_no_acl(const struct fuse_conn *fc,
+                              const struct inode *inode)
+{
+       /*
+        * Refuse interacting with POSIX ACLs for daemons that
+        * don't support FUSE_POSIX_ACL and are not mounted on
+        * the host to retain backwards compatibility.
+        */
+       return !fc->posix_acl && (i_user_ns(inode) != &init_user_ns);
+}
+
+struct posix_acl *fuse_get_acl(struct user_namespace *mnt_userns,
+                              struct dentry *dentry, int type)
+{
+       struct inode *inode = d_inode(dentry);
+       struct fuse_conn *fc = get_fuse_conn(inode);
+
+       if (fuse_no_acl(fc, inode))
+               return ERR_PTR(-EOPNOTSUPP);
+
+       return __fuse_get_acl(fc, mnt_userns, inode, type, false);
+}
+
+struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu)
+{
+       struct fuse_conn *fc = get_fuse_conn(inode);
+
+       /*
+        * FUSE daemons before FUSE_POSIX_ACL was introduced could get and set
+        * POSIX ACLs without them being used for permission checking by the
+        * vfs. Retain that behavior for backwards compatibility as there are
+        * filesystems that do all permission checking for acls in the daemon
+        * and not in the kernel.
+        */
+       if (!fc->posix_acl)
+               return NULL;
+
+       return __fuse_get_acl(fc, &init_user_ns, inode, type, rcu);
+}
+
 int fuse_set_acl(struct user_namespace *mnt_userns, struct dentry *dentry,
                 struct posix_acl *acl, int type)
 {
@@ -64,7 +105,7 @@ int fuse_set_acl(struct user_namespace *mnt_userns, struct dentry *dentry,
        if (fuse_is_bad(inode))
                return -EIO;
 
-       if (!fc->posix_acl || fc->no_setxattr)
+       if (fc->no_setxattr || fuse_no_acl(fc, inode))
                return -EOPNOTSUPP;
 
        if (type == ACL_TYPE_ACCESS)
@@ -99,7 +140,13 @@ int fuse_set_acl(struct user_namespace *mnt_userns, struct dentry *dentry,
                        return ret;
                }
 
-               if (!vfsgid_in_group_p(i_gid_into_vfsgid(&init_user_ns, inode)) &&
+               /*
+                * Fuse daemons without FUSE_POSIX_ACL never changed the passed
+                * through POSIX ACLs. Such daemons don't expect setgid bits to
+                * be stripped.
+                */
+               if (fc->posix_acl &&
+                   !vfsgid_in_group_p(i_gid_into_vfsgid(&init_user_ns, inode)) &&
                    !capable_wrt_inode_uidgid(&init_user_ns, inode, CAP_FSETID))
                        extra_flags |= FUSE_SETXATTR_ACL_KILL_SGID;
 
@@ -108,8 +155,15 @@ int fuse_set_acl(struct user_namespace *mnt_userns, struct dentry *dentry,
        } else {
                ret = fuse_removexattr(inode, name);
        }
-       forget_all_cached_acls(inode);
-       fuse_invalidate_attr(inode);
+
+       if (fc->posix_acl) {
+               /*
+                * Fuse daemons without FUSE_POSIX_ACL never cached POSIX ACLs
+                * and didn't invalidate attributes. Retain that behavior.
+                */
+               forget_all_cached_acls(inode);
+               fuse_invalidate_attr(inode);
+       }
 
        return ret;
 }
index cd1a071..2725fb5 100644
@@ -1942,7 +1942,8 @@ static const struct inode_operations fuse_dir_inode_operations = {
        .permission     = fuse_permission,
        .getattr        = fuse_getattr,
        .listxattr      = fuse_listxattr,
-       .get_inode_acl  = fuse_get_acl,
+       .get_inode_acl  = fuse_get_inode_acl,
+       .get_acl        = fuse_get_acl,
        .set_acl        = fuse_set_acl,
        .fileattr_get   = fuse_fileattr_get,
        .fileattr_set   = fuse_fileattr_set,
@@ -1964,7 +1965,8 @@ static const struct inode_operations fuse_common_inode_operations = {
        .permission     = fuse_permission,
        .getattr        = fuse_getattr,
        .listxattr      = fuse_listxattr,
-       .get_inode_acl  = fuse_get_acl,
+       .get_inode_acl  = fuse_get_inode_acl,
+       .get_acl        = fuse_get_acl,
        .set_acl        = fuse_set_acl,
        .fileattr_get   = fuse_fileattr_get,
        .fileattr_set   = fuse_fileattr_set,
index c673fae..46797a1 100644
@@ -1264,11 +1264,11 @@ ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value,
 ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size);
 int fuse_removexattr(struct inode *inode, const char *name);
 extern const struct xattr_handler *fuse_xattr_handlers[];
-extern const struct xattr_handler *fuse_acl_xattr_handlers[];
-extern const struct xattr_handler *fuse_no_acl_xattr_handlers[];
 
 struct posix_acl;
-struct posix_acl *fuse_get_acl(struct inode *inode, int type, bool rcu);
+struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu);
+struct posix_acl *fuse_get_acl(struct user_namespace *mnt_userns,
+                              struct dentry *dentry, int type);
 int fuse_set_acl(struct user_namespace *mnt_userns, struct dentry *dentry,
                 struct posix_acl *acl, int type);
 
index 6b3beda..de9b9ec 100644
@@ -311,7 +311,8 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
                fuse_dax_dontcache(inode, attr->flags);
 }
 
-static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
+static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr,
+                           struct fuse_conn *fc)
 {
        inode->i_mode = attr->mode & S_IFMT;
        inode->i_size = attr->size;
@@ -333,6 +334,12 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
                                   new_decode_dev(attr->rdev));
        } else
                BUG();
+       /*
+        * Ensure that we don't cache acls for daemons without FUSE_POSIX_ACL
+        * so they see the exact same behavior as before.
+        */
+       if (!fc->posix_acl)
+               inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
 }
 
 static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
@@ -372,7 +379,7 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
                if (!inode)
                        return NULL;
 
-               fuse_init_inode(inode, attr);
+               fuse_init_inode(inode, attr, fc);
                get_fuse_inode(inode)->nodeid = nodeid;
                inode->i_flags |= S_AUTOMOUNT;
                goto done;
@@ -388,7 +395,7 @@ retry:
                if (!fc->writeback_cache || !S_ISREG(attr->mode))
                        inode->i_flags |= S_NOCMTIME;
                inode->i_generation = generation;
-               fuse_init_inode(inode, attr);
+               fuse_init_inode(inode, attr, fc);
                unlock_new_inode(inode);
        } else if (fuse_stale_inode(inode, generation, attr)) {
                /* nodeid was reused, any I/O on the old inode should fail */
@@ -1174,7 +1181,6 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
                        if ((flags & FUSE_POSIX_ACL)) {
                                fc->default_permissions = 1;
                                fc->posix_acl = 1;
-                               fm->sb->s_xattr = fuse_acl_xattr_handlers;
                        }
                        if (flags & FUSE_CACHE_SYMLINKS)
                                fc->cache_symlinks = 1;
@@ -1420,13 +1426,6 @@ static void fuse_sb_defaults(struct super_block *sb)
        if (sb->s_user_ns != &init_user_ns)
                sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER;
        sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
-
-       /*
-        * If we are not in the initial user namespace posix
-        * acls must be translated.
-        */
-       if (sb->s_user_ns != &init_user_ns)
-               sb->s_xattr = fuse_no_acl_xattr_handlers;
 }
 
 static int fuse_fill_super_submount(struct super_block *sb,
index 0d3e717..9fe571a 100644
@@ -203,27 +203,6 @@ static int fuse_xattr_set(const struct xattr_handler *handler,
        return fuse_setxattr(inode, name, value, size, flags, 0);
 }
 
-static bool no_xattr_list(struct dentry *dentry)
-{
-       return false;
-}
-
-static int no_xattr_get(const struct xattr_handler *handler,
-                       struct dentry *dentry, struct inode *inode,
-                       const char *name, void *value, size_t size)
-{
-       return -EOPNOTSUPP;
-}
-
-static int no_xattr_set(const struct xattr_handler *handler,
-                       struct user_namespace *mnt_userns,
-                       struct dentry *dentry, struct inode *nodee,
-                       const char *name, const void *value,
-                       size_t size, int flags)
-{
-       return -EOPNOTSUPP;
-}
-
 static const struct xattr_handler fuse_xattr_handler = {
        .prefix = "",
        .get    = fuse_xattr_get,
@@ -234,33 +213,3 @@ const struct xattr_handler *fuse_xattr_handlers[] = {
        &fuse_xattr_handler,
        NULL
 };
-
-const struct xattr_handler *fuse_acl_xattr_handlers[] = {
-       &posix_acl_access_xattr_handler,
-       &posix_acl_default_xattr_handler,
-       &fuse_xattr_handler,
-       NULL
-};
-
-static const struct xattr_handler fuse_no_acl_access_xattr_handler = {
-       .name  = XATTR_NAME_POSIX_ACL_ACCESS,
-       .flags = ACL_TYPE_ACCESS,
-       .list  = no_xattr_list,
-       .get   = no_xattr_get,
-       .set   = no_xattr_set,
-};
-
-static const struct xattr_handler fuse_no_acl_default_xattr_handler = {
-       .name  = XATTR_NAME_POSIX_ACL_DEFAULT,
-       .flags = ACL_TYPE_ACCESS,
-       .list  = no_xattr_list,
-       .get   = no_xattr_get,
-       .set   = no_xattr_set,
-};
-
-const struct xattr_handler *fuse_no_acl_xattr_handlers[] = {
-       &fuse_no_acl_access_xattr_handler,
-       &fuse_no_acl_default_xattr_handler,
-       &fuse_xattr_handler,
-       NULL
-};
index 7236393..61323de 100644
@@ -80,6 +80,15 @@ void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
        brelse(bd->bd_bh);
 }
 
+static int __gfs2_writepage(struct page *page, struct writeback_control *wbc,
+                      void *data)
+{
+       struct address_space *mapping = data;
+       int ret = mapping->a_ops->writepage(page, wbc);
+       mapping_set_error(mapping, ret);
+       return ret;
+}
+
 /**
  * gfs2_ail1_start_one - Start I/O on a transaction
  * @sdp: The superblock
@@ -131,7 +140,7 @@ __acquires(&sdp->sd_ail_lock)
                if (!mapping)
                        continue;
                spin_unlock(&sdp->sd_ail_lock);
-               ret = filemap_fdatawrite_wbc(mapping, wbc);
+               ret = write_cache_pages(mapping, wbc, __gfs2_writepage, mapping);
                if (need_resched()) {
                        blk_finish_plug(plug);
                        cond_resched();
index 0ef0703..c0950ed 100644
@@ -662,6 +662,39 @@ static struct shrinker     nfsd_file_shrinker = {
 };
 
 /**
+ * nfsd_file_cond_queue - conditionally unhash and queue a nfsd_file
+ * @nf: nfsd_file to attempt to queue
+ * @dispose: private list to queue successfully-put objects
+ *
+ * Unhash an nfsd_file, try to get a reference to it, and then put that
+ * reference. If it's the last reference, queue it to the dispose list.
+ */
+static void
+nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose)
+       __must_hold(RCU)
+{
+       int decrement = 1;
+
+       /* If we raced with someone else unhashing, ignore it */
+       if (!nfsd_file_unhash(nf))
+               return;
+
+       /* If we can't get a reference, ignore it */
+       if (!nfsd_file_get(nf))
+               return;
+
+       /* Extra decrement if we remove from the LRU */
+       if (nfsd_file_lru_remove(nf))
+               ++decrement;
+
+       /* If refcount goes to 0, then put on the dispose list */
+       if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
+               list_add(&nf->nf_lru, dispose);
+               trace_nfsd_file_closing(nf);
+       }
+}
+
+/**
  * nfsd_file_queue_for_close: try to close out any open nfsd_files for an inode
  * @inode:   inode on which to close out nfsd_files
  * @dispose: list on which to gather nfsd_files to close out
@@ -688,30 +721,11 @@ nfsd_file_queue_for_close(struct inode *inode, struct list_head *dispose)
 
        rcu_read_lock();
        do {
-               int decrement = 1;
-
                nf = rhashtable_lookup(&nfsd_file_rhash_tbl, &key,
                                       nfsd_file_rhash_params);
                if (!nf)
                        break;
-
-               /* If we raced with someone else unhashing, ignore it */
-               if (!nfsd_file_unhash(nf))
-                       continue;
-
-               /* If we can't get a reference, ignore it */
-               if (!nfsd_file_get(nf))
-                       continue;
-
-               /* Extra decrement if we remove from the LRU */
-               if (nfsd_file_lru_remove(nf))
-                       ++decrement;
-
-               /* If refcount goes to 0, then put on the dispose list */
-               if (refcount_sub_and_test(decrement, &nf->nf_ref)) {
-                       list_add(&nf->nf_lru, dispose);
-                       trace_nfsd_file_closing(nf);
-               }
+               nfsd_file_cond_queue(nf, dispose);
        } while (1);
        rcu_read_unlock();
 }
@@ -928,11 +942,8 @@ __nfsd_file_cache_purge(struct net *net)
 
                nf = rhashtable_walk_next(&iter);
                while (!IS_ERR_OR_NULL(nf)) {
-                       if (!net || nf->nf_net == net) {
-                               nfsd_file_unhash(nf);
-                               nfsd_file_lru_remove(nf);
-                               list_add(&nf->nf_lru, &dispose);
-                       }
+                       if (!net || nf->nf_net == net)
+                               nfsd_file_cond_queue(nf, &dispose);
                        nf = rhashtable_walk_next(&iter);
                }
 
index 8c854ba..51a4b78 100644
@@ -195,7 +195,7 @@ struct nfsd_net {
 
        atomic_t                nfsd_courtesy_clients;
        struct shrinker         nfsd_client_shrinker;
-       struct delayed_work     nfsd_shrinker_work;
+       struct work_struct      nfsd_shrinker_work;
 };
 
 /* Simple check to find out if a given net was properly initialized */
index 9b81d01..f189ba7 100644
@@ -1318,6 +1318,7 @@ try_again:
                        /* allow 20secs for mount/unmount for now - revisit */
                        if (signal_pending(current) ||
                                        (schedule_timeout(20*HZ) == 0)) {
+                               finish_wait(&nn->nfsd_ssc_waitq, &wait);
                                kfree(work);
                                return nfserr_eagain;
                        }
index 4809ae0..4ef5293 100644
@@ -4411,7 +4411,7 @@ nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
        if (!count)
                count = atomic_long_read(&num_delegations);
        if (count)
-               mod_delayed_work(laundry_wq, &nn->nfsd_shrinker_work, 0);
+               queue_work(laundry_wq, &nn->nfsd_shrinker_work);
        return (unsigned long)count;
 }
 
@@ -4421,7 +4421,7 @@ nfsd4_state_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
        return SHRINK_STOP;
 }
 
-int
+void
 nfsd4_init_leases_net(struct nfsd_net *nn)
 {
        struct sysinfo si;
@@ -4443,16 +4443,6 @@ nfsd4_init_leases_net(struct nfsd_net *nn)
        nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB);
 
        atomic_set(&nn->nfsd_courtesy_clients, 0);
-       nn->nfsd_client_shrinker.scan_objects = nfsd4_state_shrinker_scan;
-       nn->nfsd_client_shrinker.count_objects = nfsd4_state_shrinker_count;
-       nn->nfsd_client_shrinker.seeks = DEFAULT_SEEKS;
-       return register_shrinker(&nn->nfsd_client_shrinker, "nfsd-client");
-}
-
-void
-nfsd4_leases_net_shutdown(struct nfsd_net *nn)
-{
-       unregister_shrinker(&nn->nfsd_client_shrinker);
 }
 
 static void init_nfs4_replay(struct nfs4_replay *rp)
@@ -6235,8 +6225,7 @@ deleg_reaper(struct nfsd_net *nn)
 static void
 nfsd4_state_shrinker_worker(struct work_struct *work)
 {
-       struct delayed_work *dwork = to_delayed_work(work);
-       struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
+       struct nfsd_net *nn = container_of(work, struct nfsd_net,
                                nfsd_shrinker_work);
 
        courtesy_client_reaper(nn);
@@ -8066,11 +8055,20 @@ static int nfs4_state_create_net(struct net *net)
        INIT_LIST_HEAD(&nn->blocked_locks_lru);
 
        INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
-       INIT_DELAYED_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker);
+       INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker);
        get_net(net);
 
+       nn->nfsd_client_shrinker.scan_objects = nfsd4_state_shrinker_scan;
+       nn->nfsd_client_shrinker.count_objects = nfsd4_state_shrinker_count;
+       nn->nfsd_client_shrinker.seeks = DEFAULT_SEEKS;
+
+       if (register_shrinker(&nn->nfsd_client_shrinker, "nfsd-client"))
+               goto err_shrinker;
        return 0;
 
+err_shrinker:
+       put_net(net);
+       kfree(nn->sessionid_hashtbl);
 err_sessionid:
        kfree(nn->unconf_id_hashtbl);
 err_unconf_id:
@@ -8163,6 +8161,8 @@ nfs4_state_shutdown_net(struct net *net)
        struct list_head *pos, *next, reaplist;
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
+       unregister_shrinker(&nn->nfsd_client_shrinker);
+       cancel_work(&nn->nfsd_shrinker_work);
        cancel_delayed_work_sync(&nn->laundromat_work);
        locks_end_grace(&nn->nfsd4_manager);
 
index d1e581a..c2577ee 100644
@@ -1457,9 +1457,7 @@ static __net_init int nfsd_init_net(struct net *net)
                goto out_idmap_error;
        nn->nfsd_versions = NULL;
        nn->nfsd4_minorversions = NULL;
-       retval = nfsd4_init_leases_net(nn);
-       if (retval)
-               goto out_drc_error;
+       nfsd4_init_leases_net(nn);
        retval = nfsd_reply_cache_init(nn);
        if (retval)
                goto out_cache_error;
@@ -1469,8 +1467,6 @@ static __net_init int nfsd_init_net(struct net *net)
        return 0;
 
 out_cache_error:
-       nfsd4_leases_net_shutdown(nn);
-out_drc_error:
        nfsd_idmap_shutdown(net);
 out_idmap_error:
        nfsd_export_shutdown(net);
@@ -1486,7 +1482,6 @@ static __net_exit void nfsd_exit_net(struct net *net)
        nfsd_idmap_shutdown(net);
        nfsd_export_shutdown(net);
        nfsd_netns_free_versions(net_generic(net, nfsd_net_id));
-       nfsd4_leases_net_shutdown(nn);
 }
 
 static struct pernet_operations nfsd_net_ops = {
index 93b42ef..fa0144a 100644
@@ -504,8 +504,7 @@ extern void unregister_cld_notifier(void);
 extern void nfsd4_ssc_init_umount_work(struct nfsd_net *nn);
 #endif
 
-extern int nfsd4_init_leases_net(struct nfsd_net *nn);
-extern void nfsd4_leases_net_shutdown(struct nfsd_net *nn);
+extern void nfsd4_init_leases_net(struct nfsd_net *nn);
 
 #else /* CONFIG_NFSD_V4 */
 static inline int nfsd4_is_junction(struct dentry *dentry)
@@ -513,8 +512,7 @@ static inline int nfsd4_is_junction(struct dentry *dentry)
        return 0;
 }
 
-static inline int nfsd4_init_leases_net(struct nfsd_net *nn) { return 0; };
-static inline void nfsd4_leases_net_shutdown(struct nfsd_net *nn) {};
+static inline void nfsd4_init_leases_net(struct nfsd_net *nn) { };
 
 #define register_cld_notifier() 0
 #define unregister_cld_notifier() do { } while(0)
index b9d15c3..40ce92a 100644
@@ -480,9 +480,18 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
        ret = nilfs_btnode_submit_block(btnc, ptr, 0, REQ_OP_READ, &bh,
                                        &submit_ptr);
        if (ret) {
-               if (ret != -EEXIST)
-                       return ret;
-               goto out_check;
+               if (likely(ret == -EEXIST))
+                       goto out_check;
+               if (ret == -ENOENT) {
+                       /*
+                        * Block address translation failed due to invalid
+                        * value of 'ptr'.  In this case, return internal code
+                        * -EINVAL (broken bmap) to notify bmap layer of fatal
+                        * metadata corruption.
+                        */
+                       ret = -EINVAL;
+               }
+               return ret;
        }
 
        if (ra) {
index 98ac37e..cc69484 100644
@@ -108,6 +108,21 @@ static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
        return ctx->features & UFFD_FEATURE_INITIALIZED;
 }
 
+static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
+                                    vm_flags_t flags)
+{
+       const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;
+
+       vma->vm_flags = flags;
+       /*
+        * For shared mappings, we want to enable writenotify while
+        * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
+        * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
+        */
+       if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
+               vma_set_page_prot(vma);
+}
+
 static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
                                     int wake_flags, void *key)
 {
@@ -618,7 +633,8 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
                for_each_vma(vmi, vma) {
                        if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
                                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-                               vma->vm_flags &= ~__VM_UFFD_FLAGS;
+                               userfaultfd_set_vm_flags(vma,
+                                                        vma->vm_flags & ~__VM_UFFD_FLAGS);
                        }
                }
                mmap_write_unlock(mm);
@@ -652,7 +668,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
        octx = vma->vm_userfaultfd_ctx.ctx;
        if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-               vma->vm_flags &= ~__VM_UFFD_FLAGS;
+               userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
                return 0;
        }
 
@@ -733,7 +749,7 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
        } else {
                /* Drop uffd context if remap feature not enabled */
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-               vma->vm_flags &= ~__VM_UFFD_FLAGS;
+               userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
        }
 }
 
@@ -895,7 +911,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
                        prev = vma;
                }
 
-               vma->vm_flags = new_flags;
+               userfaultfd_set_vm_flags(vma, new_flags);
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
        }
        mmap_write_unlock(mm);
@@ -1463,7 +1479,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
                 * the next vma was merged into the current one and
                 * the current one has not been updated yet.
                 */
-               vma->vm_flags = new_flags;
+               userfaultfd_set_vm_flags(vma, new_flags);
                vma->vm_userfaultfd_ctx.ctx = ctx;
 
                if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
@@ -1651,7 +1667,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                 * the next vma was merged into the current one and
                 * the current one has not been updated yet.
                 */
-               vma->vm_flags = new_flags;
+               userfaultfd_set_vm_flags(vma, new_flags);
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
 
        skip:
index 2c53fbb..a9c5c3f 100644
@@ -442,6 +442,10 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
                        data_size = zonefs_check_zone_condition(inode, zone,
                                                                false, false);
                }
+       } else if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO &&
+                  data_size > isize) {
+               /* Do not expose garbage data */
+               data_size = isize;
        }
 
        /*
@@ -805,6 +809,24 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
 
        ret = submit_bio_wait(bio);
 
+       /*
+        * If the file zone was written underneath the file system, the zone
+        * write pointer may not be where we expect it to be, but the zone
+        * append write can still succeed. So check manually that we wrote where
+        * we intended to, that is, at zi->i_wpoffset.
+        */
+       if (!ret) {
+               sector_t wpsector =
+                       zi->i_zsector + (zi->i_wpoffset >> SECTOR_SHIFT);
+
+               if (bio->bi_iter.bi_sector != wpsector) {
+                       zonefs_warn(inode->i_sb,
+                               "Corrupted write pointer %llu for zone at %llu\n",
+                               wpsector, zi->i_zsector);
+                       ret = -EIO;
+               }
+       }
+
        zonefs_file_write_dio_end_io(iocb, size, ret, 0);
        trace_zonefs_file_dio_append(inode, size, ret);
 
index cd3b75e..e44be31 100644
@@ -230,7 +230,8 @@ struct acpi_pnp_type {
        u32 hardware_id:1;
        u32 bus_address:1;
        u32 platform_id:1;
-       u32 reserved:29;
+       u32 backlight:1;
+       u32 reserved:28;
 };
 
 struct acpi_device_pnp {
index 9ec8129..bd55605 100644
@@ -105,14 +105,14 @@ int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
  * Dumping its extra ELF program headers includes all the other information
  * a debugger needs to easily find how the gate DSO was being used.
  */
-extern Elf_Half elf_core_extra_phdrs(void);
+extern Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm);
 extern int
 elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
 extern int
 elf_core_write_extra_data(struct coredump_params *cprm);
-extern size_t elf_core_extra_data_size(void);
+extern size_t elf_core_extra_data_size(struct coredump_params *cprm);
 #else
-static inline Elf_Half elf_core_extra_phdrs(void)
+static inline Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
        return 0;
 }
@@ -127,7 +127,7 @@ static inline int elf_core_write_extra_data(struct coredump_params *cprm)
        return 1;
 }
 
-static inline size_t elf_core_extra_data_size(void)
+static inline size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
        return 0;
 }
index b986e26..b09f443 100644
@@ -545,8 +545,8 @@ int zynqmp_pm_request_wake(const u32 node,
                           const u64 address,
                           const enum zynqmp_pm_request_ack ack);
 int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode);
-int zynqmp_pm_set_rpu_mode(u32 node_id, u32 arg1);
-int zynqmp_pm_set_tcm_config(u32 node_id, u32 arg1);
+int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode);
+int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode);
 int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value);
 int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
                             u32 value);
@@ -845,12 +845,12 @@ static inline int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mo
        return -ENODEV;
 }
 
-static inline int zynqmp_pm_set_rpu_mode(u32 node_id, u32 arg1)
+static inline int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode)
 {
        return -ENODEV;
 }
 
-static inline int zynqmp_pm_set_tcm_config(u32 node_id, u32 arg1)
+static inline int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode)
 {
        return -ENODEV;
 }
index f3f196e..8f85716 100644
@@ -1270,10 +1270,10 @@ static inline void folio_put_refs(struct folio *folio, int refs)
                __folio_put(folio);
 }
 
-/**
- * release_pages - release an array of pages or folios
+/*
+ * union release_pages_arg - an array of pages or folios
  *
- * This just releases a simple array of multiple pages, and
+ * release_pages() releases a simple array of multiple pages, and
  * accepts various different forms of said page array: either
  * a regular old boring array of pages, an array of folios, or
  * an array of encoded page pointers.
index e8ed225..ff3f3f2 100644
@@ -413,8 +413,7 @@ static inline void free_anon_vma_name(struct vm_area_struct *vma)
         * Not using anon_vma_name because it generates a warning if mmap_lock
         * is not held, which might be the case here.
         */
-       if (!vma->vm_file)
-               anon_vma_name_put(vma->anon_name);
+       anon_vma_name_put(vma->anon_name);
 }
 
 static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
index 3b84750..9757067 100644
@@ -581,7 +581,7 @@ struct vm_area_struct {
        /*
         * For private and shared anonymous mappings, a pointer to a null
         * terminated string containing the name given to the vma, or NULL if
-        * unnamed. Serialized by mmap_sem. Use anon_vma_name to access.
+        * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
         */
        struct anon_vma_name *anon_name;
 #endif
index 2e677e6..d7c2d33 100644
@@ -301,7 +301,7 @@ static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
  *
  * You can also use this function if you're holding a lock that prevents
  * pages being frozen & removed; eg the i_pages lock for the page cache
- * or the mmap_sem or page table lock for page tables.  In this case,
+ * or the mmap_lock or page table lock for page tables.  In this case,
  * it will always succeed, and you could have used a plain folio_get(),
  * but it's sometimes more convenient to have a common function called
  * from both locked and RCU-protected contexts.
index 632320e..a48bb52 100644
@@ -32,7 +32,8 @@ enum simatic_ipc_station_ids {
        SIMATIC_IPC_IPC477E = 0x00000A02,
        SIMATIC_IPC_IPC127E = 0x00000D01,
        SIMATIC_IPC_IPC227G = 0x00000F01,
-       SIMATIC_IPC_IPC427G = 0x00001001,
+       SIMATIC_IPC_IPCBX_39A = 0x00001001,
+       SIMATIC_IPC_IPCPX_39A = 0x00001002,
 };
 
 static inline u32 simatic_ipc_get_station_id(u8 *data, int max_len)
index f7f1272..9a60f45 100644
@@ -5,7 +5,7 @@
 #ifndef __ASSEMBLER__
 #include <linux/types.h>
 
-#ifdef CONFIG_ARCH_OMAP1_ANY
+#ifdef CONFIG_ARCH_OMAP1
 /*
  * NOTE: Please use ioremap + __raw_read/write where possible instead of these
  */
@@ -15,7 +15,7 @@ extern u32 omap_readl(u32 pa);
 extern void omap_writeb(u8 v, u32 pa);
 extern void omap_writew(u16 v, u32 pa);
 extern void omap_writel(u32 v, u32 pa);
-#else
+#elif defined(CONFIG_COMPILE_TEST)
 static inline u8 omap_readb(u32 pa)  { return 0; }
 static inline u16 omap_readw(u32 pa) { return 0; }
 static inline u32 omap_readl(u32 pa) { return 0; }
index 20c0ff5..7d68a5c 100644
@@ -198,8 +198,8 @@ static __always_inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *ev
         * The loop below will unmap these fields if the log is larger than
         * one page, so save them here for reference:
         */
-       count = READ_ONCE(event->count);
-       event_type = READ_ONCE(event->event_type);
+       count = event->count;
+       event_type = event->event_type;
 
        /* Verify that it's the log header */
        if (event_header->pcr_idx != 0 ||
index 7d5325d..86d1c8e 100644
@@ -267,16 +267,15 @@ static inline void *usb_get_intfdata(struct usb_interface *intf)
 }
 
 /**
- * usb_set_intfdata() - associate driver-specific data with the interface
- * @intf: the usb interface
- * @data: pointer to the device priv structure or %NULL
+ * usb_set_intfdata() - associate driver-specific data with an interface
+ * @intf: USB interface
+ * @data: driver data
  *
- * Drivers should use this function in their probe() to associate their
- * driver-specific data with the usb interface.
+ * Drivers can use this function in their probe() callbacks to associate
+ * driver-specific data with an interface.
  *
- * When disconnecting, the core will take care of setting @intf back to %NULL,
- * so no actions are needed on the driver side. The interface should not be set
- * to %NULL before all actions completed (e.g. no outsanding URB remaining).
+ * Note that there is generally no need to clear the driver-data pointer even
+ * if some drivers do so for historical or implementation-specific reasons.
  */
 static inline void usb_set_intfdata(struct usb_interface *intf, void *data)
 {
@@ -774,11 +773,14 @@ extern struct device *usb_intf_get_dma_device(struct usb_interface *intf);
 extern int usb_acpi_set_power_state(struct usb_device *hdev, int index,
        bool enable);
 extern bool usb_acpi_power_manageable(struct usb_device *hdev, int index);
+extern int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index);
 #else
 static inline int usb_acpi_set_power_state(struct usb_device *hdev, int index,
        bool enable) { return 0; }
 static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
        { return true; }
+static inline int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index)
+       { return 0; }
 #endif
 
 /* USB autosuspend and autoresume */
index b3ba046..56189e4 100644 (file)
@@ -336,9 +336,12 @@ struct gdma_queue_spec {
        };
 };
 
+#define MANA_IRQ_NAME_SZ 32
+
 struct gdma_irq_context {
        void (*handler)(void *arg);
        void *arg;
+       char name[MANA_IRQ_NAME_SZ];
 };
 
 struct gdma_context {
index 695eebc..e39fb07 100644 (file)
@@ -422,6 +422,8 @@ extern int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost,
 extern struct iscsi_cls_session *
 iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
                    uint16_t, int, int, uint32_t, unsigned int);
+void iscsi_session_remove(struct iscsi_cls_session *cls_session);
+void iscsi_session_free(struct iscsi_cls_session *cls_session);
 extern void iscsi_session_teardown(struct iscsi_cls_session *);
 extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
 extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
index ab95559..73cac8d 100644 (file)
@@ -170,7 +170,7 @@ struct rpi_firmware_clk_rate_request {
 
 #define RPI_FIRMWARE_CLK_RATE_REQUEST(_id)     \
        {                                       \
-               .id = _id,                      \
+               .id = cpu_to_le32(_id),         \
        }
 
 #if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)
index c742469..2d6f80d 100644 (file)
@@ -15,8 +15,7 @@ enum sctp_conntrack {
        SCTP_CONNTRACK_SHUTDOWN_RECD,
        SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
        SCTP_CONNTRACK_HEARTBEAT_SENT,
-       SCTP_CONNTRACK_HEARTBEAT_ACKED,
-       SCTP_CONNTRACK_DATA_SENT,
+       SCTP_CONNTRACK_HEARTBEAT_ACKED, /* no longer used */
        SCTP_CONNTRACK_MAX
 };
 
index 94e7403..aa805e6 100644 (file)
@@ -94,8 +94,7 @@ enum ctattr_timeout_sctp {
        CTA_TIMEOUT_SCTP_SHUTDOWN_RECD,
        CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
        CTA_TIMEOUT_SCTP_HEARTBEAT_SENT,
-       CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED,
-       CTA_TIMEOUT_SCTP_DATA_SENT,
+       CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED, /* no longer used */
        __CTA_TIMEOUT_SCTP_MAX
 };
 #define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1)
index 3511095..42a40ad 100644 (file)
@@ -58,7 +58,7 @@
 
 #define PSCI_1_1_FN_SYSTEM_RESET2              PSCI_0_2_FN(18)
 #define PSCI_1_1_FN_MEM_PROTECT                        PSCI_0_2_FN(19)
-#define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE    PSCI_0_2_FN(19)
+#define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE    PSCI_0_2_FN(20)
 
 #define PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND      PSCI_0_2_FN64(12)
 #define PSCI_1_0_FN64_NODE_HW_STATE            PSCI_0_2_FN64(13)
@@ -67,7 +67,7 @@
 #define PSCI_1_0_FN64_STAT_COUNT               PSCI_0_2_FN64(17)
 
 #define PSCI_1_1_FN64_SYSTEM_RESET2            PSCI_0_2_FN64(18)
-#define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE  PSCI_0_2_FN64(19)
+#define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE  PSCI_0_2_FN64(20)
 
 /* PSCI v0.2 power state encoding for CPU_SUSPEND function */
 #define PSCI_0_2_POWER_STATE_ID_MASK           0xffff
index 5cf81df..727084c 100644 (file)
@@ -808,6 +808,7 @@ struct ufs_hba_monitor {
  * @urgent_bkops_lvl: keeps track of urgent bkops level for device
  * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
  *  device is known or not.
+ * @wb_mutex: used to serialize devfreq and sysfs write booster toggling
  * @clk_scaling_lock: used to serialize device commands and clock scaling
  * @desc_size: descriptor sizes reported by device
  * @scsi_block_reqs_cnt: reference counting for scsi block requests
@@ -951,6 +952,7 @@ struct ufs_hba {
        enum bkops_status urgent_bkops_lvl;
        bool is_urgent_bkops_lvl_checked;
 
+       struct mutex wb_mutex;
        struct rw_semaphore clk_scaling_lock;
        unsigned char desc_size[QUERY_DESC_IDN_MAX];
        atomic_t scsi_block_reqs_cnt;
index 0958846..44e90b2 100644 (file)
@@ -204,7 +204,7 @@ config LOCALVERSION_AUTO
          appended after any matching localversion* files, and after the value
          set in CONFIG_LOCALVERSION.
 
-         (The actual string used here is the first eight characters produced
+         (The actual string used here is the first 12 characters produced
          by running the command:
 
            $ git rev-parse --verify HEAD
@@ -776,7 +776,7 @@ config PRINTK_SAFE_LOG_BUF_SHIFT
        depends on PRINTK
        help
          Select the size of an alternate printk per-CPU buffer where messages
-         printed from usafe contexts are temporary stored. One example would
+         printed from unsafe contexts are temporary stored. One example would
          be NMI messages, another one - printk recursion. The messages are
          copied to the main log buffer in a safe context to avoid a deadlock.
          The value defines the size as a power of 2.
index 8316c23..26de459 100644 (file)
@@ -59,3 +59,4 @@ include/generated/utsversion.h: FORCE
 
 $(obj)/version-timestamp.o: include/generated/utsversion.h
 CFLAGS_version-timestamp.o := -include include/generated/utsversion.h
+KASAN_SANITIZE_version-timestamp.o := n
index 179e93b..043cbf8 100644 (file)
@@ -2,7 +2,6 @@
 
 #include <generated/compile.h>
 #include <generated/utsrelease.h>
-#include <linux/version.h>
 #include <linux/proc_ns.h>
 #include <linux/refcount.h>
 #include <linux/uts.h>
index 2e04850..882bd56 100644 (file)
@@ -170,12 +170,11 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
                xa_for_each(&ctx->personalities, index, cred)
                        io_uring_show_cred(m, index, cred);
        }
-       if (has_lock)
-               mutex_unlock(&ctx->uring_lock);
 
        seq_puts(m, "PollList:\n");
        for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) {
                struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
+               struct io_hash_bucket *hbl = &ctx->cancel_table_locked.hbs[i];
                struct io_kiocb *req;
 
                spin_lock(&hb->lock);
@@ -183,8 +182,17 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
                        seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
                                        task_work_pending(req->task));
                spin_unlock(&hb->lock);
+
+               if (!has_lock)
+                       continue;
+               hlist_for_each_entry(req, &hbl->list, hash_node)
+                       seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
+                                       task_work_pending(req->task));
        }
 
+       if (has_lock)
+               mutex_unlock(&ctx->uring_lock);
+
        seq_puts(m, "CqOverflowList:\n");
        spin_lock(&ctx->completion_lock);
        list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
index 992dcd9..411bb2d 100644 (file)
@@ -1230,7 +1230,12 @@ static void io_wq_cancel_tw_create(struct io_wq *wq)
 
                worker = container_of(cb, struct io_worker, create_work);
                io_worker_cancel_cb(worker);
-               kfree(worker);
+               /*
+                * Only the worker continuation helper has worker allocated and
+                * hence needs freeing.
+                */
+               if (cb->func == create_worker_cont)
+                       kfree(worker);
        }
 }
 
index 2ac1cd8..0a4efad 100644 (file)
@@ -3674,7 +3674,7 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
 
        if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
            && !(ctx->flags & IORING_SETUP_R_DISABLED))
-               ctx->submitter_task = get_task_struct(current);
+               WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
 
        file = io_uring_get_file(ctx);
        if (IS_ERR(file)) {
@@ -3868,7 +3868,7 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
                return -EBADFD;
 
        if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task)
-               ctx->submitter_task = get_task_struct(current);
+               WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
 
        if (ctx->restrictions.registered)
                ctx->restricted = 1;
index 2d3cd94..15602a1 100644 (file)
@@ -25,6 +25,28 @@ struct io_msg {
        u32 flags;
 };
 
+static void io_double_unlock_ctx(struct io_ring_ctx *octx)
+{
+       mutex_unlock(&octx->uring_lock);
+}
+
+static int io_double_lock_ctx(struct io_ring_ctx *octx,
+                             unsigned int issue_flags)
+{
+       /*
+        * To ensure proper ordering between the two ctxs, we can only
+        * attempt a trylock on the target. If that fails and we already have
+        * the source ctx lock, punt to io-wq.
+        */
+       if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+               if (!mutex_trylock(&octx->uring_lock))
+                       return -EAGAIN;
+               return 0;
+       }
+       mutex_lock(&octx->uring_lock);
+       return 0;
+}
+
 void io_msg_ring_cleanup(struct io_kiocb *req)
 {
        struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
@@ -36,6 +58,29 @@ void io_msg_ring_cleanup(struct io_kiocb *req)
        msg->src_file = NULL;
 }
 
+static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx)
+{
+       if (!target_ctx->task_complete)
+               return false;
+       return current != target_ctx->submitter_task;
+}
+
+static int io_msg_exec_remote(struct io_kiocb *req, task_work_func_t func)
+{
+       struct io_ring_ctx *ctx = req->file->private_data;
+       struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+       struct task_struct *task = READ_ONCE(ctx->submitter_task);
+
+       if (unlikely(!task))
+               return -EOWNERDEAD;
+
+       init_task_work(&msg->tw, func);
+       if (task_work_add(ctx->submitter_task, &msg->tw, TWA_SIGNAL))
+               return -EOWNERDEAD;
+
+       return IOU_ISSUE_SKIP_COMPLETE;
+}
+
 static void io_msg_tw_complete(struct callback_head *head)
 {
        struct io_msg *msg = container_of(head, struct io_msg, tw);
@@ -43,61 +88,54 @@ static void io_msg_tw_complete(struct callback_head *head)
        struct io_ring_ctx *target_ctx = req->file->private_data;
        int ret = 0;
 
-       if (current->flags & PF_EXITING)
+       if (current->flags & PF_EXITING) {
                ret = -EOWNERDEAD;
-       else if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
-               ret = -EOVERFLOW;
+       } else {
+               /*
+                * If the target ring is using IOPOLL mode, then we need to be
+                * holding the uring_lock for posting completions. Other ring
+                * types rely on the regular completion locking, which is
+                * handled while posting.
+                */
+               if (target_ctx->flags & IORING_SETUP_IOPOLL)
+                       mutex_lock(&target_ctx->uring_lock);
+               if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+                       ret = -EOVERFLOW;
+               if (target_ctx->flags & IORING_SETUP_IOPOLL)
+                       mutex_unlock(&target_ctx->uring_lock);
+       }
 
        if (ret < 0)
                req_set_fail(req);
        io_req_queue_tw_complete(req, ret);
 }
 
-static int io_msg_ring_data(struct io_kiocb *req)
+static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_ring_ctx *target_ctx = req->file->private_data;
        struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+       int ret;
 
        if (msg->src_fd || msg->dst_fd || msg->flags)
                return -EINVAL;
+       if (target_ctx->flags & IORING_SETUP_R_DISABLED)
+               return -EBADFD;
 
-       if (target_ctx->task_complete && current != target_ctx->submitter_task) {
-               init_task_work(&msg->tw, io_msg_tw_complete);
-               if (task_work_add(target_ctx->submitter_task, &msg->tw,
-                                 TWA_SIGNAL_NO_IPI))
-                       return -EOWNERDEAD;
-
-               atomic_or(IORING_SQ_TASKRUN, &target_ctx->rings->sq_flags);
-               return IOU_ISSUE_SKIP_COMPLETE;
-       }
-
-       if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
-               return 0;
+       if (io_msg_need_remote(target_ctx))
+               return io_msg_exec_remote(req, io_msg_tw_complete);
 
-       return -EOVERFLOW;
-}
-
-static void io_double_unlock_ctx(struct io_ring_ctx *octx,
-                                unsigned int issue_flags)
-{
-       mutex_unlock(&octx->uring_lock);
-}
-
-static int io_double_lock_ctx(struct io_ring_ctx *octx,
-                             unsigned int issue_flags)
-{
-       /*
-        * To ensure proper ordering between the two ctxs, we can only
-        * attempt a trylock on the target. If that fails and we already have
-        * the source ctx lock, punt to io-wq.
-        */
-       if (!(issue_flags & IO_URING_F_UNLOCKED)) {
-               if (!mutex_trylock(&octx->uring_lock))
+       ret = -EOVERFLOW;
+       if (target_ctx->flags & IORING_SETUP_IOPOLL) {
+               if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
                        return -EAGAIN;
-               return 0;
+               if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+                       ret = 0;
+               io_double_unlock_ctx(target_ctx);
+       } else {
+               if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+                       ret = 0;
        }
-       mutex_lock(&octx->uring_lock);
-       return 0;
+       return ret;
 }
 
 static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
@@ -148,7 +186,7 @@ static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flag
        if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
                ret = -EOVERFLOW;
 out_unlock:
-       io_double_unlock_ctx(target_ctx, issue_flags);
+       io_double_unlock_ctx(target_ctx);
        return ret;
 }
 
@@ -174,6 +212,8 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
 
        if (target_ctx == ctx)
                return -EINVAL;
+       if (target_ctx->flags & IORING_SETUP_R_DISABLED)
+               return -EBADFD;
        if (!src_file) {
                src_file = io_msg_grab_file(req, issue_flags);
                if (!src_file)
@@ -182,14 +222,8 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
                req->flags |= REQ_F_NEED_CLEANUP;
        }
 
-       if (target_ctx->task_complete && current != target_ctx->submitter_task) {
-               init_task_work(&msg->tw, io_msg_tw_fd_complete);
-               if (task_work_add(target_ctx->submitter_task, &msg->tw,
-                                 TWA_SIGNAL))
-                       return -EOWNERDEAD;
-
-               return IOU_ISSUE_SKIP_COMPLETE;
-       }
+       if (io_msg_need_remote(target_ctx))
+               return io_msg_exec_remote(req, io_msg_tw_fd_complete);
        return io_msg_install_complete(req, issue_flags);
 }
 
@@ -224,7 +258,7 @@ int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
 
        switch (msg->cmd) {
        case IORING_MSG_DATA:
-               ret = io_msg_ring_data(req);
+               ret = io_msg_ring_data(req, issue_flags);
                break;
        case IORING_MSG_SEND_FD:
                ret = io_msg_send_fd(req, issue_flags);
index ee7da61..2ac1366 100644 (file)
@@ -223,21 +223,22 @@ enum {
        IOU_POLL_DONE = 0,
        IOU_POLL_NO_ACTION = 1,
        IOU_POLL_REMOVE_POLL_USE_RES = 2,
+       IOU_POLL_REISSUE = 3,
 };
 
 /*
  * All poll tw should go through this. Checks for poll events, manages
  * references, does rewait, etc.
  *
- * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action require,
- * which is either spurious wakeup or multishot CQE is served.
- * IOU_POLL_DONE when it's done with the request, then the mask is stored in req->cqe.res.
- * IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot poll and that the result
- * is stored in req->cqe.
+ * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action
+ * require, which is either spurious wakeup or multishot CQE is served.
+ * IOU_POLL_DONE when it's done with the request, then the mask is stored in
+ * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot
+ * poll and that the result is stored in req->cqe.
  */
 static int io_poll_check_events(struct io_kiocb *req, bool *locked)
 {
-       int v, ret;
+       int v;
 
        /* req->task == current here, checking PF_EXITING is safe */
        if (unlikely(req->task->flags & PF_EXITING))
@@ -276,10 +277,19 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
                if (!req->cqe.res) {
                        struct poll_table_struct pt = { ._key = req->apoll_events };
                        req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
+                       /*
+                        * We got woken with a mask, but someone else got to
+                        * it first. The above vfs_poll() doesn't add us back
+                        * to the waitqueue, so if we get nothing back, we
+                        * should be safe and attempt a reissue.
+                        */
+                       if (unlikely(!req->cqe.res)) {
+                               /* Multishot armed need not reissue */
+                               if (!(req->apoll_events & EPOLLONESHOT))
+                                       continue;
+                               return IOU_POLL_REISSUE;
+                       }
                }
-
-               if ((unlikely(!req->cqe.res)))
-                       continue;
                if (req->apoll_events & EPOLLONESHOT)
                        return IOU_POLL_DONE;
 
@@ -294,7 +304,7 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
                                return IOU_POLL_REMOVE_POLL_USE_RES;
                        }
                } else {
-                       ret = io_poll_issue(req, locked);
+                       int ret = io_poll_issue(req, locked);
                        if (ret == IOU_STOP_MULTISHOT)
                                return IOU_POLL_REMOVE_POLL_USE_RES;
                        if (ret < 0)
@@ -330,6 +340,9 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 
                        poll = io_kiocb_to_cmd(req, struct io_poll);
                        req->cqe.res = mangle_poll(req->cqe.res & poll->events);
+               } else if (ret == IOU_POLL_REISSUE) {
+                       io_req_task_submit(req, locked);
+                       return;
                } else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
                        req->cqe.res = ret;
                        req_set_fail(req);
@@ -342,7 +355,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 
                if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
                        io_req_task_complete(req, locked);
-               else if (ret == IOU_POLL_DONE)
+               else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
                        io_req_task_submit(req, locked);
                else
                        io_req_defer_failed(req, ret);
@@ -533,6 +546,14 @@ static bool io_poll_can_finish_inline(struct io_kiocb *req,
        return pt->owning || io_poll_get_ownership(req);
 }
 
+static void io_poll_add_hash(struct io_kiocb *req)
+{
+       if (req->flags & REQ_F_HASH_LOCKED)
+               io_poll_req_insert_locked(req);
+       else
+               io_poll_req_insert(req);
+}
+
 /*
  * Returns 0 when it's handed over for polling. The caller owns the requests if
  * it returns non-zero, but otherwise should not touch it. Negative values
@@ -591,18 +612,17 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 
        if (mask &&
           ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
-               if (!io_poll_can_finish_inline(req, ipt))
+               if (!io_poll_can_finish_inline(req, ipt)) {
+                       io_poll_add_hash(req);
                        return 0;
+               }
                io_poll_remove_entries(req);
                ipt->result_mask = mask;
                /* no one else has access to the req, forget about the ref */
                return 1;
        }
 
-       if (req->flags & REQ_F_HASH_LOCKED)
-               io_poll_req_insert_locked(req);
-       else
-               io_poll_req_insert(req);
+       io_poll_add_hash(req);
 
        if (mask && (poll->events & EPOLLET) &&
            io_poll_can_finish_inline(req, ipt)) {
index 8227af2..9c3ddd4 100644 (file)
@@ -1062,7 +1062,11 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
                        continue;
 
                req->cqe.flags = io_put_kbuf(req, 0);
-               io_fill_cqe_req(req->ctx, req);
+               if (unlikely(!__io_fill_cqe_req(ctx, req))) {
+                       spin_lock(&ctx->completion_lock);
+                       io_req_cqe_overflow(req);
+                       spin_unlock(&ctx->completion_lock);
+               }
        }
 
        if (unlikely(!nr_events))
index 473036b..81b97f0 100755 (executable)
@@ -14,6 +14,8 @@ include/
 arch/$SRCARCH/include/
 "
 
+type cpio > /dev/null
+
 # Support incremental builds by skipping archive generation
 # if timestamps of files being archived are not changed.
 
index f35d9cc..bfbc12d 100644 (file)
@@ -157,14 +157,11 @@ static void test_kallsyms_compression_ratio(void)
 static int lookup_name(void *data, const char *name, struct module *mod, unsigned long addr)
 {
        u64 t0, t1, t;
-       unsigned long flags;
        struct test_stat *stat = (struct test_stat *)data;
 
-       local_irq_save(flags);
-       t0 = sched_clock();
+       t0 = ktime_get_ns();
        (void)kallsyms_lookup_name(name);
-       t1 = sched_clock();
-       local_irq_restore(flags);
+       t1 = ktime_get_ns();
 
        t = t1 - t0;
        if (t < stat->min)
@@ -234,18 +231,15 @@ static int find_symbol(void *data, const char *name, struct module *mod, unsigne
 static void test_perf_kallsyms_on_each_symbol(void)
 {
        u64 t0, t1;
-       unsigned long flags;
        struct test_stat stat;
 
        memset(&stat, 0, sizeof(stat));
        stat.max = INT_MAX;
        stat.name = stub_name;
        stat.perf = 1;
-       local_irq_save(flags);
-       t0 = sched_clock();
+       t0 = ktime_get_ns();
        kallsyms_on_each_symbol(find_symbol, &stat);
-       t1 = sched_clock();
-       local_irq_restore(flags);
+       t1 = ktime_get_ns();
        pr_info("kallsyms_on_each_symbol() traverse all: %lld ns\n", t1 - t0);
 }
 
@@ -270,17 +264,14 @@ static int match_symbol(void *data, unsigned long addr)
 static void test_perf_kallsyms_on_each_match_symbol(void)
 {
        u64 t0, t1;
-       unsigned long flags;
        struct test_stat stat;
 
        memset(&stat, 0, sizeof(stat));
        stat.max = INT_MAX;
        stat.name = stub_name;
-       local_irq_save(flags);
-       t0 = sched_clock();
+       t0 = ktime_get_ns();
        kallsyms_on_each_match_symbol(match_symbol, stat.name, &stat);
-       t1 = sched_clock();
-       local_irq_restore(flags);
+       t1 = ktime_get_ns();
        pr_info("kallsyms_on_each_match_symbol() traverse all: %lld ns\n", t1 - t0);
 }
 
index 48568a0..4ac3fe4 100644 (file)
@@ -2393,7 +2393,8 @@ static bool finished_loading(const char *name)
        sched_annotate_sleep();
        mutex_lock(&module_mutex);
        mod = find_module_all(name, strlen(name), true);
-       ret = !mod || mod->state == MODULE_STATE_LIVE;
+       ret = !mod || mod->state == MODULE_STATE_LIVE
+               || mod->state == MODULE_STATE_GOING;
        mutex_unlock(&module_mutex);
 
        return ret;
@@ -2569,20 +2570,35 @@ static int add_unformed_module(struct module *mod)
 
        mod->state = MODULE_STATE_UNFORMED;
 
-again:
        mutex_lock(&module_mutex);
        old = find_module_all(mod->name, strlen(mod->name), true);
        if (old != NULL) {
-               if (old->state != MODULE_STATE_LIVE) {
+               if (old->state == MODULE_STATE_COMING
+                   || old->state == MODULE_STATE_UNFORMED) {
                        /* Wait in case it fails to load. */
                        mutex_unlock(&module_mutex);
                        err = wait_event_interruptible(module_wq,
                                               finished_loading(mod->name));
                        if (err)
                                goto out_unlocked;
-                       goto again;
+
+                       /* The module might have gone in the meantime. */
+                       mutex_lock(&module_mutex);
+                       old = find_module_all(mod->name, strlen(mod->name),
+                                             true);
                }
-               err = -EEXIST;
+
+               /*
+                * We are here only when the same module was being loaded. Do
+                * not try to load it again right now. It prevents long delays
+                * caused by serialized module load failures. It might happen
+                * when more devices of the same type trigger load of
+                * a particular module.
+                */
+               if (old && old->state == MODULE_STATE_LIVE)
+                       err = -EEXIST;
+               else
+                       err = -EBUSY;
                goto out;
        }
        mod_update_bounds(mod);
index 7decf1e..a5ed2e5 100644 (file)
@@ -123,6 +123,7 @@ bool console_srcu_read_lock_is_held(void)
 {
        return srcu_read_lock_held(&console_srcu);
 }
+EXPORT_SYMBOL(console_srcu_read_lock_is_held);
 #endif
 
 enum devkmsg_log_bits {
@@ -1891,6 +1892,7 @@ static void console_lock_spinning_enable(void)
 /**
  * console_lock_spinning_disable_and_check - mark end of code where another
  *     thread was able to busy wait and check if there is a waiter
+ * @cookie: cookie returned from console_srcu_read_lock()
  *
  * This is called at the end of the section where spinning is allowed.
  * It has two functions. First, it is a signal that it is no longer
index bb1ee6d..e838feb 100644 (file)
@@ -8290,12 +8290,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
        if (retval)
                goto out_put_task;
 
+       /*
+        * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
+        * alloc_user_cpus_ptr() returns NULL.
+        */
        user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
-       if (IS_ENABLED(CONFIG_SMP) && !user_mask) {
+       if (user_mask) {
+               cpumask_copy(user_mask, in_mask);
+       } else if (IS_ENABLED(CONFIG_SMP)) {
                retval = -ENOMEM;
                goto out_put_task;
        }
-       cpumask_copy(user_mask, in_mask);
+
        ac = (struct affinity_context){
                .new_mask  = in_mask,
                .user_mask = user_mask,
index c36aa54..0f87369 100644 (file)
@@ -7229,10 +7229,10 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
        eenv_task_busy_time(&eenv, p, prev_cpu);
 
        for (; pd; pd = pd->next) {
+               unsigned long util_min = p_util_min, util_max = p_util_max;
                unsigned long cpu_cap, cpu_thermal_cap, util;
                unsigned long cur_delta, max_spare_cap = 0;
                unsigned long rq_util_min, rq_util_max;
-               unsigned long util_min, util_max;
                unsigned long prev_spare_cap = 0;
                int max_spare_cap_cpu = -1;
                unsigned long base_energy;
@@ -7251,6 +7251,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
                eenv.pd_cap = 0;
 
                for_each_cpu(cpu, cpus) {
+                       struct rq *rq = cpu_rq(cpu);
+
                        eenv.pd_cap += cpu_thermal_cap;
 
                        if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
@@ -7269,24 +7271,19 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
                         * much capacity we can get out of the CPU; this is
                         * aligned with sched_cpu_util().
                         */
-                       if (uclamp_is_used()) {
-                               if (uclamp_rq_is_idle(cpu_rq(cpu))) {
-                                       util_min = p_util_min;
-                                       util_max = p_util_max;
-                               } else {
-                                       /*
-                                        * Open code uclamp_rq_util_with() except for
-                                        * the clamp() part. Ie: apply max aggregation
-                                        * only. util_fits_cpu() logic requires to
-                                        * operate on non clamped util but must use the
-                                        * max-aggregated uclamp_{min, max}.
-                                        */
-                                       rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
-                                       rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
-
-                                       util_min = max(rq_util_min, p_util_min);
-                                       util_max = max(rq_util_max, p_util_max);
-                               }
+                       if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) {
+                               /*
+                                * Open code uclamp_rq_util_with() except for
+                                * the clamp() part. Ie: apply max aggregation
+                                * only. util_fits_cpu() logic requires to
+                                * operate on non clamped util but must use the
+                                * max-aggregated uclamp_{min, max}.
+                                */
+                               rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN);
+                               rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX);
+
+                               util_min = max(rq_util_min, p_util_min);
+                               util_max = max(rq_util_max, p_util_max);
                        }
                        if (!util_fits_cpu(util, util_min, util_max, cpu))
                                continue;
@@ -8871,16 +8868,23 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
         *   * Thermal pressure will impact all cpus in this perf domain
         *     equally.
         */
-       if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+       if (sched_energy_enabled()) {
                unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
-               struct perf_domain *pd = rcu_dereference(rq->rd->pd);
+               struct perf_domain *pd;
 
+               rcu_read_lock();
+
+               pd = rcu_dereference(rq->rd->pd);
                rq->cpu_capacity_inverted = 0;
 
                for (; pd; pd = pd->next) {
                        struct cpumask *pd_span = perf_domain_span(pd);
                        unsigned long pd_cap_orig, pd_cap;
 
+                       /* We can't be inverted against our own pd */
+                       if (cpumask_test_cpu(cpu_of(rq), pd_span))
+                               continue;
+
                        cpu = cpumask_any(pd_span);
                        pd_cap_orig = arch_scale_cpu_capacity(cpu);
 
@@ -8905,6 +8909,8 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
                                break;
                        }
                }
+
+               rcu_read_unlock();
        }
 
        trace_sched_cpu_capacity_tp(rq);
index 5fd54bf..88b31f0 100644 (file)
@@ -1442,6 +1442,8 @@ static int do_prlimit(struct task_struct *tsk, unsigned int resource,
 
        if (resource >= RLIM_NLIMITS)
                return -EINVAL;
+       resource = array_index_nospec(resource, RLIM_NLIMITS);
+
        if (new_rlim) {
                if (new_rlim->rlim_cur > new_rlim->rlim_max)
                        return -EINVAL;
index 45e93ec..2afe4c5 100644 (file)
@@ -23,7 +23,6 @@
                }                                                               \
                if (!--retry)                                                   \
                        break;                                                  \
-               cpu_relax();                                                    \
        }                                                                       \
 } while (0)
 
index 9055e8b..489e15b 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/jiffies.h>
+#include <linux/nospec.h>
 #include <linux/skbuff.h>
 #include <linux/string.h>
 #include <linux/types.h>
@@ -381,6 +382,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
        if (type <= 0 || type > maxtype)
                return 0;
 
+       type = array_index_nospec(type, maxtype + 1);
        pt = &policy[type];
 
        BUG_ON(pt->type > NLA_TYPE_MAX);
@@ -596,6 +598,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
                        }
                        continue;
                }
+               type = array_index_nospec(type, maxtype + 1);
                if (policy) {
                        int err = validate_nla(nla, maxtype, policy,
                                               validate, extack, depth);
index f72aa50..8d7519a 100644 (file)
@@ -470,22 +470,27 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
                return -EOPNOTSUPP;
 
        if (sgt_append->prv) {
+               unsigned long next_pfn = (page_to_phys(sg_page(sgt_append->prv)) +
+                       sgt_append->prv->offset + sgt_append->prv->length) / PAGE_SIZE;
+
                if (WARN_ON(offset))
                        return -EINVAL;
 
                /* Merge contiguous pages into the last SG */
                prv_len = sgt_append->prv->length;
-               last_pg = sg_page(sgt_append->prv);
-               while (n_pages && pages_are_mergeable(pages[0], last_pg)) {
-                       if (sgt_append->prv->length + PAGE_SIZE > max_segment)
-                               break;
-                       sgt_append->prv->length += PAGE_SIZE;
-                       last_pg = pages[0];
-                       pages++;
-                       n_pages--;
+               if (page_to_pfn(pages[0]) == next_pfn) {
+                       last_pg = pfn_to_page(next_pfn - 1);
+                       while (n_pages && pages_are_mergeable(pages[0], last_pg)) {
+                               if (sgt_append->prv->length + PAGE_SIZE > max_segment)
+                                       break;
+                               sgt_append->prv->length += PAGE_SIZE;
+                               last_pg = pages[0];
+                               pages++;
+                               n_pages--;
+                       }
+                       if (!n_pages)
+                               goto out;
                }
-               if (!n_pages)
-                       goto out;
        }
 
        /* compute number of contiguous chunks */
index 6bdc1cd..ec10506 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * lib/minmax.c: windowed min/max tracker
  *
  * Kathleen Nichols' algorithm for tracking the minimum (or maximum)
index db89523..7fcdb98 100644 (file)
@@ -94,6 +94,8 @@ static int hugetlb_acct_memory(struct hstate *h, long delta);
 static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
+static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+               unsigned long start, unsigned long end);
 
 static inline bool subpool_is_free(struct hugepage_subpool *spool)
 {
@@ -1181,7 +1183,7 @@ void hugetlb_dup_vma_private(struct vm_area_struct *vma)
 
 /*
  * Reset and decrement one ref on hugepage private reservation.
- * Called with mm->mmap_sem writer semaphore held.
+ * Called with mm->mmap_lock writer semaphore held.
  * This function should be only used by move_vma() and operate on
  * same sized vma. It should never come here with last ref on the
  * reservation.
@@ -4834,6 +4836,25 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
 {
        if (addr & ~(huge_page_mask(hstate_vma(vma))))
                return -EINVAL;
+
+       /*
+        * PMD sharing is only possible for PUD_SIZE-aligned address ranges
+        * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
+        * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
+        */
+       if (addr & ~PUD_MASK) {
+               /*
+                * hugetlb_vm_op_split is called right before we attempt to
+                * split the VMA. We will need to unshare PMDs in the old and
+                * new VMAs, so let's unshare before we split.
+                */
+               unsigned long floor = addr & PUD_MASK;
+               unsigned long ceil = floor + PUD_SIZE;
+
+               if (floor >= vma->vm_start && ceil <= vma->vm_end)
+                       hugetlb_unshare_pmds(vma, floor, ceil);
+       }
+
        return 0;
 }
 
@@ -5131,7 +5152,7 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
 
        /*
         * We don't have to worry about the ordering of src and dst ptlocks
-        * because exclusive mmap_sem (or the i_mmap_lock) prevents deadlock.
+        * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
         */
        if (src_ptl != dst_ptl)
                spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
@@ -6639,8 +6660,17 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                spinlock_t *ptl;
                ptep = huge_pte_offset(mm, address, psize);
                if (!ptep) {
-                       address |= last_addr_mask;
-                       continue;
+                       if (!uffd_wp) {
+                               address |= last_addr_mask;
+                               continue;
+                       }
+                       /*
+                        * Userfaultfd wr-protect requires pgtable
+                        * pre-allocations to install pte markers.
+                        */
+                       ptep = huge_pte_alloc(mm, vma, address, psize);
+                       if (!ptep)
+                               break;
                }
                ptl = huge_pte_lock(h, mm, ptep);
                if (huge_pmd_unshare(mm, vma, address, ptep)) {
@@ -6658,16 +6688,13 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                }
                pte = huge_ptep_get(ptep);
                if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
-                       spin_unlock(ptl);
-                       continue;
-               }
-               if (unlikely(is_hugetlb_entry_migration(pte))) {
+                       /* Nothing to do. */
+               } else if (unlikely(is_hugetlb_entry_migration(pte))) {
                        swp_entry_t entry = pte_to_swp_entry(pte);
                        struct page *page = pfn_swap_entry_to_page(entry);
+                       pte_t newpte = pte;
 
-                       if (!is_readable_migration_entry(entry)) {
-                               pte_t newpte;
-
+                       if (is_writable_migration_entry(entry)) {
                                if (PageAnon(page))
                                        entry = make_readable_exclusive_migration_entry(
                                                                swp_offset(entry));
@@ -6675,25 +6702,22 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                                        entry = make_readable_migration_entry(
                                                                swp_offset(entry));
                                newpte = swp_entry_to_pte(entry);
-                               if (uffd_wp)
-                                       newpte = pte_swp_mkuffd_wp(newpte);
-                               else if (uffd_wp_resolve)
-                                       newpte = pte_swp_clear_uffd_wp(newpte);
-                               set_huge_pte_at(mm, address, ptep, newpte);
                                pages++;
                        }
-                       spin_unlock(ptl);
-                       continue;
-               }
-               if (unlikely(pte_marker_uffd_wp(pte))) {
-                       /*
-                        * This is changing a non-present pte into a none pte,
-                        * no need for huge_ptep_modify_prot_start/commit().
-                        */
+
+                       if (uffd_wp)
+                               newpte = pte_swp_mkuffd_wp(newpte);
+                       else if (uffd_wp_resolve)
+                               newpte = pte_swp_clear_uffd_wp(newpte);
+                       if (!pte_same(pte, newpte))
+                               set_huge_pte_at(mm, address, ptep, newpte);
+               } else if (unlikely(is_pte_marker(pte))) {
+                       /* No other markers apply for now. */
+                       WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
                        if (uffd_wp_resolve)
+                               /* Safe to modify directly (non-present->none). */
                                huge_pte_clear(mm, address, ptep, psize);
-               }
-               if (!huge_pte_none(pte)) {
+               } else if (!huge_pte_none(pte)) {
                        pte_t old_pte;
                        unsigned int shift = huge_page_shift(hstate_vma(vma));
 
@@ -7328,26 +7352,21 @@ void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int re
        }
 }
 
-/*
- * This function will unconditionally remove all the shared pmd pgtable entries
- * within the specific vma for a hugetlbfs memory range.
- */
-void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
+static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+                                  unsigned long start,
+                                  unsigned long end)
 {
        struct hstate *h = hstate_vma(vma);
        unsigned long sz = huge_page_size(h);
        struct mm_struct *mm = vma->vm_mm;
        struct mmu_notifier_range range;
-       unsigned long address, start, end;
+       unsigned long address;
        spinlock_t *ptl;
        pte_t *ptep;
 
        if (!(vma->vm_flags & VM_MAYSHARE))
                return;
 
-       start = ALIGN(vma->vm_start, PUD_SIZE);
-       end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
-
        if (start >= end)
                return;
 
@@ -7379,6 +7398,16 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
        mmu_notifier_invalidate_range_end(&range);
 }
 
+/*
+ * This function will unconditionally remove all the shared pmd pgtable entries
+ * within the specific vma for a hugetlbfs memory range.
+ */
+void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
+{
+       hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
+                       ALIGN_DOWN(vma->vm_end, PUD_SIZE));
+}
+
 #ifdef CONFIG_CMA
 static bool cma_reserve_called __initdata;
 
index 1d02757..22598b2 100644 (file)
@@ -119,7 +119,7 @@ EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
  * Whether the KASAN KUnit test suite is currently being executed.
  * Updated in kasan_test.c.
  */
-bool kasan_kunit_executing;
+static bool kasan_kunit_executing;
 
 void kasan_kunit_test_suite_start(void)
 {
index 5cb401a..79be131 100644 (file)
@@ -1460,14 +1460,6 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
        if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
                return SCAN_VMA_CHECK;
 
-       /*
-        * Symmetry with retract_page_tables(): Exclude MAP_PRIVATE mappings
-        * that got written to. Without this, we'd have to also lock the
-        * anon_vma if one exists.
-        */
-       if (vma->anon_vma)
-               return SCAN_VMA_CHECK;
-
        /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
        if (userfaultfd_wp(vma))
                return SCAN_PTE_UFFD_WP;
@@ -1567,8 +1559,14 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
        }
 
        /* step 4: remove pte entries */
+       /* we make no change to anon, but protect concurrent anon page lookup */
+       if (vma->anon_vma)
+               anon_vma_lock_write(vma->anon_vma);
+
        collapse_and_free_pmd(mm, vma, haddr, pmd);
 
+       if (vma->anon_vma)
+               anon_vma_unlock_write(vma->anon_vma);
        i_mmap_unlock_write(vma->vm_file->f_mapping);
 
 maybe_install_pmd:
@@ -2649,7 +2647,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
                                goto out_nolock;
                        }
 
-                       hend = vma->vm_end & HPAGE_PMD_MASK;
+                       hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
                }
                mmap_assert_locked(mm);
                memset(cc->node_load, 0, sizeof(cc->node_load));
index a56a6d1..b6ea204 100644 (file)
@@ -130,7 +130,7 @@ static int replace_anon_vma_name(struct vm_area_struct *vma,
 #endif /* CONFIG_ANON_VMA_NAME */
 /*
  * Update the vm_flags on region of a vma, splitting it or merging it as
- * necessary.  Must be called with mmap_sem held for writing;
+ * necessary.  Must be called with mmap_lock held for writing;
  * Caller should ensure anon_name stability by raising its refcount even when
  * anon_name belongs to a valid vma because this function might free that vma.
  */
index d036c78..685e30e 100644 (file)
@@ -1640,7 +1640,13 @@ void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
        end = PFN_DOWN(base + size);
 
        for (; cursor < end; cursor++) {
-               memblock_free_pages(pfn_to_page(cursor), cursor, 0);
+               /*
+                * Reserved pages are always initialized by the end of
+                * memblock_free_all() (by memmap_init() and, if deferred
+                * initialization is enabled, memmap_init_reserved_pages()), so
+                * these pages can be released directly to the buddy allocator.
+                */
+               __free_pages_core(pfn_to_page(cursor), 0);
                totalram_pages_inc();
        }
 }
index 87d9293..425a934 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1524,6 +1524,10 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
        if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
                return 1;
 
+       /* Do we need write faults for uffd-wp tracking? */
+       if (userfaultfd_wp(vma))
+               return 1;
+
        /* Specialty mapping? */
        if (vm_flags & VM_PFNMAP)
                return 0;
@@ -2290,7 +2294,7 @@ static inline int munmap_sidetree(struct vm_area_struct *vma,
  * @start: The aligned start address to munmap.
  * @end: The aligned end address to munmap.
  * @uf: The userfaultfd list_head
- * @downgrade: Set to true to attempt a write downgrade of the mmap_sem
+ * @downgrade: Set to true to attempt a write downgrade of the mmap_lock
  *
  * If @downgrade is true, check return code for potential release of the lock.
  */
@@ -2465,7 +2469,7 @@ map_count_exceeded:
  * @len: The length of the range to munmap
  * @uf: The userfaultfd list_head
  * @downgrade: set to true if the user wants to attempt to write_downgrade the
- * mmap_sem
+ * mmap_lock
  *
  * This function takes a @mas that is either pointing to the previous VMA or set
  * to MA_START and sets it up to remove the mapping(s).  The @len will be
index 214c70e..5b83938 100644 (file)
@@ -559,7 +559,6 @@ void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
 
 static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
 {
-       mm->map_count++;
        vma->vm_mm = mm;
 
        /* add the VMA to the mapping */
@@ -587,6 +586,7 @@ static void mas_add_vma_to_mm(struct ma_state *mas, struct mm_struct *mm,
        BUG_ON(!vma->vm_region);
 
        setup_vma_to_mm(vma, mm);
+       mm->map_count++;
 
        /* add the VMA to the tree */
        vma_mas_store(vma, mas);
@@ -1240,6 +1240,7 @@ share:
 error_just_free:
        up_write(&nommu_region_sem);
 error:
+       mas_destroy(&mas);
        if (region->vm_file)
                fput(region->vm_file);
        kmem_cache_free(vm_region_jar, region);
@@ -1250,7 +1251,6 @@ error:
 
 sharing_violation:
        up_write(&nommu_region_sem);
-       mas_destroy(&mas);
        pr_warn("Attempt to share mismatched mappings\n");
        ret = -EINVAL;
        goto error;
@@ -1347,6 +1347,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        if (vma->vm_file)
                return -ENOMEM;
 
+       mm = vma->vm_mm;
        if (mm->map_count >= sysctl_max_map_count)
                return -ENOMEM;
 
@@ -1398,6 +1399,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        mas_set_range(&mas, vma->vm_start, vma->vm_end - 1);
        mas_store(&mas, vma);
        vma_mas_store(new, &mas);
+       mm->map_count++;
        return 0;
 
 err_mas_preallocate:
@@ -1509,7 +1511,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list
 erase_whole_vma:
        if (delete_vma_from_mm(vma))
                ret = -ENOMEM;
-       delete_vma(mm, vma);
+       else
+               delete_vma(mm, vma);
        return ret;
 }
 
index c301487..0005ab2 100644 (file)
@@ -478,12 +478,10 @@ bool shmem_is_huge(struct vm_area_struct *vma, struct inode *inode,
        if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
            test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
                return false;
-       if (shmem_huge_force)
-               return true;
-       if (shmem_huge == SHMEM_HUGE_FORCE)
-               return true;
        if (shmem_huge == SHMEM_HUGE_DENY)
                return false;
+       if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
+               return true;
 
        switch (SHMEM_SB(inode->i_sb)->huge) {
        case SHMEM_HUGE_ALWAYS:
index 7a269db..29300fc 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2211,6 +2211,8 @@ static int drain_freelist(struct kmem_cache *cache,
                raw_spin_unlock_irq(&n->list_lock);
                slab_destroy(cache, slab);
                nr_freed++;
+
+               cond_resched();
        }
 out:
        return nr_freed;
index 506f83d..4bac7ea 100644 (file)
@@ -162,6 +162,15 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
        struct sk_buff *lp;
        int segs;
 
+       /* Do not splice page pool based packets w/ non-page pool
+        * packets. This can result in reference count issues as page
+        * pool pages will not decrement the reference count and will
+        * instead be immediately returned to the pool or have frag
+        * count decremented.
+        */
+       if (p->pp_recycle != skb->pp_recycle)
+               return -ETOOMANYREFS;
+
        /* pairs with WRITE_ONCE() in netif_set_gro_max_size() */
        gro_max_size = READ_ONCE(p->dev->gro_max_size);
 
index 5581d22..078a0a4 100644 (file)
@@ -137,12 +137,12 @@ static int ops_init(const struct pernet_operations *ops, struct net *net)
                return 0;
 
        if (ops->id && ops->size) {
-cleanup:
                ng = rcu_dereference_protected(net->gen,
                                               lockdep_is_held(&pernet_ops_rwsem));
                ng->ptr[*ops->id] = NULL;
        }
 
+cleanup:
        kfree(data);
 
 out:
index ce9ff3c..3bb890a 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/slab.h>
 #include <linux/netlink.h>
 #include <linux/hash.h>
+#include <linux/nospec.h>
 
 #include <net/arp.h>
 #include <net/inet_dscp.h>
@@ -1022,6 +1023,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
                if (type > RTAX_MAX)
                        return false;
 
+               type = array_index_nospec(type, RTAX_MAX + 1);
                if (type == RTAX_CC_ALGO) {
                        char tmp[TCP_CA_NAME_MAX];
                        bool ecn_ca = false;
index 24a38b5..f58d738 100644 (file)
@@ -650,8 +650,20 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
        spin_lock(lock);
        if (osk) {
                WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
-               ret = sk_nulls_del_node_init_rcu(osk);
-       } else if (found_dup_sk) {
+               ret = sk_hashed(osk);
+               if (ret) {
+                       /* Before deleting the node, we insert a new one to make
+                        * sure that the look-up-sk process would not miss either
+                        * of them and that at least one node would exist in ehash
+                        * table all the time. Otherwise there's a tiny chance
+                        * that lookup process could find nothing in ehash table.
+                        */
+                       __sk_nulls_add_node_tail_rcu(sk, list);
+                       sk_nulls_del_node_init_rcu(osk);
+               }
+               goto unlock;
+       }
+       if (found_dup_sk) {
                *found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
                if (*found_dup_sk)
                        ret = false;
@@ -660,6 +672,7 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
        if (ret)
                __sk_nulls_add_node_rcu(sk, list);
 
+unlock:
        spin_unlock(lock);
 
        return ret;
index 1d77d99..beed32f 100644 (file)
@@ -91,10 +91,10 @@ void inet_twsk_put(struct inet_timewait_sock *tw)
 }
 EXPORT_SYMBOL_GPL(inet_twsk_put);
 
-static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
-                                  struct hlist_nulls_head *list)
+static void inet_twsk_add_node_tail_rcu(struct inet_timewait_sock *tw,
+                                       struct hlist_nulls_head *list)
 {
-       hlist_nulls_add_head_rcu(&tw->tw_node, list);
+       hlist_nulls_add_tail_rcu(&tw->tw_node, list);
 }
 
 static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
@@ -147,7 +147,7 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 
        spin_lock(lock);
 
-       inet_twsk_add_node_rcu(tw, &ehead->chain);
+       inet_twsk_add_node_tail_rcu(tw, &ehead->chain);
 
        /* Step 3: Remove SK from hash chain */
        if (__sk_nulls_del_node_init_rcu(sk))
index 7fcfdfd..0e3ee15 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 #include <linux/netlink.h>
+#include <linux/nospec.h>
 #include <linux/rtnetlink.h>
 #include <linux/types.h>
 #include <net/ip.h>
@@ -25,6 +26,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
                        return -EINVAL;
                }
 
+               type = array_index_nospec(type, RTAX_MAX + 1);
                if (type == RTAX_CC_ALGO) {
                        char tmp[TCP_CA_NAME_MAX];
 
index c567d5e..33f559f 100644 (file)
@@ -435,6 +435,7 @@ void tcp_init_sock(struct sock *sk)
 
        /* There's a bubble in the pipe until at least the first ACK. */
        tp->app_limited = ~0U;
+       tp->rate_app_limited = 1;
 
        /* See draft-stevens-tcpca-spec-01 for discussion of the
         * initialization of these values.
@@ -3178,6 +3179,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        tp->plb_rehash = 0;
        /* There's a bubble in the pipe until at least the first ACK. */
        tp->app_limited = ~0U;
+       tp->rate_app_limited = 1;
        tp->rack.mstamp = 0;
        tp->rack.advanced = 0;
        tp->rack.reo_wnd_steps = 1;
index 05b6077..2aa4421 100644 (file)
@@ -139,7 +139,7 @@ static int __tcp_set_ulp(struct sock *sk, const struct tcp_ulp_ops *ulp_ops)
        if (sk->sk_socket)
                clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
 
-       err = -EINVAL;
+       err = -ENOTCONN;
        if (!ulp_ops->clone && sk->sk_state == TCP_LISTEN)
                goto out_err;
 
index 60fd91b..c314fdd 100644 (file)
@@ -547,7 +547,20 @@ int ip6_forward(struct sk_buff *skb)
            pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
                int proxied = ip6_forward_proxy_check(skb);
                if (proxied > 0) {
-                       hdr->hop_limit--;
+                       /* It's tempting to decrease the hop limit
+                        * here by 1, as we do at the end of the
+                        * function too.
+                        *
+                        * But that would be incorrect, as proxying is
+                        * not forwarding.  The ip6_input function
+                        * will handle this packet locally, and it
+                        * depends on the hop limit being unchanged.
+                        *
+                        * One example is the NDP hop limit, that
+                        * always has to stay 255, but other would be
+                        * similar checks around RA packets, where the
+                        * user can even change the desired limit.
+                        */
                        return ip6_input(skb);
                } else if (proxied < 0) {
                        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
index e20c3fe..23ed13f 100644 (file)
@@ -2197,7 +2197,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
                ret = cfg80211_register_netdevice(ndev);
                if (ret) {
-                       ieee80211_if_free(ndev);
                        free_netdev(ndev);
                        return ret;
                }
index fc9e728..45bbe3e 100644 (file)
@@ -544,9 +544,6 @@ static int mctp_sk_init(struct sock *sk)
 
 static void mctp_sk_close(struct sock *sk, long timeout)
 {
-       struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
-
-       del_timer_sync(&msk->key_expiry);
        sk_common_release(sk);
 }
 
@@ -580,7 +577,14 @@ static void mctp_sk_unhash(struct sock *sk)
                spin_lock_irqsave(&key->lock, fl2);
                __mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_CLOSED);
        }
+       sock_set_flag(sk, SOCK_DEAD);
        spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
+
+       /* Since there are no more tag allocations (we have removed all of the
+        * keys), stop any pending expiry events. the timer cannot be re-queued
+        * as the sk is no longer observable
+        */
+       del_timer_sync(&msk->key_expiry);
 }
 
 static struct proto mctp_proto = {
index f9a80b8..f51a05e 100644 (file)
@@ -147,6 +147,7 @@ static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
        key->valid = true;
        spin_lock_init(&key->lock);
        refcount_set(&key->refs, 1);
+       sock_hold(key->sk);
 
        return key;
 }
@@ -165,6 +166,7 @@ void mctp_key_unref(struct mctp_sk_key *key)
        mctp_dev_release_key(key->dev, key);
        spin_unlock_irqrestore(&key->lock, flags);
 
+       sock_put(key->sk);
        kfree(key);
 }
 
@@ -177,6 +179,11 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
 
        spin_lock_irqsave(&net->mctp.keys_lock, flags);
 
+       if (sock_flag(&msk->sk, SOCK_DEAD)) {
+               rc = -EINVAL;
+               goto out_unlock;
+       }
+
        hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
                if (mctp_key_match(tmp, key->local_addr, key->peer_addr,
                                   key->tag)) {
@@ -198,6 +205,7 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
                hlist_add_head(&key->sklist, &msk->keys);
        }
 
+out_unlock:
        spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
 
        return rc;
@@ -315,8 +323,8 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
 
 static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
 {
+       struct mctp_sk_key *key, *any_key = NULL;
        struct net *net = dev_net(skb->dev);
-       struct mctp_sk_key *key;
        struct mctp_sock *msk;
        struct mctp_hdr *mh;
        unsigned long f;
@@ -361,13 +369,11 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
                         * key for reassembly - we'll create a more specific
                         * one for future packets if required (ie, !EOM).
                         */
-                       key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
-                       if (key) {
-                               msk = container_of(key->sk,
+                       any_key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
+                       if (any_key) {
+                               msk = container_of(any_key->sk,
                                                   struct mctp_sock, sk);
-                               spin_unlock_irqrestore(&key->lock, f);
-                               mctp_key_unref(key);
-                               key = NULL;
+                               spin_unlock_irqrestore(&any_key->lock, f);
                        }
                }
 
@@ -419,14 +425,14 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
                         * this function.
                         */
                        rc = mctp_key_add(key, msk);
-                       if (rc) {
-                               kfree(key);
-                       } else {
+                       if (!rc)
                                trace_mctp_key_acquire(key);
 
-                               /* we don't need to release key->lock on exit */
-                               mctp_key_unref(key);
-                       }
+                       /* we don't need to release key->lock on exit, so
+                        * clean up here and suppress the unlock via
+                        * setting to NULL
+                        */
+                       mctp_key_unref(key);
                        key = NULL;
 
                } else {
@@ -473,6 +479,8 @@ out_unlock:
                spin_unlock_irqrestore(&key->lock, f);
                mctp_key_unref(key);
        }
+       if (any_key)
+               mctp_key_unref(any_key);
 out:
        if (rc)
                kfree_skb(skb);
index d88b92a..945dd40 100644 (file)
 #include <net/netfilter/nf_conntrack_ecache.h>
 #include <net/netfilter/nf_conntrack_timeout.h>
 
-/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
-   closely.  They're more complex. --RR
-
-   And so for me for SCTP :D -Kiran */
-
 static const char *const sctp_conntrack_names[] = {
-       "NONE",
-       "CLOSED",
-       "COOKIE_WAIT",
-       "COOKIE_ECHOED",
-       "ESTABLISHED",
-       "SHUTDOWN_SENT",
-       "SHUTDOWN_RECD",
-       "SHUTDOWN_ACK_SENT",
-       "HEARTBEAT_SENT",
-       "HEARTBEAT_ACKED",
+       [SCTP_CONNTRACK_NONE]                   = "NONE",
+       [SCTP_CONNTRACK_CLOSED]                 = "CLOSED",
+       [SCTP_CONNTRACK_COOKIE_WAIT]            = "COOKIE_WAIT",
+       [SCTP_CONNTRACK_COOKIE_ECHOED]          = "COOKIE_ECHOED",
+       [SCTP_CONNTRACK_ESTABLISHED]            = "ESTABLISHED",
+       [SCTP_CONNTRACK_SHUTDOWN_SENT]          = "SHUTDOWN_SENT",
+       [SCTP_CONNTRACK_SHUTDOWN_RECD]          = "SHUTDOWN_RECD",
+       [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT]      = "SHUTDOWN_ACK_SENT",
+       [SCTP_CONNTRACK_HEARTBEAT_SENT]         = "HEARTBEAT_SENT",
 };
 
 #define SECS  * HZ
@@ -54,13 +48,11 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
        [SCTP_CONNTRACK_CLOSED]                 = 10 SECS,
        [SCTP_CONNTRACK_COOKIE_WAIT]            = 3 SECS,
        [SCTP_CONNTRACK_COOKIE_ECHOED]          = 3 SECS,
-       [SCTP_CONNTRACK_ESTABLISHED]            = 5 DAYS,
+       [SCTP_CONNTRACK_ESTABLISHED]            = 210 SECS,
        [SCTP_CONNTRACK_SHUTDOWN_SENT]          = 300 SECS / 1000,
        [SCTP_CONNTRACK_SHUTDOWN_RECD]          = 300 SECS / 1000,
        [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT]      = 3 SECS,
        [SCTP_CONNTRACK_HEARTBEAT_SENT]         = 30 SECS,
-       [SCTP_CONNTRACK_HEARTBEAT_ACKED]        = 210 SECS,
-       [SCTP_CONNTRACK_DATA_SENT]              = 30 SECS,
 };
 
 #define        SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1
@@ -74,8 +66,6 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
 #define        sSR SCTP_CONNTRACK_SHUTDOWN_RECD
 #define        sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
 #define        sHS SCTP_CONNTRACK_HEARTBEAT_SENT
-#define        sHA SCTP_CONNTRACK_HEARTBEAT_ACKED
-#define        sDS SCTP_CONNTRACK_DATA_SENT
 #define        sIV SCTP_CONNTRACK_MAX
 
 /*
@@ -98,10 +88,6 @@ SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite
 CLOSED            - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
                    the SHUTDOWN chunk. Connection is closed.
 HEARTBEAT_SENT    - We have seen a HEARTBEAT in a new flow.
-HEARTBEAT_ACKED   - We have seen a HEARTBEAT-ACK/DATA/SACK in the direction
-                   opposite to that of the HEARTBEAT/DATA chunk. Secondary connection
-                   is established.
-DATA_SENT         - We have seen a DATA/SACK in a new flow.
 */
 
 /* TODO
@@ -115,38 +101,36 @@ cookie echoed to closed.
 */
 
 /* SCTP conntrack state transitions */
-static const u8 sctp_conntracks[2][12][SCTP_CONNTRACK_MAX] = {
+static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
        {
 /*     ORIGINAL        */
-/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS */
-/* init         */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA, sCW},
-/* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},
-/* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
-/* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS, sCL},
-/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA, sHA, sSA},
-/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},/* Can't have Stale cookie*/
-/* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},/* 5.2.4 - Big TODO */
-/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},/* Can't come in orig dir */
-/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL, sHA, sCL},
-/* heartbeat    */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS},
-/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS},
-/* data/sack    */ {sDS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS}
+/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
+/* init         */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW},
+/* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},
+/* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+/* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL},
+/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA},
+/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/
+/* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */
+/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
+/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL},
+/* heartbeat    */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
        },
        {
 /*     REPLY   */
-/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS */
-/* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA, sIV},/* INIT in sCL Big TODO */
-/* init_ack     */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA, sIV},
-/* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL, sIV},
-/* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR, sIV},
-/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA, sIV},
-/* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV, sHA, sIV},
-/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA, sIV},/* Can't come in reply dir */
-/* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV, sHA, sIV},
-/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV, sHA, sIV},
-/* heartbeat    */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sHA},
-/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA, sHA},
-/* data/sack    */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA, sHA},
+/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
+/* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* INIT in sCL Big TODO */
+/* init_ack     */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV},
+/* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV},
+/* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV},
+/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV},
+/* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV},
+/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
+/* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV},
+/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV},
+/* heartbeat    */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
+/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sES},
        }
 };
 
@@ -160,8 +144,8 @@ static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
 
 #define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count)    \
 for ((offset) = (dataoff) + sizeof(struct sctphdr), (count) = 0;       \
-       (offset) < (skb)->len &&                                        \
-       ((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch)));   \
+       ((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch))) && \
+       (sch)->length;  \
        (offset) += (ntohs((sch)->length) + 3) & ~3, (count)++)
 
 /* Some validity checks to make sure the chunks are fine */
@@ -258,11 +242,6 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
                pr_debug("SCTP_CID_HEARTBEAT_ACK");
                i = 10;
                break;
-       case SCTP_CID_DATA:
-       case SCTP_CID_SACK:
-               pr_debug("SCTP_CID_DATA/SACK");
-               i = 11;
-               break;
        default:
                /* Other chunks like DATA or SACK do not change the state */
                pr_debug("Unknown chunk type, Will stay in %s\n",
@@ -316,9 +295,7 @@ sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
                                 ih->init_tag);
 
                        ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = ih->init_tag;
-               } else if (sch->type == SCTP_CID_HEARTBEAT ||
-                          sch->type == SCTP_CID_DATA ||
-                          sch->type == SCTP_CID_SACK) {
+               } else if (sch->type == SCTP_CID_HEARTBEAT) {
                        pr_debug("Setting vtag %x for secondary conntrack\n",
                                 sh->vtag);
                        ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
@@ -404,19 +381,19 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
 
                if (!sctp_new(ct, skb, sh, dataoff))
                        return -NF_ACCEPT;
-       } else {
-               /* Check the verification tag (Sec 8.5) */
-               if (!test_bit(SCTP_CID_INIT, map) &&
-                   !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) &&
-                   !test_bit(SCTP_CID_COOKIE_ECHO, map) &&
-                   !test_bit(SCTP_CID_ABORT, map) &&
-                   !test_bit(SCTP_CID_SHUTDOWN_ACK, map) &&
-                   !test_bit(SCTP_CID_HEARTBEAT, map) &&
-                   !test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
-                   sh->vtag != ct->proto.sctp.vtag[dir]) {
-                       pr_debug("Verification tag check failed\n");
-                       goto out;
-               }
+       }
+
+       /* Check the verification tag (Sec 8.5) */
+       if (!test_bit(SCTP_CID_INIT, map) &&
+           !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) &&
+           !test_bit(SCTP_CID_COOKIE_ECHO, map) &&
+           !test_bit(SCTP_CID_ABORT, map) &&
+           !test_bit(SCTP_CID_SHUTDOWN_ACK, map) &&
+           !test_bit(SCTP_CID_HEARTBEAT, map) &&
+           !test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
+           sh->vtag != ct->proto.sctp.vtag[dir]) {
+               pr_debug("Verification tag check failed\n");
+               goto out;
        }
 
        old_state = new_state = SCTP_CONNTRACK_NONE;
@@ -424,22 +401,29 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
        for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
                /* Special cases of Verification tag check (Sec 8.5.1) */
                if (sch->type == SCTP_CID_INIT) {
-                       /* Sec 8.5.1 (A) */
+                       /* (A) vtag MUST be zero */
                        if (sh->vtag != 0)
                                goto out_unlock;
                } else if (sch->type == SCTP_CID_ABORT) {
-                       /* Sec 8.5.1 (B) */
-                       if (sh->vtag != ct->proto.sctp.vtag[dir] &&
-                           sh->vtag != ct->proto.sctp.vtag[!dir])
+                       /* (B) vtag MUST match own vtag if T flag is unset OR
+                        * MUST match peer's vtag if T flag is set
+                        */
+                       if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
+                            sh->vtag != ct->proto.sctp.vtag[dir]) ||
+                           ((sch->flags & SCTP_CHUNK_FLAG_T) &&
+                            sh->vtag != ct->proto.sctp.vtag[!dir]))
                                goto out_unlock;
                } else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
-                       /* Sec 8.5.1 (C) */
-                       if (sh->vtag != ct->proto.sctp.vtag[dir] &&
-                           sh->vtag != ct->proto.sctp.vtag[!dir] &&
-                           sch->flags & SCTP_CHUNK_FLAG_T)
+                       /* (C) vtag MUST match own vtag if T flag is unset OR
+                        * MUST match peer's vtag if T flag is set
+                        */
+                       if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
+                            sh->vtag != ct->proto.sctp.vtag[dir]) ||
+                           ((sch->flags & SCTP_CHUNK_FLAG_T) &&
+                            sh->vtag != ct->proto.sctp.vtag[!dir]))
                                goto out_unlock;
                } else if (sch->type == SCTP_CID_COOKIE_ECHO) {
-                       /* Sec 8.5.1 (D) */
+                       /* (D) vtag must be same as init_vtag as found in INIT_ACK */
                        if (sh->vtag != ct->proto.sctp.vtag[dir])
                                goto out_unlock;
                } else if (sch->type == SCTP_CID_HEARTBEAT) {
@@ -476,11 +460,6 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
                        } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
                                ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
                        }
-               } else if (sch->type == SCTP_CID_DATA || sch->type == SCTP_CID_SACK) {
-                       if (ct->proto.sctp.vtag[dir] == 0) {
-                               pr_debug("Setting vtag %x for dir %d\n", sh->vtag, dir);
-                               ct->proto.sctp.vtag[dir] = sh->vtag;
-                       }
                }
 
                old_state = ct->proto.sctp.state;
@@ -518,8 +497,12 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
                }
 
                ct->proto.sctp.state = new_state;
-               if (old_state != new_state)
+               if (old_state != new_state) {
                        nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
+                       if (new_state == SCTP_CONNTRACK_ESTABLISHED &&
+                           !test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
+                               nf_conntrack_event_cache(IPCT_ASSURED, ct);
+               }
        }
        spin_unlock_bh(&ct->lock);
 
@@ -533,14 +516,6 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
 
        nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
 
-       if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED &&
-           dir == IP_CT_DIR_REPLY &&
-           new_state == SCTP_CONNTRACK_ESTABLISHED) {
-               pr_debug("Setting assured bit\n");
-               set_bit(IPS_ASSURED_BIT, &ct->status);
-               nf_conntrack_event_cache(IPCT_ASSURED, ct);
-       }
-
        return NF_ACCEPT;
 
 out_unlock:
@@ -701,7 +676,6 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
        [CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT]    = { .type = NLA_U32 },
        [CTA_TIMEOUT_SCTP_HEARTBEAT_SENT]       = { .type = NLA_U32 },
        [CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED]      = { .type = NLA_U32 },
-       [CTA_TIMEOUT_SCTP_DATA_SENT]            = { .type = NLA_U32 },
 };
 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
index 0250725..460294b 100644 (file)
@@ -601,8 +601,6 @@ enum nf_ct_sysctl_index {
        NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD,
        NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
        NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT,
-       NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED,
-       NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_DATA_SENT,
 #endif
 #ifdef CONFIG_NF_CT_PROTO_DCCP
        NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST,
@@ -887,18 +885,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
-       [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED] = {
-               .procname       = "nf_conntrack_sctp_timeout_heartbeat_acked",
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_DATA_SENT] = {
-               .procname       = "nf_conntrack_sctp_timeout_data_sent",
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
 #endif
 #ifdef CONFIG_NF_CT_PROTO_DCCP
        [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST] = {
@@ -1042,8 +1028,6 @@ static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net,
        XASSIGN(SHUTDOWN_RECD, sn);
        XASSIGN(SHUTDOWN_ACK_SENT, sn);
        XASSIGN(HEARTBEAT_SENT, sn);
-       XASSIGN(HEARTBEAT_ACKED, sn);
-       XASSIGN(DATA_SENT, sn);
 #undef XASSIGN
 #endif
 }
index 7325bee..19ea4d3 100644 (file)
@@ -38,10 +38,12 @@ static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
        return !nft_rbtree_interval_end(rbe);
 }
 
-static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
-                            const struct nft_rbtree_elem *interval)
+static int nft_rbtree_cmp(const struct nft_set *set,
+                         const struct nft_rbtree_elem *e1,
+                         const struct nft_rbtree_elem *e2)
 {
-       return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
+       return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
+                     set->klen);
 }
 
 static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
@@ -52,7 +54,6 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
        const struct nft_rbtree_elem *rbe, *interval = NULL;
        u8 genmask = nft_genmask_cur(net);
        const struct rb_node *parent;
-       const void *this;
        int d;
 
        parent = rcu_dereference_raw(priv->root.rb_node);
@@ -62,12 +63,11 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
 
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);
 
-               this = nft_set_ext_key(&rbe->ext);
-               d = memcmp(this, key, set->klen);
+               d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
                if (d < 0) {
                        parent = rcu_dereference_raw(parent->rb_left);
                        if (interval &&
-                           nft_rbtree_equal(set, this, interval) &&
+                           !nft_rbtree_cmp(set, rbe, interval) &&
                            nft_rbtree_interval_end(rbe) &&
                            nft_rbtree_interval_start(interval))
                                continue;
@@ -215,154 +215,216 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
        return rbe;
 }
 
+static int nft_rbtree_gc_elem(const struct nft_set *__set,
+                             struct nft_rbtree *priv,
+                             struct nft_rbtree_elem *rbe)
+{
+       struct nft_set *set = (struct nft_set *)__set;
+       struct rb_node *prev = rb_prev(&rbe->node);
+       struct nft_rbtree_elem *rbe_prev;
+       struct nft_set_gc_batch *gcb;
+
+       gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
+       if (!gcb)
+               return -ENOMEM;
+
+       /* search for expired end interval coming before this element. */
+       do {
+               rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+               if (nft_rbtree_interval_end(rbe_prev))
+                       break;
+
+               prev = rb_prev(prev);
+       } while (prev != NULL);
+
+       rb_erase(&rbe_prev->node, &priv->root);
+       rb_erase(&rbe->node, &priv->root);
+       atomic_sub(2, &set->nelems);
+
+       nft_set_gc_batch_add(gcb, rbe);
+       nft_set_gc_batch_complete(gcb);
+
+       return 0;
+}
+
+static bool nft_rbtree_update_first(const struct nft_set *set,
+                                   struct nft_rbtree_elem *rbe,
+                                   struct rb_node *first)
+{
+       struct nft_rbtree_elem *first_elem;
+
+       first_elem = rb_entry(first, struct nft_rbtree_elem, node);
+       /* this element is closest to where the new element is to be inserted:
+        * update the first element for the node list path.
+        */
+       if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
+               return true;
+
+       return false;
+}
+
 static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                               struct nft_rbtree_elem *new,
                               struct nft_set_ext **ext)
 {
-       bool overlap = false, dup_end_left = false, dup_end_right = false;
+       struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
+       struct rb_node *node, *parent, **p, *first = NULL;
        struct nft_rbtree *priv = nft_set_priv(set);
        u8 genmask = nft_genmask_next(net);
-       struct nft_rbtree_elem *rbe;
-       struct rb_node *parent, **p;
-       int d;
+       int d, err;
 
-       /* Detect overlaps as we descend the tree. Set the flag in these cases:
-        *
-        * a1. _ _ __>|  ?_ _ __|  (insert end before existing end)
-        * a2. _ _ ___|  ?_ _ _>|  (insert end after existing end)
-        * a3. _ _ ___? >|_ _ __|  (insert start before existing end)
-        *
-        * and clear it later on, as we eventually reach the points indicated by
-        * '?' above, in the cases described below. We'll always meet these
-        * later, locally, due to tree ordering, and overlaps for the intervals
-        * that are the closest together are always evaluated last.
-        *
-        * b1. _ _ __>|  !_ _ __|  (insert end before existing start)
-        * b2. _ _ ___|  !_ _ _>|  (insert end after existing start)
-        * b3. _ _ ___! >|_ _ __|  (insert start after existing end, as a leaf)
-        *            '--' no nodes falling in this range
-        * b4.          >|_ _   !  (insert start before existing start)
-        *
-        * Case a3. resolves to b3.:
-        * - if the inserted start element is the leftmost, because the '0'
-        *   element in the tree serves as end element
-        * - otherwise, if an existing end is found immediately to the left. If
-        *   there are existing nodes in between, we need to further descend the
-        *   tree before we can conclude the new start isn't causing an overlap
-        *
-        * or to b4., which, preceded by a3., means we already traversed one or
-        * more existing intervals entirely, from the right.
-        *
-        * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
-        * in that order.
-        *
-        * The flag is also cleared in two special cases:
-        *
-        * b5. |__ _ _!|<_ _ _   (insert start right before existing end)
-        * b6. |__ _ >|!__ _ _   (insert end right after existing start)
-        *
-        * which always happen as last step and imply that no further
-        * overlapping is possible.
-        *
-        * Another special case comes from the fact that start elements matching
-        * an already existing start element are allowed: insertion is not
-        * performed but we return -EEXIST in that case, and the error will be
-        * cleared by the caller if NLM_F_EXCL is not present in the request.
-        * This way, request for insertion of an exact overlap isn't reported as
-        * error to userspace if not desired.
-        *
-        * However, if the existing start matches a pre-existing start, but the
-        * end element doesn't match the corresponding pre-existing end element,
-        * we need to report a partial overlap. This is a local condition that
-        * can be noticed without need for a tracking flag, by checking for a
-        * local duplicated end for a corresponding start, from left and right,
-        * separately.
+       /* Descend the tree to search for an existing element greater than the
+        * key value to insert that is greater than the new element. This is the
+        * first element to walk the ordered elements to find possible overlap.
         */
-
        parent = NULL;
        p = &priv->root.rb_node;
        while (*p != NULL) {
                parent = *p;
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);
-               d = memcmp(nft_set_ext_key(&rbe->ext),
-                          nft_set_ext_key(&new->ext),
-                          set->klen);
+               d = nft_rbtree_cmp(set, rbe, new);
+
                if (d < 0) {
                        p = &parent->rb_left;
-
-                       if (nft_rbtree_interval_start(new)) {
-                               if (nft_rbtree_interval_end(rbe) &&
-                                   nft_set_elem_active(&rbe->ext, genmask) &&
-                                   !nft_set_elem_expired(&rbe->ext) && !*p)
-                                       overlap = false;
-                       } else {
-                               if (dup_end_left && !*p)
-                                       return -ENOTEMPTY;
-
-                               overlap = nft_rbtree_interval_end(rbe) &&
-                                         nft_set_elem_active(&rbe->ext,
-                                                             genmask) &&
-                                         !nft_set_elem_expired(&rbe->ext);
-
-                               if (overlap) {
-                                       dup_end_right = true;
-                                       continue;
-                               }
-                       }
                } else if (d > 0) {
-                       p = &parent->rb_right;
+                       if (!first ||
+                           nft_rbtree_update_first(set, rbe, first))
+                               first = &rbe->node;
 
-                       if (nft_rbtree_interval_end(new)) {
-                               if (dup_end_right && !*p)
-                                       return -ENOTEMPTY;
-
-                               overlap = nft_rbtree_interval_end(rbe) &&
-                                         nft_set_elem_active(&rbe->ext,
-                                                             genmask) &&
-                                         !nft_set_elem_expired(&rbe->ext);
-
-                               if (overlap) {
-                                       dup_end_left = true;
-                                       continue;
-                               }
-                       } else if (nft_set_elem_active(&rbe->ext, genmask) &&
-                                  !nft_set_elem_expired(&rbe->ext)) {
-                               overlap = nft_rbtree_interval_end(rbe);
-                       }
+                       p = &parent->rb_right;
                } else {
-                       if (nft_rbtree_interval_end(rbe) &&
-                           nft_rbtree_interval_start(new)) {
+                       if (nft_rbtree_interval_end(rbe))
                                p = &parent->rb_left;
-
-                               if (nft_set_elem_active(&rbe->ext, genmask) &&
-                                   !nft_set_elem_expired(&rbe->ext))
-                                       overlap = false;
-                       } else if (nft_rbtree_interval_start(rbe) &&
-                                  nft_rbtree_interval_end(new)) {
+                       else
                                p = &parent->rb_right;
+               }
+       }
+
+       if (!first)
+               first = rb_first(&priv->root);
+
+       /* Detect overlap by going through the list of valid tree nodes.
+        * Values stored in the tree are in reversed order, starting from
+        * highest to lowest value.
+        */
+       for (node = first; node != NULL; node = rb_next(node)) {
+               rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
-                               if (nft_set_elem_active(&rbe->ext, genmask) &&
-                                   !nft_set_elem_expired(&rbe->ext))
-                                       overlap = false;
-                       } else if (nft_set_elem_active(&rbe->ext, genmask) &&
-                                  !nft_set_elem_expired(&rbe->ext)) {
-                               *ext = &rbe->ext;
-                               return -EEXIST;
-                       } else {
-                               overlap = false;
-                               if (nft_rbtree_interval_end(rbe))
-                                       p = &parent->rb_left;
-                               else
-                                       p = &parent->rb_right;
+               if (!nft_set_elem_active(&rbe->ext, genmask))
+                       continue;
+
+               /* perform garbage collection to avoid bogus overlap reports. */
+               if (nft_set_elem_expired(&rbe->ext)) {
+                       err = nft_rbtree_gc_elem(set, priv, rbe);
+                       if (err < 0)
+                               return err;
+
+                       continue;
+               }
+
+               d = nft_rbtree_cmp(set, rbe, new);
+               if (d == 0) {
+                       /* Matching end element: no need to look for an
+                        * overlapping greater or equal element.
+                        */
+                       if (nft_rbtree_interval_end(rbe)) {
+                               rbe_le = rbe;
+                               break;
+                       }
+
+                       /* first element that is greater or equal to key value. */
+                       if (!rbe_ge) {
+                               rbe_ge = rbe;
+                               continue;
+                       }
+
+                       /* this is a closer more or equal element, update it. */
+                       if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
+                               rbe_ge = rbe;
+                               continue;
+                       }
+
+                       /* element is equal to key value, make sure flags are
+                        * the same, an existing more or equal start element
+                        * must not be replaced by more or equal end element.
+                        */
+                       if ((nft_rbtree_interval_start(new) &&
+                            nft_rbtree_interval_start(rbe_ge)) ||
+                           (nft_rbtree_interval_end(new) &&
+                            nft_rbtree_interval_end(rbe_ge))) {
+                               rbe_ge = rbe;
+                               continue;
                        }
+               } else if (d > 0) {
+                       /* annotate element greater than the new element. */
+                       rbe_ge = rbe;
+                       continue;
+               } else if (d < 0) {
+                       /* annotate element less than the new element. */
+                       rbe_le = rbe;
+                       break;
                }
+       }
 
-               dup_end_left = dup_end_right = false;
+       /* - new start element matching existing start element: full overlap
+        *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
+        */
+       if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
+           nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
+               *ext = &rbe_ge->ext;
+               return -EEXIST;
        }
 
-       if (overlap)
+       /* - new end element matching existing end element: full overlap
+        *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
+        */
+       if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
+           nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
+               *ext = &rbe_le->ext;
+               return -EEXIST;
+       }
+
+       /* - new start element with existing closest, less or equal key value
+        *   being a start element: partial overlap, reported as -ENOTEMPTY.
+        *   Anonymous sets allow for two consecutive start element since they
+        *   are constant, skip them to avoid bogus overlap reports.
+        */
+       if (!nft_set_is_anonymous(set) && rbe_le &&
+           nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
+               return -ENOTEMPTY;
+
+       /* - new end element with existing closest, less or equal key value
+        *   being a end element: partial overlap, reported as -ENOTEMPTY.
+        */
+       if (rbe_le &&
+           nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
                return -ENOTEMPTY;
 
+       /* - new end element with existing closest, greater or equal key value
+        *   being an end element: partial overlap, reported as -ENOTEMPTY
+        */
+       if (rbe_ge &&
+           nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
+               return -ENOTEMPTY;
+
+       /* Accepted element: pick insertion point depending on key value */
+       parent = NULL;
+       p = &priv->root.rb_node;
+       while (*p != NULL) {
+               parent = *p;
+               rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+               d = nft_rbtree_cmp(set, rbe, new);
+
+               if (d < 0)
+                       p = &parent->rb_left;
+               else if (d > 0)
+                       p = &parent->rb_right;
+               else if (nft_rbtree_interval_end(rbe))
+                       p = &parent->rb_left;
+               else
+                       p = &parent->rb_right;
+       }
+
        rb_link_node_rcu(&new->node, parent, p);
        rb_insert_color(&new->node, &priv->root);
        return 0;
@@ -501,23 +563,37 @@ static void nft_rbtree_gc(struct work_struct *work)
        struct nft_rbtree *priv;
        struct rb_node *node;
        struct nft_set *set;
+       struct net *net;
+       u8 genmask;
 
        priv = container_of(work, struct nft_rbtree, gc_work.work);
        set  = nft_set_container_of(priv);
+       net  = read_pnet(&set->net);
+       genmask = nft_genmask_cur(net);
 
        write_lock_bh(&priv->lock);
        write_seqcount_begin(&priv->count);
        for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
                rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
+               if (!nft_set_elem_active(&rbe->ext, genmask))
+                       continue;
+
+               /* elements are reversed in the rbtree for historical reasons,
+                * from highest to lowest value, that is why end element is
+                * always visited before the start element.
+                */
                if (nft_rbtree_interval_end(rbe)) {
                        rbe_end = rbe;
                        continue;
                }
                if (!nft_set_elem_expired(&rbe->ext))
                        continue;
-               if (nft_set_elem_mark_busy(&rbe->ext))
+
+               if (nft_set_elem_mark_busy(&rbe->ext)) {
+                       rbe_end = NULL;
                        continue;
+               }
 
                if (rbe_prev) {
                        rb_erase(&rbe_prev->node, &priv->root);
index bca2a47..c642776 100644 (file)
@@ -580,7 +580,9 @@ static int netlink_insert(struct sock *sk, u32 portid)
        if (nlk_sk(sk)->bound)
                goto err;
 
-       nlk_sk(sk)->portid = portid;
+       /* portid can be read locklessly from netlink_getname(). */
+       WRITE_ONCE(nlk_sk(sk)->portid, portid);
+
        sock_hold(sk);
 
        err = __netlink_insert(table, sk);
@@ -1096,9 +1098,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
                return -EINVAL;
 
        if (addr->sa_family == AF_UNSPEC) {
-               sk->sk_state    = NETLINK_UNCONNECTED;
-               nlk->dst_portid = 0;
-               nlk->dst_group  = 0;
+               /* paired with READ_ONCE() in netlink_getsockbyportid() */
+               WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED);
+               /* dst_portid and dst_group can be read locklessly */
+               WRITE_ONCE(nlk->dst_portid, 0);
+               WRITE_ONCE(nlk->dst_group, 0);
                return 0;
        }
        if (addr->sa_family != AF_NETLINK)
@@ -1119,9 +1123,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
                err = netlink_autobind(sock);
 
        if (err == 0) {
-               sk->sk_state    = NETLINK_CONNECTED;
-               nlk->dst_portid = nladdr->nl_pid;
-               nlk->dst_group  = ffs(nladdr->nl_groups);
+               /* paired with READ_ONCE() in netlink_getsockbyportid() */
+               WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED);
+               /* dst_portid and dst_group can be read locklessly */
+               WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid);
+               WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups));
        }
 
        return err;
@@ -1138,10 +1144,12 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
        nladdr->nl_pad = 0;
 
        if (peer) {
-               nladdr->nl_pid = nlk->dst_portid;
-               nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
+               /* Paired with WRITE_ONCE() in netlink_connect() */
+               nladdr->nl_pid = READ_ONCE(nlk->dst_portid);
+               nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group));
        } else {
-               nladdr->nl_pid = nlk->portid;
+               /* Paired with WRITE_ONCE() in netlink_insert() */
+               nladdr->nl_pid = READ_ONCE(nlk->portid);
                netlink_lock_table();
                nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
                netlink_unlock_table();
@@ -1168,8 +1176,9 @@ static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
 
        /* Don't bother queuing skb if kernel socket has no input function */
        nlk = nlk_sk(sock);
-       if (sock->sk_state == NETLINK_CONNECTED &&
-           nlk->dst_portid != nlk_sk(ssk)->portid) {
+       /* dst_portid and sk_state can be changed in netlink_connect() */
+       if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED &&
+           READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) {
                sock_put(sock);
                return ERR_PTR(-ECONNREFUSED);
        }
@@ -1886,8 +1895,9 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
                        goto out;
                netlink_skb_flags |= NETLINK_SKB_DST;
        } else {
-               dst_portid = nlk->dst_portid;
-               dst_group = nlk->dst_group;
+               /* Paired with WRITE_ONCE() in netlink_connect() */
+               dst_portid = READ_ONCE(nlk->dst_portid);
+               dst_group = READ_ONCE(nlk->dst_group);
        }
 
        /* Paired with WRITE_ONCE() in netlink_insert() */
index a8da88d..4e7c968 100644 (file)
@@ -121,6 +121,7 @@ static void nr_heartbeat_expiry(struct timer_list *t)
                   is accepted() it isn't 'dead' so doesn't get removed. */
                if (sock_flag(sk, SOCK_DESTROY) ||
                    (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
+                       sock_hold(sk);
                        bh_unlock_sock(sk);
                        nr_destroy_socket(sk);
                        goto out;
index a661b06..872d127 100644 (file)
@@ -377,6 +377,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
        /* Even if driver returns failure adjust the stats - in case offload
         * ended but driver still wants to adjust the values.
         */
+       sch_tree_lock(sch);
        for (i = 0; i < MAX_DPs; i++) {
                if (!table->tab[i])
                        continue;
@@ -393,6 +394,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
                sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
        }
        _bstats_update(&sch->bstats, bytes, packets);
+       sch_tree_unlock(sch);
 
        kfree(hw_stats);
        return ret;
index 9a11a49..c322a61 100644 (file)
@@ -1700,7 +1700,6 @@ static void taprio_reset(struct Qdisc *sch)
        int i;
 
        hrtimer_cancel(&q->advance_timer);
-       qdisc_synchronize(sch);
 
        if (q->qdiscs) {
                for (i = 0; i < dev->num_tx_queues; i++)
index 59e653b..6b95d3b 100644 (file)
@@ -73,6 +73,12 @@ int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
                }
        }
 
+       /* If somehow no addresses were found that can be used with this
+        * scope, it's an error.
+        */
+       if (list_empty(&dest->address_list))
+               error = -ENETUNREACH;
+
 out:
        if (error)
                sctp_bind_addr_clean(dest);
index 3b55502..5c7ad30 100644 (file)
@@ -482,6 +482,12 @@ static int x25_listen(struct socket *sock, int backlog)
        int rc = -EOPNOTSUPP;
 
        lock_sock(sk);
+       if (sock->state != SS_UNCONNECTED) {
+               rc = -EINVAL;
+               release_sock(sk);
+               return rc;
+       }
+
        if (sk->sk_state != TCP_LISTEN) {
                memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
                sk->sk_max_ack_backlog = backlog;
index 29bf9c2..3010332 100644 (file)
@@ -142,17 +142,24 @@ pub fn call_printk_cont(args: fmt::Arguments<'_>) {
 macro_rules! print_macro (
     // The non-continuation cases (most of them, e.g. `INFO`).
     ($format_string:path, false, $($arg:tt)+) => (
-        // SAFETY: This hidden macro should only be called by the documented
-        // printing macros which ensure the format string is one of the fixed
-        // ones. All `__LOG_PREFIX`s are null-terminated as they are generated
-        // by the `module!` proc macro or fixed values defined in a kernel
-        // crate.
-        unsafe {
-            $crate::print::call_printk(
-                &$format_string,
-                crate::__LOG_PREFIX,
-                format_args!($($arg)+),
-            );
+        // To remain sound, `arg`s must be expanded outside the `unsafe` block.
+        // Typically one would use a `let` binding for that; however, `format_args!`
+        // takes borrows on the arguments, but does not extend the scope of temporaries.
+        // Therefore, a `match` expression is used to keep them around, since
+        // the scrutinee is kept until the end of the `match`.
+        match format_args!($($arg)+) {
+            // SAFETY: This hidden macro should only be called by the documented
+            // printing macros which ensure the format string is one of the fixed
+            // ones. All `__LOG_PREFIX`s are null-terminated as they are generated
+            // by the `module!` proc macro or fixed values defined in a kernel
+            // crate.
+            args => unsafe {
+                $crate::print::call_printk(
+                    &$format_string,
+                    crate::__LOG_PREFIX,
+                    args,
+                );
+            }
         }
     );
 
index 49946cb..10176de 100644 (file)
@@ -18,6 +18,7 @@ quiet_cmd_cc_o_c = CC      $@
        $(call if_changed_dep,cc_o_c)
 
 ifdef CONFIG_MODULES
+KASAN_SANITIZE_.vmlinux.export.o := n
 targets += .vmlinux.export.o
 vmlinux: .vmlinux.export.o
 endif
index 4192855..7eca035 100755 (executable)
@@ -26,11 +26,20 @@ try:
        # If the MAKEFLAGS variable contains multiple instances of the
        # --jobserver-auth= option, the last one is relevant.
        fds = opts[-1].split("=", 1)[1]
-       reader, writer = [int(x) for x in fds.split(",", 1)]
-       # Open a private copy of reader to avoid setting nonblocking
-       # on an unexpecting process with the same reader fd.
-       reader = os.open("/proc/self/fd/%d" % (reader),
-                        os.O_RDONLY | os.O_NONBLOCK)
+
+       # Starting with GNU Make 4.4, named pipes are used for reader and writer.
+       # Example argument: --jobserver-auth=fifo:/tmp/GMfifo8134
+       _, _, path = fds.partition('fifo:')
+
+       if path:
+               reader = os.open(path, os.O_RDONLY | os.O_NONBLOCK)
+               writer = os.open(path, os.O_WRONLY)
+       else:
+               reader, writer = [int(x) for x in fds.split(",", 1)]
+               # Open a private copy of reader to avoid setting nonblocking
+               # on an unexpecting process with the same reader fd.
+               reader = os.open("/proc/self/fd/%d" % (reader),
+                                os.O_RDONLY | os.O_NONBLOCK)
 
        # Read out as many jobserver slots as possible.
        while True:
index c8a3f9c..0b2ff77 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 /conf
 /[gmnq]conf
+/[gmnq]conf-bin
 /[gmnq]conf-cflags
 /[gmnq]conf-libs
-/qconf-bin
 /qconf-moc.cc
index 0b1d15e..af1c961 100644 (file)
@@ -209,7 +209,7 @@ $(obj)/gconf: | $(obj)/gconf-libs
 $(obj)/gconf.o: | $(obj)/gconf-cflags
 
 # check if necessary packages are available, and configure build flags
-cmd_conf_cfg = $< $(addprefix $(obj)/$*conf-, cflags libs bin)
+cmd_conf_cfg = $< $(addprefix $(obj)/$*conf-, cflags libs bin); touch $(obj)/$*conf-bin
 
 $(obj)/%conf-cflags $(obj)/%conf-libs $(obj)/%conf-bin: $(src)/%conf-cfg.sh
        $(call cmd,conf_cfg)
index adab28f..094e52c 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/sh
 #
 #      Output a simple RPM spec file.
-#      This version assumes a minimum of RPM 4.0.3.
+#      This version assumes a minimum of RPM 4.13
 #
 #      The only gothic bit here is redefining install_post to avoid
 #      stripping the symbols from files in the kernel which we want
index b9f8671..fad75be 100644 (file)
@@ -6,13 +6,11 @@ config SECURITY_TOMOYO
        select SECURITYFS
        select SECURITY_PATH
        select SECURITY_NETWORK
-       select SRCU
-       select BUILD_BIN2C
        default n
        help
          This selects TOMOYO Linux, pathname-based access control.
          Required userspace tools and further information may be
-         found at <http://tomoyo.sourceforge.jp/>.
+         found at <https://tomoyo.osdn.jp/>.
          If you are unsure how to answer this question, answer N.
 
 config SECURITY_TOMOYO_MAX_ACCEPT_ENTRY
index cca5a30..884ff15 100644 (file)
@@ -2,15 +2,18 @@
 obj-y = audit.o common.o condition.o domain.o environ.o file.o gc.o group.o load_policy.o memory.o mount.o network.o realpath.o securityfs_if.o tomoyo.o util.o
 
 targets += builtin-policy.h
-define do_policy
-echo "static char tomoyo_builtin_$(1)[] __initdata ="; \
-$(objtree)/scripts/bin2c <$(firstword $(wildcard $(obj)/policy/$(1).conf $(srctree)/$(src)/policy/$(1).conf.default) /dev/null); \
-echo ";"
-endef
-quiet_cmd_policy  = POLICY  $@
-      cmd_policy  = ($(call do_policy,profile); $(call do_policy,exception_policy); $(call do_policy,domain_policy); $(call do_policy,manager); $(call do_policy,stat)) >$@
 
-$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(src)/policy/*.conf.default) FORCE
+quiet_cmd_policy = POLICY  $@
+      cmd_policy = { \
+       $(foreach x, profile exception_policy domain_policy manager stat, \
+       printf 'static char tomoyo_builtin_$x[] __initdata =\n'; \
+       sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/\t"\1\\n"/' -- $(firstword $(filter %/$x.conf %/$x.conf.default, $^) /dev/null);  \
+       printf '\t"";\n';) \
+       } > $@
+
+$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(srctree)/$(src)/policy/*.conf.default) FORCE
        $(call if_changed,policy)
 
+ifndef CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING
 $(obj)/common.o: $(obj)/builtin-policy.h
+endif
index 50e7ba6..82aa1af 100644 (file)
@@ -1203,14 +1203,19 @@ static int snd_ctl_elem_read(struct snd_card *card,
        const u32 pattern = 0xdeadbeef;
        int ret;
 
+       down_read(&card->controls_rwsem);
        kctl = snd_ctl_find_id(card, &control->id);
-       if (kctl == NULL)
-               return -ENOENT;
+       if (kctl == NULL) {
+               ret = -ENOENT;
+               goto unlock;
+       }
 
        index_offset = snd_ctl_get_ioff(kctl, &control->id);
        vd = &kctl->vd[index_offset];
-       if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL)
-               return -EPERM;
+       if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL) {
+               ret = -EPERM;
+               goto unlock;
+       }
 
        snd_ctl_build_ioff(&control->id, kctl, index_offset);
 
@@ -1220,7 +1225,7 @@ static int snd_ctl_elem_read(struct snd_card *card,
        info.id = control->id;
        ret = __snd_ctl_elem_info(card, kctl, &info, NULL);
        if (ret < 0)
-               return ret;
+               goto unlock;
 #endif
 
        if (!snd_ctl_skip_validation(&info))
@@ -1230,7 +1235,7 @@ static int snd_ctl_elem_read(struct snd_card *card,
                ret = kctl->get(kctl, control);
        snd_power_unref(card);
        if (ret < 0)
-               return ret;
+               goto unlock;
        if (!snd_ctl_skip_validation(&info) &&
            sanity_check_elem_value(card, control, &info, pattern) < 0) {
                dev_err(card->dev,
@@ -1238,8 +1243,11 @@ static int snd_ctl_elem_read(struct snd_card *card,
                        control->id.iface, control->id.device,
                        control->id.subdevice, control->id.name,
                        control->id.index);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto unlock;
        }
+unlock:
+       up_read(&card->controls_rwsem);
        return ret;
 }
 
@@ -1253,9 +1261,7 @@ static int snd_ctl_elem_read_user(struct snd_card *card,
        if (IS_ERR(control))
                return PTR_ERR(control);
 
-       down_read(&card->controls_rwsem);
        result = snd_ctl_elem_read(card, control);
-       up_read(&card->controls_rwsem);
        if (result < 0)
                goto error;
 
index f975cc8..3cadd40 100644 (file)
@@ -530,12 +530,11 @@ static ssize_t set_led_id(struct snd_ctl_led_card *led_card, const char *buf, si
                          bool attach)
 {
        char buf2[256], *s, *os;
-       size_t len = max(sizeof(s) - 1, count);
        struct snd_ctl_elem_id id;
        int err;
 
-       strncpy(buf2, buf, len);
-       buf2[len] = '\0';
+       if (strscpy(buf2, buf, sizeof(buf2)) < 0)
+               return -E2BIG;
        memset(&id, 0, sizeof(id));
        id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
        s = buf2;
index 91842c0..f7815ee 100644 (file)
@@ -598,8 +598,8 @@ static int cs35l41_system_suspend(struct device *dev)
        dev_dbg(cs35l41->dev, "System Suspend\n");
 
        if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH) {
-               dev_err(cs35l41->dev, "System Suspend not supported\n");
-               return -EINVAL;
+               dev_err_once(cs35l41->dev, "System Suspend not supported\n");
+               return 0; /* don't block the whole system suspend */
        }
 
        ret = pm_runtime_force_suspend(dev);
@@ -624,8 +624,8 @@ static int cs35l41_system_resume(struct device *dev)
        dev_dbg(cs35l41->dev, "System Resume\n");
 
        if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH) {
-               dev_err(cs35l41->dev, "System Resume not supported\n");
-               return -EINVAL;
+               dev_err_once(cs35l41->dev, "System Resume not supported\n");
+               return 0; /* don't block the whole system resume */
        }
 
        if (cs35l41->reset_gpio) {
@@ -647,6 +647,15 @@ static int cs35l41_system_resume(struct device *dev)
        return ret;
 }
 
+static int cs35l41_runtime_idle(struct device *dev)
+{
+       struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+
+       if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH)
+               return -EBUSY; /* suspend not supported yet on this model */
+       return 0;
+}
+
 static int cs35l41_runtime_suspend(struct device *dev)
 {
        struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
@@ -1536,7 +1545,8 @@ void cs35l41_hda_remove(struct device *dev)
 EXPORT_SYMBOL_NS_GPL(cs35l41_hda_remove, SND_HDA_SCODEC_CS35L41);
 
 const struct dev_pm_ops cs35l41_hda_pm_ops = {
-       RUNTIME_PM_OPS(cs35l41_runtime_suspend, cs35l41_runtime_resume, NULL)
+       RUNTIME_PM_OPS(cs35l41_runtime_suspend, cs35l41_runtime_resume,
+                      cs35l41_runtime_idle)
        SYSTEM_SLEEP_PM_OPS(cs35l41_system_suspend, cs35l41_system_resume)
 };
 EXPORT_SYMBOL_NS_GPL(cs35l41_hda_pm_ops, SND_HDA_SCODEC_CS35L41);
index 386dd9d..9ea633f 100644 (file)
@@ -1981,6 +1981,7 @@ static const struct snd_pci_quirk force_connect_list[] = {
        SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
        SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
        SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1),
+       SND_PCI_QUIRK(0x103c, 0x8715, "HP", 1),
        SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1),
        SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1),
        {}
index 3794b52..6fab7c8 100644 (file)
@@ -3564,6 +3564,15 @@ static void alc256_init(struct hda_codec *codec)
        hda_nid_t hp_pin = alc_get_hp_pin(spec);
        bool hp_pin_sense;
 
+       if (spec->ultra_low_power) {
+               alc_update_coef_idx(codec, 0x03, 1<<1, 1<<1);
+               alc_update_coef_idx(codec, 0x08, 3<<2, 3<<2);
+               alc_update_coef_idx(codec, 0x08, 7<<4, 0);
+               alc_update_coef_idx(codec, 0x3b, 1<<15, 0);
+               alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
+               msleep(30);
+       }
+
        if (!hp_pin)
                hp_pin = 0x21;
 
@@ -3575,14 +3584,6 @@ static void alc256_init(struct hda_codec *codec)
                msleep(2);
 
        alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
-       if (spec->ultra_low_power) {
-               alc_update_coef_idx(codec, 0x03, 1<<1, 1<<1);
-               alc_update_coef_idx(codec, 0x08, 3<<2, 3<<2);
-               alc_update_coef_idx(codec, 0x08, 7<<4, 0);
-               alc_update_coef_idx(codec, 0x3b, 1<<15, 0);
-               alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
-               msleep(30);
-       }
 
        snd_hda_codec_write(codec, hp_pin, 0,
                            AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
@@ -3713,6 +3714,13 @@ static void alc225_init(struct hda_codec *codec)
        hda_nid_t hp_pin = alc_get_hp_pin(spec);
        bool hp1_pin_sense, hp2_pin_sense;
 
+       if (spec->ultra_low_power) {
+               alc_update_coef_idx(codec, 0x08, 0x0f << 2, 3<<2);
+               alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
+               alc_update_coef_idx(codec, 0x33, 1<<11, 0);
+               msleep(30);
+       }
+
        if (spec->codec_variant != ALC269_TYPE_ALC287 &&
                spec->codec_variant != ALC269_TYPE_ALC245)
                /* required only at boot or S3 and S4 resume time */
@@ -3734,12 +3742,6 @@ static void alc225_init(struct hda_codec *codec)
                msleep(2);
 
        alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
-       if (spec->ultra_low_power) {
-               alc_update_coef_idx(codec, 0x08, 0x0f << 2, 3<<2);
-               alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
-               alc_update_coef_idx(codec, 0x33, 1<<11, 0);
-               msleep(30);
-       }
 
        if (hp1_pin_sense || spec->ultra_low_power)
                snd_hda_codec_write(codec, hp_pin, 0,
@@ -4644,6 +4646,16 @@ static void alc285_fixup_hp_coef_micmute_led(struct hda_codec *codec,
        }
 }
 
+static void alc285_fixup_hp_gpio_micmute_led(struct hda_codec *codec,
+                               const struct hda_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+
+       if (action == HDA_FIXUP_ACT_PRE_PROBE)
+               spec->micmute_led_polarity = 1;
+       alc_fixup_hp_gpio_led(codec, action, 0, 0x04);
+}
+
 static void alc236_fixup_hp_coef_micmute_led(struct hda_codec *codec,
                                const struct hda_fixup *fix, int action)
 {
@@ -4665,6 +4677,13 @@ static void alc285_fixup_hp_mute_led(struct hda_codec *codec,
        alc285_fixup_hp_coef_micmute_led(codec, fix, action);
 }
 
+static void alc285_fixup_hp_spectre_x360_mute_led(struct hda_codec *codec,
+                               const struct hda_fixup *fix, int action)
+{
+       alc285_fixup_hp_mute_led_coefbit(codec, fix, action);
+       alc285_fixup_hp_gpio_micmute_led(codec, fix, action);
+}
+
 static void alc236_fixup_hp_mute_led(struct hda_codec *codec,
                                const struct hda_fixup *fix, int action)
 {
@@ -7106,6 +7125,7 @@ enum {
        ALC285_FIXUP_ASUS_G533Z_PINS,
        ALC285_FIXUP_HP_GPIO_LED,
        ALC285_FIXUP_HP_MUTE_LED,
+       ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED,
        ALC236_FIXUP_HP_GPIO_LED,
        ALC236_FIXUP_HP_MUTE_LED,
        ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
@@ -8486,6 +8506,10 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc285_fixup_hp_mute_led,
        },
+       [ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc285_fixup_hp_spectre_x360_mute_led,
+       },
        [ALC236_FIXUP_HP_GPIO_LED] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc236_fixup_hp_gpio_led,
@@ -9239,6 +9263,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x0b37, "Dell Inspiron 16 Plus 7620 2-in-1", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
        SND_PCI_QUIRK(0x1028, 0x0b71, "Dell Inspiron 16 Plus 7620", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
+       SND_PCI_QUIRK(0x1028, 0x0c03, "Dell Precision 5340", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0c19, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
        SND_PCI_QUIRK(0x1028, 0x0c1a, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
        SND_PCI_QUIRK(0x1028, 0x0c1b, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS),
@@ -9327,6 +9352,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
        SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
        SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+       SND_PCI_QUIRK(0x103c, 0x86f9, "HP Spectre x360 13-aw0xxx", ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8720, "HP EliteBook x360 1040 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
@@ -9406,6 +9432,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
        SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
index 1f0b552..0d283e4 100644 (file)
@@ -209,6 +209,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
        {
                .driver_data = &acp6x_card,
                .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "M5402RA"),
+               }
+       },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "Alienware"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m17 R5 AMD"),
                }
@@ -220,6 +227,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 14 2022"),
                }
        },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Razer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Blade 14 (2022) - RZ09-0427"),
+               }
+       },
        {}
 };
 
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
index 644300e..fcf4fba 100644 (file)
@@ -177,8 +177,20 @@ static int rt9120_codec_probe(struct snd_soc_component *comp)
        return 0;
 }
 
+static int rt9120_codec_suspend(struct snd_soc_component *comp)
+{
+       return pm_runtime_force_suspend(comp->dev);
+}
+
+static int rt9120_codec_resume(struct snd_soc_component *comp)
+{
+       return pm_runtime_force_resume(comp->dev);
+}
+
 static const struct snd_soc_component_driver rt9120_component_driver = {
        .probe = rt9120_codec_probe,
+       .suspend = rt9120_codec_suspend,
+       .resume = rt9120_codec_resume,
        .controls = rt9120_snd_controls,
        .num_controls = ARRAY_SIZE(rt9120_snd_controls),
        .dapm_widgets = rt9120_dapm_widgets,
index ca6a01a..791d873 100644 (file)
@@ -697,6 +697,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
        int dcs_mask;
        int dcs_l, dcs_r;
        int dcs_l_reg, dcs_r_reg;
+       int an_out_reg;
        int timeout;
        int pwr_reg;
 
@@ -712,6 +713,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
                dcs_mask = WM8904_DCS_ENA_CHAN_0 | WM8904_DCS_ENA_CHAN_1;
                dcs_r_reg = WM8904_DC_SERVO_8;
                dcs_l_reg = WM8904_DC_SERVO_9;
+               an_out_reg = WM8904_ANALOGUE_OUT1_LEFT;
                dcs_l = 0;
                dcs_r = 1;
                break;
@@ -720,6 +722,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
                dcs_mask = WM8904_DCS_ENA_CHAN_2 | WM8904_DCS_ENA_CHAN_3;
                dcs_r_reg = WM8904_DC_SERVO_6;
                dcs_l_reg = WM8904_DC_SERVO_7;
+               an_out_reg = WM8904_ANALOGUE_OUT2_LEFT;
                dcs_l = 2;
                dcs_r = 3;
                break;
@@ -792,6 +795,10 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
                snd_soc_component_update_bits(component, reg,
                                    WM8904_HPL_ENA_OUTP | WM8904_HPR_ENA_OUTP,
                                    WM8904_HPL_ENA_OUTP | WM8904_HPR_ENA_OUTP);
+
+               /* Update volume, requires PGA to be powered */
+               val = snd_soc_component_read(component, an_out_reg);
+               snd_soc_component_write(component, an_out_reg, val);
                break;
 
        case SND_SOC_DAPM_POST_PMU:
index c836848..8d14b55 100644 (file)
@@ -121,11 +121,11 @@ static const struct snd_soc_dapm_route audio_map[] = {
 
 static const struct snd_soc_dapm_route audio_map_ac97[] = {
        /* 1st half -- Normal DAPM routes */
-       {"Playback",  NULL, "AC97 Playback"},
-       {"AC97 Capture",  NULL, "Capture"},
+       {"AC97 Playback",  NULL, "CPU AC97 Playback"},
+       {"CPU AC97 Capture",  NULL, "AC97 Capture"},
        /* 2nd half -- ASRC DAPM routes */
-       {"AC97 Playback",  NULL, "ASRC-Playback"},
-       {"ASRC-Capture",  NULL, "AC97 Capture"},
+       {"CPU AC97 Playback",  NULL, "ASRC-Playback"},
+       {"ASRC-Capture",  NULL, "CPU AC97 Capture"},
 };
 
 static const struct snd_soc_dapm_route audio_map_tx[] = {
index 7b17f15..94341e4 100644 (file)
@@ -315,21 +315,21 @@ static int hwvad_detected(struct snd_kcontrol *kcontrol,
 
 static const struct snd_kcontrol_new fsl_micfil_snd_controls[] = {
        SOC_SINGLE_SX_TLV("CH0 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(0), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(0), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH1 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(1), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(1), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH2 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(2), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(2), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH3 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(3), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(3), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH4 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(4), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(4), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH5 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(5), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(5), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH6 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(6), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(6), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH7 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(7), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(7), 0x8, 0xF, gain_tlv),
        SOC_ENUM_EXT("MICFIL Quality Select",
                     fsl_micfil_quality_enum,
                     micfil_quality_get, micfil_quality_set),
index c9e0e31..46a5355 100644 (file)
@@ -1189,14 +1189,14 @@ static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
        .symmetric_channels = 1,
        .probe = fsl_ssi_dai_probe,
        .playback = {
-               .stream_name = "AC97 Playback",
+               .stream_name = "CPU AC97 Playback",
                .channels_min = 2,
                .channels_max = 2,
                .rates = SNDRV_PCM_RATE_8000_48000,
                .formats = SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S20,
        },
        .capture = {
-               .stream_name = "AC97 Capture",
+               .stream_name = "CPU AC97 Capture",
                .channels_min = 2,
                .channels_max = 2,
                .rates = SNDRV_PCM_RATE_48000,
index a472de1..99308ed 100644 (file)
@@ -554,10 +554,12 @@ config SND_SOC_INTEL_SOF_NAU8825_MACH
        select SND_SOC_RT1015P
        select SND_SOC_MAX98373_I2C
        select SND_SOC_MAX98357A
+       select SND_SOC_NAU8315
        select SND_SOC_DMIC
        select SND_SOC_HDAC_HDMI
        select SND_SOC_INTEL_HDA_DSP_COMMON
        select SND_SOC_INTEL_SOF_MAXIM_COMMON
+       select SND_SOC_INTEL_SOF_REALTEK_COMMON
        help
           This adds support for ASoC machine driver for SOF platforms
           with nau8825 codec.
index 2788022..a800854 100644 (file)
@@ -48,6 +48,7 @@
 #define SOF_MAX98373_SPEAKER_AMP_PRESENT       BIT(15)
 #define SOF_MAX98360A_SPEAKER_AMP_PRESENT      BIT(16)
 #define SOF_RT1015P_SPEAKER_AMP_PRESENT        BIT(17)
+#define SOF_NAU8318_SPEAKER_AMP_PRESENT        BIT(18)
 
 static unsigned long sof_nau8825_quirk = SOF_NAU8825_SSP_CODEC(0);
 
@@ -338,6 +339,13 @@ static struct snd_soc_dai_link_component rt1019p_component[] = {
        }
 };
 
+static struct snd_soc_dai_link_component nau8318_components[] = {
+       {
+               .name = "NVTN2012:00",
+               .dai_name = "nau8315-hifi",
+       }
+};
+
 static struct snd_soc_dai_link_component dummy_component[] = {
        {
                .name = "snd-soc-dummy",
@@ -486,6 +494,11 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
                        max_98360a_dai_link(&links[id]);
                } else if (sof_nau8825_quirk & SOF_RT1015P_SPEAKER_AMP_PRESENT) {
                        sof_rt1015p_dai_link(&links[id]);
+               } else if (sof_nau8825_quirk &
+                               SOF_NAU8318_SPEAKER_AMP_PRESENT) {
+                       links[id].codecs = nau8318_components;
+                       links[id].num_codecs = ARRAY_SIZE(nau8318_components);
+                       links[id].init = speaker_codec_init;
                } else {
                        goto devm_err;
                }
@@ -618,7 +631,7 @@ static const struct platform_device_id board_ids[] = {
 
        },
        {
-               .name = "adl_rt1019p_nau8825",
+               .name = "adl_rt1019p_8825",
                .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
                                        SOF_SPEAKER_AMP_PRESENT |
                                        SOF_RT1019P_SPEAKER_AMP_PRESENT |
@@ -626,7 +639,7 @@ static const struct platform_device_id board_ids[] = {
                                        SOF_NAU8825_NUM_HDMIDEV(4)),
        },
        {
-               .name = "adl_max98373_nau8825",
+               .name = "adl_max98373_8825",
                .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
                                        SOF_SPEAKER_AMP_PRESENT |
                                        SOF_MAX98373_SPEAKER_AMP_PRESENT |
@@ -637,7 +650,7 @@ static const struct platform_device_id board_ids[] = {
        },
        {
                /* The limitation of length of char array, shorten the name */
-               .name = "adl_mx98360a_nau8825",
+               .name = "adl_mx98360a_8825",
                .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
                                        SOF_SPEAKER_AMP_PRESENT |
                                        SOF_MAX98360A_SPEAKER_AMP_PRESENT |
@@ -648,7 +661,7 @@ static const struct platform_device_id board_ids[] = {
 
        },
        {
-               .name = "adl_rt1015p_nau8825",
+               .name = "adl_rt1015p_8825",
                .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
                                        SOF_SPEAKER_AMP_PRESENT |
                                        SOF_RT1015P_SPEAKER_AMP_PRESENT |
@@ -657,6 +670,16 @@ static const struct platform_device_id board_ids[] = {
                                        SOF_BT_OFFLOAD_SSP(2) |
                                        SOF_SSP_BT_OFFLOAD_PRESENT),
        },
+       {
+               .name = "adl_nau8318_8825",
+               .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
+                                       SOF_SPEAKER_AMP_PRESENT |
+                                       SOF_NAU8318_SPEAKER_AMP_PRESENT |
+                                       SOF_NAU8825_SSP_AMP(1) |
+                                       SOF_NAU8825_NUM_HDMIDEV(4) |
+                                       SOF_BT_OFFLOAD_SSP(2) |
+                                       SOF_SSP_BT_OFFLOAD_PRESENT),
+       },
        { }
 };
 MODULE_DEVICE_TABLE(platform, board_ids);
index 60aee56..56ee5fe 100644 (file)
@@ -450,6 +450,11 @@ static const struct snd_soc_acpi_codecs adl_lt6911_hdmi = {
        .codecs = {"INTC10B0"}
 };
 
+static const struct snd_soc_acpi_codecs adl_nau8318_amp = {
+       .num_codecs = 1,
+       .codecs = {"NVTN2012"}
+};
+
 struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
        {
                .comp_ids = &adl_rt5682_rt5682s_hp,
@@ -474,21 +479,21 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
        },
        {
                .id = "10508825",
-               .drv_name = "adl_rt1019p_nau8825",
+               .drv_name = "adl_rt1019p_8825",
                .machine_quirk = snd_soc_acpi_codec_list,
                .quirk_data = &adl_rt1019p_amp,
                .sof_tplg_filename = "sof-adl-rt1019-nau8825.tplg",
        },
        {
                .id = "10508825",
-               .drv_name = "adl_max98373_nau8825",
+               .drv_name = "adl_max98373_8825",
                .machine_quirk = snd_soc_acpi_codec_list,
                .quirk_data = &adl_max98373_amp,
                .sof_tplg_filename = "sof-adl-max98373-nau8825.tplg",
        },
        {
                .id = "10508825",
-               .drv_name = "adl_mx98360a_nau8825",
+               .drv_name = "adl_mx98360a_8825",
                .machine_quirk = snd_soc_acpi_codec_list,
                .quirk_data = &adl_max98360a_amp,
                .sof_tplg_filename = "sof-adl-max98360a-nau8825.tplg",
@@ -502,13 +507,20 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
        },
        {
                .id = "10508825",
-               .drv_name = "adl_rt1015p_nau8825",
+               .drv_name = "adl_rt1015p_8825",
                .machine_quirk = snd_soc_acpi_codec_list,
                .quirk_data = &adl_rt1015p_amp,
                .sof_tplg_filename = "sof-adl-rt1015-nau8825.tplg",
        },
        {
                .id = "10508825",
+               .drv_name = "adl_nau8318_8825",
+               .machine_quirk = snd_soc_acpi_codec_list,
+               .quirk_data = &adl_nau8318_amp,
+               .sof_tplg_filename = "sof-adl-nau8318-nau8825.tplg",
+       },
+       {
+               .id = "10508825",
                .drv_name = "sof_nau8825",
                .sof_tplg_filename = "sof-adl-nau8825.tplg",
        },
index 31b4311..07f96a1 100644 (file)
@@ -203,6 +203,25 @@ static const struct snd_soc_acpi_link_adr rpl_sdw_rt711_link2_rt1316_link01_rt71
        {}
 };
 
+static const struct snd_soc_acpi_link_adr rpl_sdw_rt711_link2_rt1316_link01[] = {
+       {
+               .mask = BIT(2),
+               .num_adr = ARRAY_SIZE(rt711_sdca_2_adr),
+               .adr_d = rt711_sdca_2_adr,
+       },
+       {
+               .mask = BIT(0),
+               .num_adr = ARRAY_SIZE(rt1316_0_group2_adr),
+               .adr_d = rt1316_0_group2_adr,
+       },
+       {
+               .mask = BIT(1),
+               .num_adr = ARRAY_SIZE(rt1316_1_group2_adr),
+               .adr_d = rt1316_1_group2_adr,
+       },
+       {}
+};
+
 static const struct snd_soc_acpi_link_adr rpl_sdw_rt711_link0_rt1318_link12_rt714_link3[] = {
        {
                .mask = BIT(0),
@@ -227,6 +246,25 @@ static const struct snd_soc_acpi_link_adr rpl_sdw_rt711_link0_rt1318_link12_rt71
        {}
 };
 
+static const struct snd_soc_acpi_link_adr rpl_sdw_rt711_link0_rt1318_link12[] = {
+       {
+               .mask = BIT(0),
+               .num_adr = ARRAY_SIZE(rt711_sdca_0_adr),
+               .adr_d = rt711_sdca_0_adr,
+       },
+       {
+               .mask = BIT(1),
+               .num_adr = ARRAY_SIZE(rt1318_1_group1_adr),
+               .adr_d = rt1318_1_group1_adr,
+       },
+       {
+               .mask = BIT(2),
+               .num_adr = ARRAY_SIZE(rt1318_2_group1_adr),
+               .adr_d = rt1318_2_group1_adr,
+       },
+       {}
+};
+
 static const struct snd_soc_acpi_link_adr rpl_sdw_rt1316_link12_rt714_link0[] = {
        {
                .mask = BIT(1),
@@ -272,12 +310,24 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_sdw_machines[] = {
                .sof_tplg_filename = "sof-rpl-rt711-l0-rt1318-l12-rt714-l3.tplg",
        },
        {
+               .link_mask = 0x7, /* rt711 on link0 & two rt1318s on link1 and link2 */
+               .links = rpl_sdw_rt711_link0_rt1318_link12,
+               .drv_name = "sof_sdw",
+               .sof_tplg_filename = "sof-rpl-rt711-l0-rt1318-l12.tplg",
+       },
+       {
                .link_mask = 0x7, /* rt714 on link0 & two rt1316s on link1 and link2 */
                .links = rpl_sdw_rt1316_link12_rt714_link0,
                .drv_name = "sof_sdw",
                .sof_tplg_filename = "sof-rpl-rt1316-l12-rt714-l0.tplg",
        },
        {
+               .link_mask = 0x7, /* rt711 on link2 & two rt1316s on link0 and link1 */
+               .links = rpl_sdw_rt711_link2_rt1316_link01,
+               .drv_name = "sof_sdw",
+               .sof_tplg_filename = "sof-rpl-rt711-l2-rt1316-l01.tplg",
+       },
+       {
                .link_mask = 0x1, /* link0 required */
                .links = rpl_rvp,
                .drv_name = "sof_sdw",
index 363fa4d..b027fba 100644 (file)
@@ -182,10 +182,12 @@ config SND_SOC_MT8186_MT6366_DA7219_MAX98357
          If unsure select "N".
 
 config SND_SOC_MT8186_MT6366_RT1019_RT5682S
-       tristate "ASoC Audio driver for MT8186 with RT1019 RT5682S codec"
+       tristate "ASoC Audio driver for MT8186 with RT1019 RT5682S MAX98357A/MAX98360 codec"
        depends on I2C && GPIOLIB
        depends on SND_SOC_MT8186 && MTK_PMIC_WRAP
+       select SND_SOC_MAX98357A
        select SND_SOC_MT6358
+       select SND_SOC_MAX98357A
        select SND_SOC_RT1015P
        select SND_SOC_RT5682S
        select SND_SOC_BT_SCO
index 8f77a0b..af44e33 100644 (file)
@@ -1083,6 +1083,21 @@ static struct snd_soc_card mt8186_mt6366_rt1019_rt5682s_soc_card = {
        .num_configs = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_codec_conf),
 };
 
+static struct snd_soc_card mt8186_mt6366_rt5682s_max98360_soc_card = {
+       .name = "mt8186_rt5682s_max98360",
+       .owner = THIS_MODULE,
+       .dai_link = mt8186_mt6366_rt1019_rt5682s_dai_links,
+       .num_links = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_dai_links),
+       .controls = mt8186_mt6366_rt1019_rt5682s_controls,
+       .num_controls = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_controls),
+       .dapm_widgets = mt8186_mt6366_rt1019_rt5682s_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_widgets),
+       .dapm_routes = mt8186_mt6366_rt1019_rt5682s_routes,
+       .num_dapm_routes = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_routes),
+       .codec_conf = mt8186_mt6366_rt1019_rt5682s_codec_conf,
+       .num_configs = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_codec_conf),
+};
+
 static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
 {
        struct snd_soc_card *card;
@@ -1232,9 +1247,14 @@ err_adsp_node:
 
 #if IS_ENABLED(CONFIG_OF)
 static const struct of_device_id mt8186_mt6366_rt1019_rt5682s_dt_match[] = {
-       {       .compatible = "mediatek,mt8186-mt6366-rt1019-rt5682s-sound",
+       {
+               .compatible = "mediatek,mt8186-mt6366-rt1019-rt5682s-sound",
                .data = &mt8186_mt6366_rt1019_rt5682s_soc_card,
        },
+       {
+               .compatible = "mediatek,mt8186-mt6366-rt5682s-max98360-sound",
+               .data = &mt8186_mt6366_rt5682s_max98360_soc_card,
+       },
        {}
 };
 MODULE_DEVICE_TABLE(of, mt8186_mt6366_rt1019_rt5682s_dt_match);
index 96a6d47..e7b00d1 100644 (file)
@@ -2,7 +2,6 @@
 menuconfig SND_SOC_QCOM
        tristate "ASoC support for QCOM platforms"
        depends on ARCH_QCOM || COMPILE_TEST
-       imply SND_SOC_QCOM_COMMON
        help
          Say Y or M if you want to add support to use audio devices
          in Qualcomm Technologies SOC-based platforms.
@@ -60,14 +59,16 @@ config SND_SOC_STORM
 config SND_SOC_APQ8016_SBC
        tristate "SoC Audio support for APQ8016 SBC platforms"
        select SND_SOC_LPASS_APQ8016
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
        help
          Support for Qualcomm Technologies LPASS audio block in
          APQ8016 SOC-based systems.
          Say Y if you want to use audio devices on MI2S.
 
 config SND_SOC_QCOM_COMMON
-       depends on SOUNDWIRE
+       tristate
+
+config SND_SOC_QCOM_SDW
        tristate
 
 config SND_SOC_QDSP6_COMMON
@@ -144,7 +145,7 @@ config SND_SOC_MSM8996
        depends on QCOM_APR
        depends on COMMON_CLK
        select SND_SOC_QDSP6
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
        help
          Support for Qualcomm Technologies LPASS audio block in
          APQ8096 SoC-based systems.
@@ -155,7 +156,7 @@ config SND_SOC_SDM845
        depends on QCOM_APR && I2C && SOUNDWIRE
        depends on COMMON_CLK
        select SND_SOC_QDSP6
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
        select SND_SOC_RT5663
        select SND_SOC_MAX98927
        imply SND_SOC_CROS_EC_CODEC
@@ -169,7 +170,8 @@ config SND_SOC_SM8250
        depends on QCOM_APR && SOUNDWIRE
        depends on COMMON_CLK
        select SND_SOC_QDSP6
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_SDW
        help
          To add support for audio on Qualcomm Technologies Inc.
          SM8250 SoC-based systems.
@@ -180,7 +182,8 @@ config SND_SOC_SC8280XP
        depends on QCOM_APR && SOUNDWIRE
        depends on COMMON_CLK
        select SND_SOC_QDSP6
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_SDW
        help
          To add support for audio on Qualcomm Technologies Inc.
          SC8280XP SoC-based systems.
@@ -190,7 +193,7 @@ config SND_SOC_SC7180
        tristate "SoC Machine driver for SC7180 boards"
        depends on I2C && GPIOLIB
        depends on SOUNDWIRE || SOUNDWIRE=n
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
        select SND_SOC_LPASS_SC7180
        select SND_SOC_MAX98357A
        select SND_SOC_RT5682_I2C
@@ -204,7 +207,7 @@ config SND_SOC_SC7180
 config SND_SOC_SC7280
        tristate "SoC Machine driver for SC7280 boards"
        depends on I2C && SOUNDWIRE
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
        select SND_SOC_LPASS_SC7280
        select SND_SOC_MAX98357A
        select SND_SOC_WCD938X_SDW
index 8b97172..254350d 100644 (file)
@@ -28,6 +28,7 @@ snd-soc-sdm845-objs := sdm845.o
 snd-soc-sm8250-objs := sm8250.o
 snd-soc-sc8280xp-objs := sc8280xp.o
 snd-soc-qcom-common-objs := common.o
+snd-soc-qcom-sdw-objs := sdw.o
 
 obj-$(CONFIG_SND_SOC_STORM) += snd-soc-storm.o
 obj-$(CONFIG_SND_SOC_APQ8016_SBC) += snd-soc-apq8016-sbc.o
@@ -38,6 +39,7 @@ obj-$(CONFIG_SND_SOC_SC8280XP) += snd-soc-sc8280xp.o
 obj-$(CONFIG_SND_SOC_SDM845) += snd-soc-sdm845.o
 obj-$(CONFIG_SND_SOC_SM8250) += snd-soc-sm8250.o
 obj-$(CONFIG_SND_SOC_QCOM_COMMON) += snd-soc-qcom-common.o
+obj-$(CONFIG_SND_SOC_QCOM_SDW) += snd-soc-qcom-sdw.o
 
 #DSP lib
 obj-$(CONFIG_SND_SOC_QDSP6) += qdsp6/
index 49c74c1..96fe802 100644 (file)
@@ -180,120 +180,6 @@ err_put_np:
 }
 EXPORT_SYMBOL_GPL(qcom_snd_parse_of);
 
-int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
-                        struct sdw_stream_runtime *sruntime,
-                        bool *stream_prepared)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
-       int ret;
-
-       if (!sruntime)
-               return 0;
-
-       switch (cpu_dai->id) {
-       case WSA_CODEC_DMA_RX_0:
-       case WSA_CODEC_DMA_RX_1:
-       case RX_CODEC_DMA_RX_0:
-       case RX_CODEC_DMA_RX_1:
-       case TX_CODEC_DMA_TX_0:
-       case TX_CODEC_DMA_TX_1:
-       case TX_CODEC_DMA_TX_2:
-       case TX_CODEC_DMA_TX_3:
-               break;
-       default:
-               return 0;
-       }
-
-       if (*stream_prepared) {
-               sdw_disable_stream(sruntime);
-               sdw_deprepare_stream(sruntime);
-               *stream_prepared = false;
-       }
-
-       ret = sdw_prepare_stream(sruntime);
-       if (ret)
-               return ret;
-
-       /**
-        * NOTE: there is a strict hw requirement about the ordering of port
-        * enables and actual WSA881x PA enable. PA enable should only happen
-        * after soundwire ports are enabled if not DC on the line is
-        * accumulated resulting in Click/Pop Noise
-        * PA enable/mute are handled as part of codec DAPM and digital mute.
-        */
-
-       ret = sdw_enable_stream(sruntime);
-       if (ret) {
-               sdw_deprepare_stream(sruntime);
-               return ret;
-       }
-       *stream_prepared  = true;
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(qcom_snd_sdw_prepare);
-
-int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
-                          struct snd_pcm_hw_params *params,
-                          struct sdw_stream_runtime **psruntime)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_dai *codec_dai;
-       struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
-       struct sdw_stream_runtime *sruntime;
-       int i;
-
-       switch (cpu_dai->id) {
-       case WSA_CODEC_DMA_RX_0:
-       case RX_CODEC_DMA_RX_0:
-       case RX_CODEC_DMA_RX_1:
-       case TX_CODEC_DMA_TX_0:
-       case TX_CODEC_DMA_TX_1:
-       case TX_CODEC_DMA_TX_2:
-       case TX_CODEC_DMA_TX_3:
-               for_each_rtd_codec_dais(rtd, i, codec_dai) {
-                       sruntime = snd_soc_dai_get_stream(codec_dai, substream->stream);
-                       if (sruntime != ERR_PTR(-ENOTSUPP))
-                               *psruntime = sruntime;
-               }
-               break;
-       }
-
-       return 0;
-
-}
-EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_params);
-
-int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
-                        struct sdw_stream_runtime *sruntime, bool *stream_prepared)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
-
-       switch (cpu_dai->id) {
-       case WSA_CODEC_DMA_RX_0:
-       case WSA_CODEC_DMA_RX_1:
-       case RX_CODEC_DMA_RX_0:
-       case RX_CODEC_DMA_RX_1:
-       case TX_CODEC_DMA_TX_0:
-       case TX_CODEC_DMA_TX_1:
-       case TX_CODEC_DMA_TX_2:
-       case TX_CODEC_DMA_TX_3:
-               if (sruntime && *stream_prepared) {
-                       sdw_disable_stream(sruntime);
-                       sdw_deprepare_stream(sruntime);
-                       *stream_prepared = false;
-               }
-               break;
-       default:
-               break;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_free);
-
 int qcom_snd_wcd_jack_setup(struct snd_soc_pcm_runtime *rtd,
                            struct snd_soc_jack *jack, bool *jack_setup)
 {
index 3ef5bb6..d7f80ee 100644 (file)
@@ -5,19 +5,9 @@
 #define __QCOM_SND_COMMON_H__
 
 #include <sound/soc.h>
-#include <linux/soundwire/sdw.h>
 
 int qcom_snd_parse_of(struct snd_soc_card *card);
 int qcom_snd_wcd_jack_setup(struct snd_soc_pcm_runtime *rtd,
                            struct snd_soc_jack *jack, bool *jack_setup);
 
-int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
-                        struct sdw_stream_runtime *runtime,
-                        bool *stream_prepared);
-int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
-                          struct snd_pcm_hw_params *params,
-                          struct sdw_stream_runtime **psruntime);
-int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
-                        struct sdw_stream_runtime *sruntime,
-                        bool *stream_prepared);
 #endif
index 5435384..dbdaaa8 100644 (file)
@@ -1037,10 +1037,11 @@ static void of_lpass_cpu_parse_dai_data(struct device *dev,
                                        struct lpass_data *data)
 {
        struct device_node *node;
-       int ret, id;
+       int ret, i, id;
 
        /* Allow all channels by default for backwards compatibility */
-       for (id = 0; id < data->variant->num_dai; id++) {
+       for (i = 0; i < data->variant->num_dai; i++) {
+               id = data->variant->dai_driver[i].id;
                data->mi2s_playback_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
                data->mi2s_capture_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
        }
index ade44ad..14d9fea 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/input-event-codes.h>
 #include "qdsp6/q6afe.h"
 #include "common.h"
+#include "sdw.h"
 
 #define DRIVER_NAME            "sc8280xp"
 
diff --git a/sound/soc/qcom/sdw.c b/sound/soc/qcom/sdw.c
new file mode 100644 (file)
index 0000000..1024951
--- /dev/null
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018, Linaro Limited.
+// Copyright (c) 2018, The Linux Foundation. All rights reserved.
+
+#include <linux/module.h>
+#include <sound/soc.h>
+#include "qdsp6/q6afe.h"
+#include "sdw.h"
+
+int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
+                        struct sdw_stream_runtime *sruntime,
+                        bool *stream_prepared)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+       int ret;
+
+       if (!sruntime)
+               return 0;
+
+       switch (cpu_dai->id) {
+       case WSA_CODEC_DMA_RX_0:
+       case WSA_CODEC_DMA_RX_1:
+       case RX_CODEC_DMA_RX_0:
+       case RX_CODEC_DMA_RX_1:
+       case TX_CODEC_DMA_TX_0:
+       case TX_CODEC_DMA_TX_1:
+       case TX_CODEC_DMA_TX_2:
+       case TX_CODEC_DMA_TX_3:
+               break;
+       default:
+               return 0;
+       }
+
+       if (*stream_prepared) {
+               sdw_disable_stream(sruntime);
+               sdw_deprepare_stream(sruntime);
+               *stream_prepared = false;
+       }
+
+       ret = sdw_prepare_stream(sruntime);
+       if (ret)
+               return ret;
+
+       /**
+        * NOTE: there is a strict hw requirement about the ordering of port
+        * enables and actual WSA881x PA enable. PA enable should only happen
+        * after soundwire ports are enabled if not DC on the line is
+        * accumulated resulting in Click/Pop Noise
+        * PA enable/mute are handled as part of codec DAPM and digital mute.
+        */
+
+       ret = sdw_enable_stream(sruntime);
+       if (ret) {
+               sdw_deprepare_stream(sruntime);
+               return ret;
+       }
+       *stream_prepared  = true;
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_snd_sdw_prepare);
+
+int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
+                          struct snd_pcm_hw_params *params,
+                          struct sdw_stream_runtime **psruntime)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_dai *codec_dai;
+       struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+       struct sdw_stream_runtime *sruntime;
+       int i;
+
+       switch (cpu_dai->id) {
+       case WSA_CODEC_DMA_RX_0:
+       case RX_CODEC_DMA_RX_0:
+       case RX_CODEC_DMA_RX_1:
+       case TX_CODEC_DMA_TX_0:
+       case TX_CODEC_DMA_TX_1:
+       case TX_CODEC_DMA_TX_2:
+       case TX_CODEC_DMA_TX_3:
+               for_each_rtd_codec_dais(rtd, i, codec_dai) {
+                       sruntime = snd_soc_dai_get_stream(codec_dai, substream->stream);
+                       if (sruntime != ERR_PTR(-ENOTSUPP))
+                               *psruntime = sruntime;
+               }
+               break;
+       }
+
+       return 0;
+
+}
+EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_params);
+
+int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
+                        struct sdw_stream_runtime *sruntime, bool *stream_prepared)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+
+       switch (cpu_dai->id) {
+       case WSA_CODEC_DMA_RX_0:
+       case WSA_CODEC_DMA_RX_1:
+       case RX_CODEC_DMA_RX_0:
+       case RX_CODEC_DMA_RX_1:
+       case TX_CODEC_DMA_TX_0:
+       case TX_CODEC_DMA_TX_1:
+       case TX_CODEC_DMA_TX_2:
+       case TX_CODEC_DMA_TX_3:
+               if (sruntime && *stream_prepared) {
+                       sdw_disable_stream(sruntime);
+                       sdw_deprepare_stream(sruntime);
+                       *stream_prepared = false;
+               }
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_free);
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/qcom/sdw.h b/sound/soc/qcom/sdw.h
new file mode 100644 (file)
index 0000000..d74cbb8
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (c) 2018, The Linux Foundation. All rights reserved.
+
+#ifndef __QCOM_SND_SDW_H__
+#define __QCOM_SND_SDW_H__
+
+#include <linux/soundwire/sdw.h>
+
+int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
+                        struct sdw_stream_runtime *runtime,
+                        bool *stream_prepared);
+int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
+                          struct snd_pcm_hw_params *params,
+                          struct sdw_stream_runtime **psruntime);
+int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
+                        struct sdw_stream_runtime *sruntime,
+                        bool *stream_prepared);
+#endif
index 8dbe9ef..9626a9e 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/input-event-codes.h>
 #include "qdsp6/q6afe.h"
 #include "common.h"
+#include "sdw.h"
 
 #define DRIVER_NAME            "sm8250"
 #define MI2S_BCLK_RATE         1536000
index d9a3ce7..ade0507 100644 (file)
@@ -353,7 +353,9 @@ int snd_sof_dbg_init(struct snd_sof_dev *sdev)
                        return err;
        }
 
-       return 0;
+       return snd_sof_debugfs_buf_item(sdev, &sdev->fw_state,
+                                       sizeof(sdev->fw_state),
+                                       "fw_state", 0444);
 }
 EXPORT_SYMBOL_GPL(snd_sof_dbg_init);
 
index df740be..8722bbd 100644 (file)
@@ -182,7 +182,7 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
        const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
        const struct sof_ipc_tplg_ops *tplg_ops = sdev->ipc->ops->tplg;
        pm_message_t pm_state;
-       u32 target_state = 0;
+       u32 target_state = snd_sof_dsp_power_target(sdev);
        int ret;
 
        /* do nothing if dsp suspend callback is not set */
@@ -192,6 +192,9 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
        if (runtime_suspend && !sof_ops(sdev)->runtime_suspend)
                return 0;
 
+       if (tplg_ops && tplg_ops->tear_down_all_pipelines)
+               tplg_ops->tear_down_all_pipelines(sdev, false);
+
        if (sdev->fw_state != SOF_FW_BOOT_COMPLETE)
                goto suspend;
 
@@ -206,7 +209,6 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
                }
        }
 
-       target_state = snd_sof_dsp_power_target(sdev);
        pm_state.event = target_state;
 
        /* Skip to platform-specific suspend if DSP is entering D0 */
@@ -217,9 +219,6 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
                goto suspend;
        }
 
-       if (tplg_ops->tear_down_all_pipelines)
-               tplg_ops->tear_down_all_pipelines(sdev, false);
-
        /* suspend DMA trace */
        sof_fw_trace_suspend(sdev, pm_state);
 
index 41ac718..4727043 100644 (file)
@@ -471,7 +471,7 @@ snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip,
        subs = find_matching_substream(chip, stream, target->sync_ep,
                                       target->fmt_type);
        if (!subs)
-               return sync_fmt;
+               goto end;
 
        high_score = 0;
        list_for_each_entry(fp, &subs->fmt_list, list) {
@@ -485,6 +485,7 @@ snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip,
                }
        }
 
+ end:
        if (fixed_rate)
                *fixed_rate = snd_usb_pcm_has_fixed_rate(subs);
        return sync_fmt;
index 99a66d0..d959da7 100644 (file)
@@ -160,9 +160,12 @@ find_substream_format(struct snd_usb_substream *subs,
 bool snd_usb_pcm_has_fixed_rate(struct snd_usb_substream *subs)
 {
        const struct audioformat *fp;
-       struct snd_usb_audio *chip = subs->stream->chip;
+       struct snd_usb_audio *chip;
        int rate = -1;
 
+       if (!subs)
+               return false;
+       chip = subs->stream->chip;
        if (!(chip->quirk_flags & QUIRK_FLAG_FIXED_RATE))
                return false;
        list_for_each_entry(fp, &subs->fmt_list, list) {
@@ -525,6 +528,8 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
                if (snd_usb_endpoint_compatible(chip, subs->data_endpoint,
                                                fmt, hw_params))
                        goto unlock;
+               if (stop_endpoints(subs, false))
+                       sync_pending_stops(subs);
                close_endpoints(chip, subs);
        }
 
@@ -787,11 +792,27 @@ static int apply_hw_params_minmax(struct snd_interval *it, unsigned int rmin,
        return changed;
 }
 
+/* get the specified endpoint object that is being used by other streams
+ * (i.e. the parameter is locked)
+ */
+static const struct snd_usb_endpoint *
+get_endpoint_in_use(struct snd_usb_audio *chip, int endpoint,
+                   const struct snd_usb_endpoint *ref_ep)
+{
+       const struct snd_usb_endpoint *ep;
+
+       ep = snd_usb_get_endpoint(chip, endpoint);
+       if (ep && ep->cur_audiofmt && (ep != ref_ep || ep->opened > 1))
+               return ep;
+       return NULL;
+}
+
 static int hw_rule_rate(struct snd_pcm_hw_params *params,
                        struct snd_pcm_hw_rule *rule)
 {
        struct snd_usb_substream *subs = rule->private;
        struct snd_usb_audio *chip = subs->stream->chip;
+       const struct snd_usb_endpoint *ep;
        const struct audioformat *fp;
        struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
        unsigned int rmin, rmax, r;
@@ -803,6 +824,29 @@ static int hw_rule_rate(struct snd_pcm_hw_params *params,
        list_for_each_entry(fp, &subs->fmt_list, list) {
                if (!hw_check_valid_format(subs, params, fp))
                        continue;
+
+               ep = get_endpoint_in_use(chip, fp->endpoint,
+                                        subs->data_endpoint);
+               if (ep) {
+                       hwc_debug("rate limit %d for ep#%x\n",
+                                 ep->cur_rate, fp->endpoint);
+                       rmin = min(rmin, ep->cur_rate);
+                       rmax = max(rmax, ep->cur_rate);
+                       continue;
+               }
+
+               if (fp->implicit_fb) {
+                       ep = get_endpoint_in_use(chip, fp->sync_ep,
+                                                subs->sync_endpoint);
+                       if (ep) {
+                               hwc_debug("rate limit %d for sync_ep#%x\n",
+                                         ep->cur_rate, fp->sync_ep);
+                               rmin = min(rmin, ep->cur_rate);
+                               rmax = max(rmax, ep->cur_rate);
+                               continue;
+                       }
+               }
+
                r = snd_usb_endpoint_get_clock_rate(chip, fp->clock);
                if (r > 0) {
                        if (!snd_interval_test(it, r))
@@ -872,6 +916,8 @@ static int hw_rule_format(struct snd_pcm_hw_params *params,
                          struct snd_pcm_hw_rule *rule)
 {
        struct snd_usb_substream *subs = rule->private;
+       struct snd_usb_audio *chip = subs->stream->chip;
+       const struct snd_usb_endpoint *ep;
        const struct audioformat *fp;
        struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
        u64 fbits;
@@ -881,6 +927,27 @@ static int hw_rule_format(struct snd_pcm_hw_params *params,
        list_for_each_entry(fp, &subs->fmt_list, list) {
                if (!hw_check_valid_format(subs, params, fp))
                        continue;
+
+               ep = get_endpoint_in_use(chip, fp->endpoint,
+                                        subs->data_endpoint);
+               if (ep) {
+                       hwc_debug("format limit %d for ep#%x\n",
+                                 ep->cur_format, fp->endpoint);
+                       fbits |= pcm_format_to_bits(ep->cur_format);
+                       continue;
+               }
+
+               if (fp->implicit_fb) {
+                       ep = get_endpoint_in_use(chip, fp->sync_ep,
+                                                subs->sync_endpoint);
+                       if (ep) {
+                               hwc_debug("format limit %d for sync_ep#%x\n",
+                                         ep->cur_format, fp->sync_ep);
+                               fbits |= pcm_format_to_bits(ep->cur_format);
+                               continue;
+                       }
+               }
+
                fbits |= fp->formats;
        }
        return apply_hw_params_format_bits(fmt, fbits);
@@ -913,98 +980,95 @@ static int hw_rule_period_time(struct snd_pcm_hw_params *params,
        return apply_hw_params_minmax(it, pmin, UINT_MAX);
 }
 
-/* get the EP or the sync EP for implicit fb when it's already set up */
-static const struct snd_usb_endpoint *
-get_sync_ep_from_substream(struct snd_usb_substream *subs)
-{
-       struct snd_usb_audio *chip = subs->stream->chip;
-       const struct audioformat *fp;
-       const struct snd_usb_endpoint *ep;
-
-       list_for_each_entry(fp, &subs->fmt_list, list) {
-               ep = snd_usb_get_endpoint(chip, fp->endpoint);
-               if (ep && ep->cur_audiofmt) {
-                       /* if EP is already opened solely for this substream,
-                        * we still allow us to change the parameter; otherwise
-                        * this substream has to follow the existing parameter
-                        */
-                       if (ep->cur_audiofmt != subs->cur_audiofmt || ep->opened > 1)
-                               return ep;
-               }
-               if (!fp->implicit_fb)
-                       continue;
-               /* for the implicit fb, check the sync ep as well */
-               ep = snd_usb_get_endpoint(chip, fp->sync_ep);
-               if (ep && ep->cur_audiofmt)
-                       return ep;
-       }
-       return NULL;
-}
-
 /* additional hw constraints for implicit feedback mode */
-static int hw_rule_format_implicit_fb(struct snd_pcm_hw_params *params,
-                                     struct snd_pcm_hw_rule *rule)
-{
-       struct snd_usb_substream *subs = rule->private;
-       const struct snd_usb_endpoint *ep;
-       struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
-
-       ep = get_sync_ep_from_substream(subs);
-       if (!ep)
-               return 0;
-
-       hwc_debug("applying %s\n", __func__);
-       return apply_hw_params_format_bits(fmt, pcm_format_to_bits(ep->cur_format));
-}
-
-static int hw_rule_rate_implicit_fb(struct snd_pcm_hw_params *params,
-                                   struct snd_pcm_hw_rule *rule)
-{
-       struct snd_usb_substream *subs = rule->private;
-       const struct snd_usb_endpoint *ep;
-       struct snd_interval *it;
-
-       ep = get_sync_ep_from_substream(subs);
-       if (!ep)
-               return 0;
-
-       hwc_debug("applying %s\n", __func__);
-       it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
-       return apply_hw_params_minmax(it, ep->cur_rate, ep->cur_rate);
-}
-
 static int hw_rule_period_size_implicit_fb(struct snd_pcm_hw_params *params,
                                           struct snd_pcm_hw_rule *rule)
 {
        struct snd_usb_substream *subs = rule->private;
+       struct snd_usb_audio *chip = subs->stream->chip;
+       const struct audioformat *fp;
        const struct snd_usb_endpoint *ep;
        struct snd_interval *it;
+       unsigned int rmin, rmax;
 
-       ep = get_sync_ep_from_substream(subs);
-       if (!ep)
-               return 0;
-
-       hwc_debug("applying %s\n", __func__);
        it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
-       return apply_hw_params_minmax(it, ep->cur_period_frames,
-                                     ep->cur_period_frames);
+       hwc_debug("hw_rule_period_size: (%u,%u)\n", it->min, it->max);
+       rmin = UINT_MAX;
+       rmax = 0;
+       list_for_each_entry(fp, &subs->fmt_list, list) {
+               if (!hw_check_valid_format(subs, params, fp))
+                       continue;
+               ep = get_endpoint_in_use(chip, fp->endpoint,
+                                        subs->data_endpoint);
+               if (ep) {
+                       hwc_debug("period size limit %d for ep#%x\n",
+                                 ep->cur_period_frames, fp->endpoint);
+                       rmin = min(rmin, ep->cur_period_frames);
+                       rmax = max(rmax, ep->cur_period_frames);
+                       continue;
+               }
+
+               if (fp->implicit_fb) {
+                       ep = get_endpoint_in_use(chip, fp->sync_ep,
+                                                subs->sync_endpoint);
+                       if (ep) {
+                               hwc_debug("period size limit %d for sync_ep#%x\n",
+                                         ep->cur_period_frames, fp->sync_ep);
+                               rmin = min(rmin, ep->cur_period_frames);
+                               rmax = max(rmax, ep->cur_period_frames);
+                               continue;
+                       }
+               }
+       }
+
+       if (!rmax)
+               return 0; /* no limit by implicit fb */
+       return apply_hw_params_minmax(it, rmin, rmax);
 }
 
 static int hw_rule_periods_implicit_fb(struct snd_pcm_hw_params *params,
                                       struct snd_pcm_hw_rule *rule)
 {
        struct snd_usb_substream *subs = rule->private;
+       struct snd_usb_audio *chip = subs->stream->chip;
+       const struct audioformat *fp;
        const struct snd_usb_endpoint *ep;
        struct snd_interval *it;
+       unsigned int rmin, rmax;
 
-       ep = get_sync_ep_from_substream(subs);
-       if (!ep)
-               return 0;
-
-       hwc_debug("applying %s\n", __func__);
        it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIODS);
-       return apply_hw_params_minmax(it, ep->cur_buffer_periods,
-                                     ep->cur_buffer_periods);
+       hwc_debug("hw_rule_periods: (%u,%u)\n", it->min, it->max);
+       rmin = UINT_MAX;
+       rmax = 0;
+       list_for_each_entry(fp, &subs->fmt_list, list) {
+               if (!hw_check_valid_format(subs, params, fp))
+                       continue;
+               ep = get_endpoint_in_use(chip, fp->endpoint,
+                                        subs->data_endpoint);
+               if (ep) {
+                       hwc_debug("periods limit %d for ep#%x\n",
+                                 ep->cur_buffer_periods, fp->endpoint);
+                       rmin = min(rmin, ep->cur_buffer_periods);
+                       rmax = max(rmax, ep->cur_buffer_periods);
+                       continue;
+               }
+
+               if (fp->implicit_fb) {
+                       ep = get_endpoint_in_use(chip, fp->sync_ep,
+                                                subs->sync_endpoint);
+                       if (ep) {
+                               hwc_debug("periods limit %d for sync_ep#%x\n",
+                                         ep->cur_buffer_periods, fp->sync_ep);
+                               rmin = min(rmin, ep->cur_buffer_periods);
+                               rmax = max(rmax, ep->cur_buffer_periods);
+                               continue;
+                       }
+               }
+       }
+
+       if (!rmax)
+               return 0; /* no limit by implicit fb */
+       return apply_hw_params_minmax(it, rmin, rmax);
 }
 
 /*
@@ -1113,16 +1177,6 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre
                return err;
 
        /* additional hw constraints for implicit fb */
-       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
-                                 hw_rule_format_implicit_fb, subs,
-                                 SNDRV_PCM_HW_PARAM_FORMAT, -1);
-       if (err < 0)
-               return err;
-       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
-                                 hw_rule_rate_implicit_fb, subs,
-                                 SNDRV_PCM_HW_PARAM_RATE, -1);
-       if (err < 0)
-               return err;
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                                  hw_rule_period_size_implicit_fb, subs,
                                  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
index f75601c..f10f4e6 100644 (file)
@@ -1222,6 +1222,12 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip,
                        if (err < 0)
                                return err;
                }
+
+               /* try to set the interface... */
+               usb_set_interface(chip->dev, iface_no, 0);
+               snd_usb_init_pitch(chip, fp);
+               snd_usb_init_sample_rate(chip, fp, fp->rate_max);
+               usb_set_interface(chip->dev, iface_no, altno);
        }
        return 0;
 }
index abc4186..683ca3a 100644 (file)
@@ -41,7 +41,7 @@
        (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
 
 #define MIDR_CPU_MODEL(imp, partnum) \
-       (((imp)                 << MIDR_IMPLEMENTOR_SHIFT) | \
+       ((_AT(u32, imp)         << MIDR_IMPLEMENTOR_SHIFT) | \
        (0xf                    << MIDR_ARCHITECTURE_SHIFT) | \
        ((partnum)              << MIDR_PARTNUM_SHIFT))
 
@@ -80,6 +80,7 @@
 #define ARM_CPU_PART_CORTEX_X1         0xD44
 #define ARM_CPU_PART_CORTEX_A510       0xD46
 #define ARM_CPU_PART_CORTEX_A710       0xD47
+#define ARM_CPU_PART_CORTEX_A715       0xD4D
 #define ARM_CPU_PART_CORTEX_X2         0xD48
 #define ARM_CPU_PART_NEOVERSE_N2       0xD49
 #define ARM_CPU_PART_CORTEX_A78C       0xD4B
 #define APPLE_CPU_PART_M1_FIRESTORM_PRO        0x025
 #define APPLE_CPU_PART_M1_ICESTORM_MAX 0x028
 #define APPLE_CPU_PART_M1_FIRESTORM_MAX        0x029
+#define APPLE_CPU_PART_M2_BLIZZARD     0x032
+#define APPLE_CPU_PART_M2_AVALANCHE    0x033
 
 #define AMPERE_CPU_PART_AMPERE1                0xAC3
 
 #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
 #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
 #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
+#define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)
 #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
 #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
 #define MIDR_CORTEX_A78C       MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
 #define MIDR_APPLE_M1_FIRESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_PRO)
 #define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX)
 #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
+#define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD)
+#define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE)
 #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
 
 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
index 316917b..a7a857f 100644 (file)
@@ -43,6 +43,7 @@
 #define __KVM_HAVE_VCPU_EVENTS
 
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_DIRTY_LOG_PAGE_OFFSET 64
 
 #define KVM_REG_SIZE(id)                                               \
        (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
index 649e50a..e48deab 100644 (file)
@@ -206,6 +206,8 @@ struct kvm_msr_list {
 struct kvm_msr_filter_range {
 #define KVM_MSR_FILTER_READ  (1 << 0)
 #define KVM_MSR_FILTER_WRITE (1 << 1)
+#define KVM_MSR_FILTER_RANGE_VALID_MASK (KVM_MSR_FILTER_READ | \
+                                        KVM_MSR_FILTER_WRITE)
        __u32 flags;
        __u32 nmsrs; /* number of msrs in bitmap */
        __u32 base;  /* MSR index the bitmap starts at */
@@ -214,8 +216,11 @@ struct kvm_msr_filter_range {
 
 #define KVM_MSR_FILTER_MAX_RANGES 16
 struct kvm_msr_filter {
+#ifndef __KERNEL__
 #define KVM_MSR_FILTER_DEFAULT_ALLOW (0 << 0)
+#endif
 #define KVM_MSR_FILTER_DEFAULT_DENY  (1 << 0)
+#define KVM_MSR_FILTER_VALID_MASK (KVM_MSR_FILTER_DEFAULT_DENY)
        __u32 flags;
        struct kvm_msr_filter_range ranges[KVM_MSR_FILTER_MAX_RANGES];
 };
index cc7070c..b4898ff 100644 (file)
 #define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
 #endif // static_assert
 
+
+/*
+ * Compile time check that field has an expected offset
+ */
+#define ASSERT_STRUCT_OFFSET(type, field, expected_offset)     \
+       BUILD_BUG_ON_MSG(offsetof(type, field) != (expected_offset),    \
+               "Offset of " #field " in " #type " has changed.")
+
+
 #endif /* _LINUX_BUILD_BUG_H */
index 20522d4..55155e2 100644 (file)
@@ -1767,6 +1767,7 @@ struct kvm_xen_hvm_attr {
                __u8 runstate_update_flag;
                struct {
                        __u64 gfn;
+#define KVM_XEN_INVALID_GFN ((__u64)-1)
                } shared_info;
                struct {
                        __u32 send_port;
@@ -1798,6 +1799,7 @@ struct kvm_xen_hvm_attr {
        } u;
 };
 
+
 /* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
 #define KVM_XEN_ATTR_TYPE_LONG_MODE            0x0
 #define KVM_XEN_ATTR_TYPE_SHARED_INFO          0x1
@@ -1823,6 +1825,7 @@ struct kvm_xen_vcpu_attr {
        __u16 pad[3];
        union {
                __u64 gpa;
+#define KVM_XEN_INVALID_GPA ((__u64)-1)
                __u64 pad[8];
                struct {
                        __u64 state;
index f05670d..aaf8511 100755 (executable)
@@ -77,7 +77,20 @@ check()
        file=${build_id_dir}/.build-id/${id:0:2}/`readlink ${link}`/elf
        echo "file: ${file}"
 
-       if [ ! -x $file ]; then
+       # Check for file permission of original file
+       # in case of pe-file.exe file
+       echo $1 | grep ".exe"
+       if [ $? -eq 0 ]; then
+               if [ -x $1  -a ! -x $file ]; then
+                       echo "failed: file ${file} executable does not exist"
+                       exit 1
+               fi
+
+               if [ ! -x $file -a ! -e $file ]; then
+                       echo "failed: file ${file} does not exist"
+                       exit 1
+               fi
+       elif [ ! -x $file ]; then
                echo "failed: file ${file} does not exist"
                exit 1
        fi
index de3701a..13c3a23 100644 (file)
@@ -33,7 +33,10 @@ typedef __kernel_sa_family_t sa_family_t;
 
 struct sockaddr {
        sa_family_t     sa_family;      /* address family, AF_xxx       */
-       char            sa_data[14];    /* 14 bytes of protocol address */
+       union {
+               char sa_data_min[14];           /* Minimum 14 bytes of protocol address */
+               DECLARE_FLEX_ARRAY(char, sa_data);
+       };
 };
 
 struct linger {
index a839b30..ea9c083 100644 (file)
@@ -715,9 +715,13 @@ build_id_cache__add(const char *sbuild_id, const char *name, const char *realnam
                } else if (nsi && nsinfo__need_setns(nsi)) {
                        if (copyfile_ns(name, filename, nsi))
                                goto out_free;
-               } else if (link(realname, filename) && errno != EEXIST &&
-                               copyfile(name, filename))
-                       goto out_free;
+               } else if (link(realname, filename) && errno != EEXIST) {
+                       struct stat f_stat;
+
+                       if (!(stat(name, &f_stat) < 0) &&
+                                       copyfile_mode(name, filename, f_stat.st_mode))
+                               goto out_free;
+               }
        }
 
        /* Some binaries are stripped, but have .debug files with their symbol
index 0168a96..d47de5f 100644 (file)
@@ -42,8 +42,11 @@ static char *normalize(char *str, int runtime)
        char *dst = str;
 
        while (*str) {
-               if (*str == '\\')
+               if (*str == '\\') {
                        *dst++ = *++str;
+                       if (!*str)
+                               break;
+               }
                else if (*str == '?') {
                        char *paramval;
                        int i = 0;
index fdb7f5d..85973e5 100644 (file)
@@ -15,6 +15,10 @@ bool mirrored_kernelcore = false;
 
 struct page {};
 
+void __free_pages_core(struct page *page, unsigned int order)
+{
+}
+
 void memblock_free_pages(struct page *page, unsigned long pfn,
                         unsigned int order)
 {
index ea0978f..251794f 100644 (file)
@@ -241,7 +241,7 @@ int main(int argc, char **argv)
        while ((opt = getopt(argc, argv, "hp:t:r")) != -1) {
                switch (opt) {
                case 'p':
-                       reclaim_period_ms = atoi_non_negative("Reclaim period", optarg);
+                       reclaim_period_ms = atoi_positive("Reclaim period", optarg);
                        break;
                case 't':
                        token = atoi_paranoid(optarg);
index dae510c..13c75dc 100644 (file)
@@ -434,6 +434,7 @@ static void *juggle_shinfo_state(void *arg)
 int main(int argc, char *argv[])
 {
        struct timespec min_ts, max_ts, vm_ts;
+       struct kvm_xen_hvm_attr evt_reset;
        struct kvm_vm *vm;
        pthread_t thread;
        bool verbose;
@@ -962,10 +963,8 @@ int main(int argc, char *argv[])
        }
 
  done:
-       struct kvm_xen_hvm_attr evt_reset = {
-               .type = KVM_XEN_ATTR_TYPE_EVTCHN,
-               .u.evtchn.flags = KVM_XEN_EVTCHN_RESET,
-       };
+       evt_reset.type = KVM_XEN_ATTR_TYPE_EVTCHN;
+       evt_reset.u.evtchn.flags = KVM_XEN_EVTCHN_RESET;
        vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &evt_reset);
 
        alarm(0);
index 291144c..f7900e7 100644 (file)
@@ -20,7 +20,7 @@ CLANG_TARGET_FLAGS              := $(CLANG_TARGET_FLAGS_$(ARCH))
 
 ifeq ($(CROSS_COMPILE),)
 ifeq ($(CLANG_TARGET_FLAGS),)
-$(error Specify CROSS_COMPILE or add '--target=' option to lib.mk
+$(error Specify CROSS_COMPILE or add '--target=' option to lib.mk)
 else
 CLANG_FLAGS     += --target=$(CLANG_TARGET_FLAGS)
 endif # CLANG_TARGET_FLAGS
index 90026a2..9ba0316 100644 (file)
@@ -215,7 +215,7 @@ static char *recv_frame(const struct ring_state *ring, char *frame)
 }
 
 /* A single TPACKET_V3 block can hold multiple frames */
-static void recv_block(struct ring_state *ring)
+static bool recv_block(struct ring_state *ring)
 {
        struct tpacket_block_desc *block;
        char *frame;
@@ -223,7 +223,7 @@ static void recv_block(struct ring_state *ring)
 
        block = (void *)(ring->mmap + ring->idx * ring_block_sz);
        if (!(block->hdr.bh1.block_status & TP_STATUS_USER))
-               return;
+               return false;
 
        frame = (char *)block;
        frame += block->hdr.bh1.offset_to_first_pkt;
@@ -235,6 +235,8 @@ static void recv_block(struct ring_state *ring)
 
        block->hdr.bh1.block_status = TP_STATUS_KERNEL;
        ring->idx = (ring->idx + 1) % ring_block_nr;
+
+       return true;
 }
 
 /* simple test: sleep once unconditionally and then process all rings */
@@ -245,7 +247,7 @@ static void process_rings(void)
        usleep(1000 * cfg_timeout_msec);
 
        for (i = 0; i < num_cpus; i++)
-               recv_block(&rings[i]);
+               do {} while (recv_block(&rings[i]));
 
        fprintf(stderr, "count: pass=%u nohash=%u fail=%u\n",
                frames_received - frames_nohash - frames_error,
@@ -257,12 +259,12 @@ static char *setup_ring(int fd)
        struct tpacket_req3 req3 = {0};
        void *ring;
 
-       req3.tp_retire_blk_tov = cfg_timeout_msec;
+       req3.tp_retire_blk_tov = cfg_timeout_msec / 8;
        req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH;
 
        req3.tp_frame_size = 2048;
        req3.tp_frame_nr = 1 << 10;
-       req3.tp_block_nr = 2;
+       req3.tp_block_nr = 16;
 
        req3.tp_block_size = req3.tp_frame_size * req3.tp_frame_nr;
        req3.tp_block_size /= req3.tp_block_nr;
index d95b1cb..7588428 100644 (file)
@@ -25,6 +25,7 @@
 #undef NDEBUG
 #include <assert.h>
 #include <errno.h>
+#include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -41,7 +42,7 @@
  * 1: vsyscall VMA is --xp             vsyscall=xonly
  * 2: vsyscall VMA is r-xp             vsyscall=emulate
  */
-static int g_vsyscall;
+static volatile int g_vsyscall;
 static const char *g_proc_pid_maps_vsyscall;
 static const char *g_proc_pid_smaps_vsyscall;
 
@@ -147,11 +148,12 @@ static void vsyscall(void)
 
                g_vsyscall = 0;
                /* gettimeofday(NULL, NULL); */
+               uint64_t rax = 0xffffffffff600000;
                asm volatile (
-                       "call %P0"
-                       :
-                       : "i" (0xffffffffff600000), "D" (NULL), "S" (NULL)
-                       : "rax", "rcx", "r11"
+                       "call *%[rax]"
+                       : [rax] "+a" (rax)
+                       : "D" (NULL), "S" (NULL)
+                       : "rcx", "r11"
                );
 
                g_vsyscall = 1;
index 69551bf..cacbd2a 100644 (file)
@@ -257,11 +257,12 @@ static void vsyscall(void)
 
                g_vsyscall = 0;
                /* gettimeofday(NULL, NULL); */
+               uint64_t rax = 0xffffffffff600000;
                asm volatile (
-                       "call %P0"
-                       :
-                       : "i" (0xffffffffff600000), "D" (NULL), "S" (NULL)
-                       : "rax", "rcx", "r11"
+                       "call *%[rax]"
+                       : [rax] "+a" (rax)
+                       : "D" (NULL), "S" (NULL)
+                       : "rcx", "r11"
                );
 
                g_vsyscall = 1;
index 13e8829..9c60384 100644 (file)
@@ -3954,6 +3954,13 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
        }
 
        mutex_lock(&kvm->lock);
+
+#ifdef CONFIG_LOCKDEP
+       /* Ensure that lockdep knows vcpu->mutex is taken *inside* kvm->lock */
+       mutex_lock(&vcpu->mutex);
+       mutex_unlock(&vcpu->mutex);
+#endif
+
        if (kvm_get_vcpu_by_id(kvm, id)) {
                r = -EEXIST;
                goto unlock_vcpu_destroy;
index 495ceab..9584eb5 100644 (file)
@@ -336,7 +336,7 @@ static int kvm_vfio_has_attr(struct kvm_device *dev,
        return -ENXIO;
 }
 
-static void kvm_vfio_destroy(struct kvm_device *dev)
+static void kvm_vfio_release(struct kvm_device *dev)
 {
        struct kvm_vfio *kv = dev->private;
        struct kvm_vfio_group *kvg, *tmp;
@@ -355,7 +355,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
        kvm_vfio_update_coherency(dev);
 
        kfree(kv);
-       kfree(dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
+       kfree(dev); /* alloc by kvm_ioctl_create_device, free by .release */
 }
 
 static int kvm_vfio_create(struct kvm_device *dev, u32 type);
@@ -363,7 +363,7 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type);
 static struct kvm_device_ops kvm_vfio_ops = {
        .name = "kvm-vfio",
        .create = kvm_vfio_create,
-       .destroy = kvm_vfio_destroy,
+       .release = kvm_vfio_release,
        .set_attr = kvm_vfio_set_attr,
        .has_attr = kvm_vfio_has_attr,
 };