Merge tag 'mm-hotfixes-stable-2023-04-19-16-36' of git://git.kernel.org/pub/scm/linux...
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 20 Apr 2023 00:55:45 +0000 (17:55 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 20 Apr 2023 00:55:45 +0000 (17:55 -0700)
Pull misc fixes from Andrew Morton:
 "22 hotfixes.

  19 are cc:stable and the remainder address issues which were
  introduced during this merge cycle, or aren't considered suitable for
  -stable backporting.

  19 are for MM and the remainder are for other subsystems"

* tag 'mm-hotfixes-stable-2023-04-19-16-36' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (22 commits)
  nilfs2: initialize unused bytes in segment summary blocks
  mm: page_alloc: skip regions with hugetlbfs pages when allocating 1G pages
  mm/mmap: regression fix for unmapped_area{_topdown}
  maple_tree: fix mas_empty_area() search
  maple_tree: make maple state reusable after mas_empty_area_rev()
  mm: kmsan: handle alloc failures in kmsan_ioremap_page_range()
  mm: kmsan: handle alloc failures in kmsan_vmap_pages_range_noflush()
  tools/Makefile: do missed s/vm/mm/
  mm: fix memory leak on mm_init error handling
  mm/page_alloc: fix potential deadlock on zonelist_update_seq seqlock
  kernel/sys.c: fix and improve control flow in __sys_setres[ug]id()
  Revert "userfaultfd: don't fail on unrecognized features"
  writeback, cgroup: fix null-ptr-deref write in bdi_split_work_to_wbs
  maple_tree: fix a potential memory leak, OOB access, or other unpredictable bug
  tools/mm/page_owner_sort.c: fix TGID output when cull=tg is used
  mailmap: update jtoppins' entry to reference correct email
  mm/mempolicy: fix use-after-free of VMA iterator
  mm/huge_memory.c: warn with pr_warn_ratelimited instead of VM_WARN_ON_ONCE_FOLIO
  mm/mprotect: fix do_mprotect_pkey() return on error
  mm/khugepaged: check again on anon uffd-wp during isolation
  ...

608 files changed:
Documentation/admin-guide/index.rst
Documentation/admin-guide/kernel-parameters.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/reporting-issues.rst
Documentation/devicetree/bindings/interrupt-controller/loongson,cpu-interrupt-controller.yaml [moved from Documentation/devicetree/bindings/interrupt-controller/loongarch,cpu-interrupt-controller.yaml with 72% similarity]
Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
Documentation/devicetree/bindings/pinctrl/qcom,sm8550-lpass-lpi-pinctrl.yaml
Documentation/devicetree/bindings/serial/renesas,scif.yaml
Documentation/kbuild/llvm.rst
Documentation/networking/ip-sysctl.rst
Documentation/process/howto.rst
Documentation/process/index.rst
Documentation/process/researcher-guidelines.rst
Documentation/process/security-bugs.rst [moved from Documentation/admin-guide/security-bugs.rst with 100% similarity]
Documentation/process/stable-kernel-rules.rst
Documentation/process/submitting-patches.rst
Documentation/riscv/vm-layout.rst
Documentation/sound/hd-audio/models.rst
Documentation/translations/it_IT/admin-guide/security-bugs.rst
Documentation/translations/it_IT/process/submitting-patches.rst
Documentation/translations/ja_JP/howto.rst
Documentation/translations/ko_KR/howto.rst
Documentation/translations/sp_SP/howto.rst
Documentation/translations/sp_SP/process/submitting-patches.rst
Documentation/translations/zh_CN/admin-guide/security-bugs.rst
Documentation/translations/zh_CN/process/howto.rst
Documentation/translations/zh_TW/admin-guide/security-bugs.rst
Documentation/translations/zh_TW/process/howto.rst
Documentation/virt/kvm/api.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/imx6ull-colibri.dtsi
arch/arm/boot/dts/imx7d-remarkable2.dts
arch/arm/boot/dts/rk3288.dtsi
arch/arm/configs/imx_v6_v7_defconfig
arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
arch/arm64/boot/dts/freescale/imx8mp.dtsi
arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
arch/arm64/boot/dts/qcom/sc7280-herobrine.dtsi
arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts
arch/arm64/boot/dts/qcom/sm8250-mtp.dts
arch/arm64/boot/dts/rockchip/rk3326-anbernic-rg351m.dts
arch/arm64/boot/dts/rockchip/rk3326-odroid-go.dtsi
arch/arm64/boot/dts/rockchip/rk3326-odroid-go2-v11.dts
arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
arch/arm64/boot/dts/rockchip/rk3399.dtsi
arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg353x.dtsi
arch/arm64/boot/dts/rockchip/rk3566-anbernic-rg503.dts
arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
arch/arm64/boot/dts/rockchip/rk3588s.dtsi
arch/arm64/kernel/compat_alignment.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
arch/arm64/kvm/hyp/nvhe/sys_regs.c
arch/arm64/kvm/mmu.c
arch/arm64/kvm/pmu-emul.c
arch/arm64/kvm/sys_regs.c
arch/arm64/net/bpf_jit.h
arch/arm64/net/bpf_jit_comp.c
arch/loongarch/Kconfig
arch/loongarch/include/asm/acpi.h
arch/loongarch/include/asm/addrspace.h
arch/loongarch/include/asm/bootinfo.h
arch/loongarch/include/asm/cpu-features.h
arch/loongarch/include/asm/cpu.h
arch/loongarch/include/asm/io.h
arch/loongarch/include/asm/loongarch.h
arch/loongarch/include/asm/module.lds.h
arch/loongarch/include/uapi/asm/ptrace.h
arch/loongarch/kernel/cpu-probe.c
arch/loongarch/kernel/proc.c
arch/loongarch/kernel/ptrace.c
arch/loongarch/kernel/setup.c
arch/loongarch/kernel/stacktrace.c
arch/loongarch/kernel/unwind.c
arch/loongarch/kernel/unwind_prologue.c
arch/loongarch/mm/init.c
arch/loongarch/net/bpf_jit.c
arch/loongarch/power/suspend_asm.S
arch/mips/bmips/dma.c
arch/mips/bmips/setup.c
arch/powerpc/include/asm/book3s/64/tlbflush.h
arch/powerpc/kernel/ptrace/ptrace-view.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/mm/numa.c
arch/powerpc/platforms/pseries/papr_scm.c
arch/powerpc/platforms/pseries/vas.c
arch/riscv/Kconfig
arch/riscv/Kconfig.erratas
arch/riscv/boot/dts/canaan/k210.dtsi
arch/riscv/include/asm/fixmap.h
arch/riscv/include/asm/hwcap.h
arch/riscv/include/asm/pgtable.h
arch/riscv/kernel/setup.c
arch/riscv/kernel/signal.c
arch/riscv/kvm/vcpu_timer.c
arch/riscv/mm/init.c
arch/riscv/purgatory/Makefile
arch/s390/Makefile
arch/s390/kernel/ptrace.c
arch/s390/kvm/intercept.c
arch/s390/kvm/kvm-s390.c
arch/s390/lib/uaccess.c
arch/x86/Makefile.um
arch/x86/include/asm/intel-family.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/x86_init.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/kvm_onhyperv.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm_onhyperv.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/x86.c
arch/x86/pci/fixup.c
arch/x86/purgatory/Makefile
arch/xtensa/kernel/traps.c
block/blk-mq.c
block/genhd.c
drivers/accel/ivpu/ivpu_drv.c
drivers/accel/ivpu/ivpu_drv.h
drivers/accel/ivpu/ivpu_hw_mtl.c
drivers/accel/ivpu/ivpu_ipc.h
drivers/accel/ivpu/ivpu_job.c
drivers/accel/ivpu/ivpu_pm.c
drivers/accel/ivpu/ivpu_pm.h
drivers/acpi/acpi_video.c
drivers/acpi/bus.c
drivers/acpi/resource.c
drivers/acpi/video_detect.c
drivers/acpi/x86/utils.c
drivers/base/cacheinfo.c
drivers/block/loop.c
drivers/block/ublk_drv.c
drivers/block/virtio_blk.c
drivers/bluetooth/btbcm.c
drivers/bluetooth/btsdio.c
drivers/bus/imx-weim.c
drivers/clk/clk-renesas-pcie.c
drivers/clk/imx/clk-imx6ul.c
drivers/clk/sprd/common.c
drivers/counter/104-quad-8.c
drivers/cpufreq/amd-pstate.c
drivers/cxl/core/hdm.c
drivers/cxl/core/pci.c
drivers/cxl/core/pmem.c
drivers/cxl/core/port.c
drivers/cxl/core/region.c
drivers/cxl/cxl.h
drivers/cxl/cxlpci.h
drivers/cxl/port.c
drivers/dma/apple-admac.c
drivers/dma/dmaengine.c
drivers/dma/xilinx/xdma.c
drivers/firmware/psci/psci.c
drivers/gpio/Kconfig
drivers/gpio/gpio-davinci.c
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/armada/armada_drv.c
drivers/gpu/drm/drm_buddy.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
drivers/gpu/drm/i915/display/icl_dsi.c
drivers/gpu/drm/i915/display/intel_color.c
drivers/gpu/drm/i915/display/intel_color.h
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_dp_mst.c
drivers/gpu/drm/i915/display/intel_dpt.c
drivers/gpu/drm/i915/display/intel_tc.c
drivers/gpu/drm/i915/gem/i915_gem_lmem.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
drivers/gpu/drm/i915/gt/intel_rps.c
drivers/gpu/drm/i915/gt/intel_rps.h
drivers/gpu/drm/i915/gt/uc/intel_huc.c
drivers/gpu/drm/i915/gt/uc/intel_huc.h
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_perf_types.h
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/nouveau_backlight.c
drivers/gpu/drm/nouveau/nouveau_dp.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/gpu/drm/scheduler/sched_entity.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/gpu/drm/tests/drm_buddy_test.c
drivers/hid/Kconfig
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-sensor-custom.c
drivers/hid/hid-topre.c
drivers/hid/intel-ish-hid/ishtp/bus.c
drivers/hv/connection.c
drivers/hwtracing/coresight/coresight-etm4x-core.c
drivers/hwtracing/coresight/coresight-etm4x.h
drivers/i2c/busses/i2c-mchp-pci1xxxx.c
drivers/i2c/busses/i2c-ocores.c
drivers/i2c/i2c-core-of.c
drivers/iio/accel/kionix-kx022a.c
drivers/iio/adc/ad7791.c
drivers/iio/adc/ltc2497.c
drivers/iio/adc/max11410.c
drivers/iio/adc/palmas_gpadc.c
drivers/iio/adc/qcom-spmi-adc5.c
drivers/iio/adc/ti-ads7950.c
drivers/iio/dac/cio-dac.c
drivers/iio/imu/Kconfig
drivers/iio/industrialio-buffer.c
drivers/iio/light/cm32181.c
drivers/iio/light/vcnl4000.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/erdma/erdma_cq.c
drivers/infiniband/hw/erdma/erdma_hw.h
drivers/infiniband/hw/erdma/erdma_main.c
drivers/infiniband/hw/erdma/erdma_qp.c
drivers/infiniband/hw/erdma/erdma_verbs.h
drivers/infiniband/hw/irdma/cm.c
drivers/infiniband/hw/irdma/cm.h
drivers/infiniband/hw/irdma/hw.c
drivers/infiniband/hw/irdma/utils.c
drivers/infiniband/hw/mlx5/main.c
drivers/input/joystick/xpad.c
drivers/input/mouse/alps.c
drivers/input/mouse/focaltech.c
drivers/input/serio/i8042-acpipnpio.h
drivers/input/touchscreen/goodix.c
drivers/iommu/exynos-iommu.c
drivers/iommu/intel/dmar.c
drivers/iommu/intel/iommu.h
drivers/iommu/intel/irq_remapping.c
drivers/iommu/intel/perfmon.c
drivers/iommu/iommufd/pages.c
drivers/md/dm.c
drivers/md/md.c
drivers/media/i2c/imx290.c
drivers/media/platform/qcom/venus/firmware.c
drivers/memstick/core/memstick.c
drivers/mmc/host/sdhci_am654.c
drivers/mtd/mtdblock.c
drivers/mtd/nand/ecc-mxic.c
drivers/mtd/nand/raw/meson_nand.c
drivers/mtd/nand/raw/nandsim.c
drivers/mtd/nand/raw/stm32_fmc2_nand.c
drivers/mtd/spi-nor/core.c
drivers/mtd/spi-nor/core.h
drivers/mtd/spi-nor/debugfs.c
drivers/mtd/ubi/build.c
drivers/mtd/ubi/wl.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/b53/b53_mmap.c
drivers/net/dsa/microchip/ksz8795.c
drivers/net/dsa/microchip/ksz8863_smi.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/global2.c
drivers/net/dsa/mv88e6xxx/global2.h
drivers/net/dsa/realtek/realtek-mdio.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/google/gve/gve.h
drivers/net/ethernet/google/gve/gve_tx.c
drivers/net/ethernet/intel/i40e/i40e_diag.c
drivers/net/ethernet/intel/i40e/i40e_diag.h
drivers/net/ethernet/intel/iavf/iavf.h
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
drivers/net/ethernet/intel/ice/ice_sched.c
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_txrx_lib.c
drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_ppe.c
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
drivers/net/ethernet/realtek/r8169_phy_config.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_new.c
drivers/net/ethernet/wangxun/libwx/wx_type.h
drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
drivers/net/ieee802154/ca8210.c
drivers/net/ipa/gsi_trans.c
drivers/net/net_failover.c
drivers/net/phy/dp83869.c
drivers/net/phy/micrel.c
drivers/net/phy/nxp-c45-tja11xx.c
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/phy/sfp-bus.c
drivers/net/phy/sfp.c
drivers/net/usb/r8152.c
drivers/net/veth.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/wireless/ath/ath11k/mhi.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
drivers/net/wireless/mediatek/mt76/mt7603/main.c
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
drivers/net/wireless/mediatek/mt76/mt7615/main.c
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
drivers/net/wireless/mediatek/mt76/mt7915/main.c
drivers/net/wireless/mediatek/mt76/mt7921/init.c
drivers/net/wireless/mediatek/mt76/mt7921/main.c
drivers/net/wireless/mediatek/mt76/mt7921/pci.c
drivers/net/wireless/mediatek/mt76/mt7996/main.c
drivers/net/wwan/iosm/iosm_ipc_imem.c
drivers/net/wwan/iosm/iosm_ipc_pcie.c
drivers/net/wwan/t7xx/Makefile
drivers/net/xen-netback/common.h
drivers/net/xen-netback/netback.c
drivers/nvme/host/core.c
drivers/nvme/host/pci.c
drivers/nvme/host/tcp.c
drivers/of/dynamic.c
drivers/of/platform.c
drivers/pci/controller/dwc/pcie-designware.c
drivers/pci/doe.c
drivers/pci/remove.c
drivers/perf/amlogic/meson_g12_ddr_pmu.c
drivers/pinctrl/mediatek/Kconfig
drivers/pinctrl/pinctrl-at91-pio4.c
drivers/pinctrl/pinctrl-ocelot.c
drivers/pinctrl/stm32/pinctrl-stm32.c
drivers/platform/surface/aggregator/bus.c
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/gigabyte-wmi.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel/pmc/core.c
drivers/platform/x86/intel/tpmi.c
drivers/platform/x86/intel/vsec.c
drivers/platform/x86/think-lmi.c
drivers/platform/x86/thinkpad_acpi.c
drivers/ptp/ptp_qoriq.c
drivers/pwm/core.c
drivers/pwm/pwm-cros-ec.c
drivers/pwm/pwm-hibvt.c
drivers/pwm/pwm-iqs620a.c
drivers/pwm/pwm-meson.c
drivers/pwm/pwm-sprd.c
drivers/regulator/fan53555.c
drivers/regulator/fixed.c
drivers/regulator/sm5703-regulator.c
drivers/s390/crypto/vfio_ap_drv.c
drivers/scsi/iscsi_tcp.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/mpi3mr/mpi3mr_fw.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi.c
drivers/scsi/ses.c
drivers/spi/spi-rockchip-sfc.c
drivers/spi/spi.c
drivers/tee/optee/call.c
drivers/tee/tee_shm.c
drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
drivers/thermal/intel/intel_powerclamp.c
drivers/thermal/intel/therm_throt.c
drivers/thermal/thermal_sysfs.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/sh-sci.c
drivers/ufs/core/ufshcd.c
drivers/usb/cdns3/cdnsp-ep0.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-tegra.c
drivers/usb/host/xhci.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/option.c
drivers/usb/typec/altmodes/displayport.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
drivers/vhost/scsi.c
drivers/video/fbdev/core/fbcon.c
drivers/video/fbdev/core/fbmem.c
fs/9p/xattr.c
fs/btrfs/backref.c
fs/btrfs/disk-io.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/volumes.c
fs/cifs/cifsfs.h
fs/cifs/cifssmb.c
fs/cifs/fs_context.c
fs/cifs/fs_context.h
fs/cifs/misc.c
fs/cifs/smb2pdu.c
fs/ksmbd/connection.c
fs/ksmbd/ksmbd_work.h
fs/ksmbd/server.c
fs/ksmbd/smb2pdu.c
fs/ksmbd/smb2pdu.h
fs/ksmbd/smb_common.c
fs/ksmbd/smb_common.h
fs/ksmbd/unicode.c
fs/namespace.c
fs/netfs/iterator.c
fs/nfs/Kconfig
fs/nfs/nfs4proc.c
fs/nfsd/blocklayout.c
fs/nfsd/nfs4callback.c
fs/nfsd/nfs4xdr.c
fs/zonefs/file.c
include/acpi/video.h
include/asm-generic/atomic.h
include/asm-generic/cmpxchg-local.h
include/asm-generic/cmpxchg.h
include/asm-generic/io.h
include/drm/gpu_scheduler.h
include/linux/cpuhotplug.h
include/linux/ftrace.h
include/linux/kvm_host.h
include/linux/kvm_irqfd.h
include/linux/mlx5/device.h
include/linux/netdevice.h
include/linux/pci-doe.h
include/linux/pci.h
include/linux/phy.h
include/linux/phylink.h
include/linux/rtnetlink.h
include/linux/sfp.h
include/net/bluetooth/hci_core.h
include/net/bonding.h
include/net/raw.h
include/net/xdp.h
include/trace/events/f2fs.h
include/trace/events/rcu.h
include/trace/stages/stage5_get_offsets.h
include/uapi/linux/virtio_blk.h
include/ufs/ufshcd.h
init/initramfs.c
io_uring/alloc_cache.h
io_uring/io_uring.c
io_uring/kbuf.c
io_uring/poll.c
io_uring/rsrc.h
kernel/cgroup/cpuset.c
kernel/cgroup/legacy_freezer.c
kernel/cgroup/rstat.c
kernel/dma/swiotlb.c
kernel/events/core.c
kernel/rcu/tree.c
kernel/sched/fair.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_events_synth.c
kernel/trace/trace_osnoise.c
net/9p/trans_xen.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sync.c
net/bluetooth/hidp/core.c
net/bluetooth/l2cap_core.c
net/bluetooth/sco.c
net/can/bcm.c
net/can/isotp.c
net/can/j1939/transport.c
net/core/dev.c
net/core/netpoll.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/xdp.c
net/dsa/slave.c
net/ethtool/linkmodes.c
net/ieee802154/nl802154.c
net/ipv4/icmp.c
net/ipv4/ping.c
net/ipv4/raw.c
net/ipv4/raw_diag.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_ipv4.c
net/ipv6/ip6_output.c
net/ipv6/raw.c
net/ipv6/udp.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/util.c
net/mptcp/fastopen.c
net/mptcp/options.c
net/mptcp/protocol.c
net/mptcp/subflow.c
net/netlink/af_netlink.c
net/openvswitch/actions.c
net/qrtr/af_qrtr.c
net/qrtr/ns.c
net/sctp/socket.c
net/sctp/stream_interleave.c
net/smc/af_smc.c
net/sunrpc/auth_gss/gss_krb5_test.c
net/sunrpc/svcauth_unix.c
net/sunrpc/xprtsock.c
net/vmw_vsock/virtio_transport_common.c
net/vmw_vsock/vmci_transport.c
net/vmw_vsock/vsock_loopback.c
scripts/Makefile.package
scripts/kconfig/merge_config.sh
scripts/mod/modpost.c
scripts/package/builddeb
scripts/package/gen-diff-patch
scripts/package/mkdebian
scripts/package/mkspec
sound/core/pcm_lib.c
sound/firewire/tascam/tascam-stream.c
sound/i2c/cs8427.c
sound/pci/emu10k1/emupcm.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/ymfpci/ymfpci.c
sound/pci/ymfpci/ymfpci_main.c
sound/soc/amd/yc/acp6x-mach.c
sound/soc/codecs/da7213.c
sound/soc/codecs/hdac_hdmi.c
sound/soc/codecs/lpass-rx-macro.c
sound/soc/codecs/lpass-tx-macro.c
sound/soc/codecs/lpass-wsa-macro.c
sound/soc/intel/boards/bytcr_rt5640.c
sound/soc/intel/boards/sof_sdw.c
sound/soc/intel/common/soc-acpi-intel-adl-match.c
sound/soc/soc-pcm.c
sound/soc/sof/ipc4-topology.c
sound/soc/sof/ipc4.c
sound/usb/endpoint.c
sound/usb/endpoint.h
sound/usb/format.c
sound/usb/pcm.c
tools/arch/loongarch/include/uapi/asm/bitsperlong.h
tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
tools/testing/selftests/bpf/progs/xdp_metadata.c
tools/testing/selftests/bpf/progs/xdp_metadata2.c
tools/testing/selftests/bpf/xdp_hw_metadata.c
tools/testing/selftests/bpf/xdp_metadata.h
tools/testing/selftests/drivers/net/bonding/Makefile
tools/testing/selftests/drivers/net/bonding/bond_options.sh [new file with mode: 0755]
tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh [new file with mode: 0644]
tools/testing/selftests/drivers/net/bonding/option_prio.sh [deleted file]
tools/testing/selftests/mount_setattr/mount_setattr_test.c
tools/testing/selftests/net/config
tools/testing/selftests/net/mptcp/userspace_pm.sh
tools/testing/selftests/net/openvswitch/ovs-dpctl.py
tools/testing/selftests/net/rps_default_mask.sh
tools/testing/selftests/sigaltstack/current_stack_pointer.h [new file with mode: 0644]
tools/testing/selftests/sigaltstack/sas.c
tools/testing/vsock/vsock_test.c
tools/virtio/virtio-trace/README
usr/gen_init_cpio.c
virt/kvm/eventfd.c
virt/kvm/kvm_main.c

index 0ad7e7e..09a563b 100644 (file)
@@ -36,7 +36,6 @@ problems and bugs in particular.
 
    reporting-issues
    reporting-regressions
-   security-bugs
    bug-hunting
    bug-bisect
    tainted-kernels
index 19600c5..6ae5f12 100644 (file)
@@ -128,6 +128,7 @@ parameter is applicable::
        KVM     Kernel Virtual Machine support is enabled.
        LIBATA  Libata driver is enabled
        LP      Printer support is enabled.
+       LOONGARCH LoongArch architecture is enabled.
        LOOP    Loopback device support is enabled.
        M68k    M68k architecture is enabled.
                        These options have more detailed description inside of
index 6221a1d..7016cb1 100644 (file)
                        When enabled, memory and cache locality will be
                        impacted.
 
+       writecombine=   [LOONGARCH] Control the MAT (Memory Access Type) of
+                       ioremap_wc().
+
+                       on   - Enable writecombine, use WUC for ioremap_wc()
+                       off  - Disable writecombine, use SUC for ioremap_wc()
+
        x2apic_phys     [X86-64,APIC] Use x2apic physical mode instead of
                        default x2apic cluster mode on platforms
                        supporting x2apic.
index ec62151..2fd5a03 100644 (file)
@@ -395,7 +395,7 @@ might want to be aware of; it for example explains how to add your issue to the
 list of tracked regressions, to ensure it won't fall through the cracks.
 
 What qualifies as security issue is left to your judgment. Consider reading
-Documentation/admin-guide/security-bugs.rst before proceeding, as it
+Documentation/process/security-bugs.rst before proceeding, as it
 provides additional details how to best handle security issues.
 
 An issue is a 'really severe problem' when something totally unacceptably bad
@@ -1269,7 +1269,7 @@ them when sending the report by mail. If you filed it in a bug tracker, forward
 the report's text to these addresses; but on top of it put a small note where
 you mention that you filed it with a link to the ticket.
 
-See Documentation/admin-guide/security-bugs.rst for more information.
+See Documentation/process/security-bugs.rst for more information.
 
 
 Duties after the report went out
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/interrupt-controller/loongarch,cpu-interrupt-controller.yaml#
+$id: http://devicetree.org/schemas/interrupt-controller/loongson,cpu-interrupt-controller.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: LoongArch CPU Interrupt Controller
@@ -11,7 +11,7 @@ maintainers:
 
 properties:
   compatible:
-    const: loongarch,cpu-interrupt-controller
+    const: loongson,cpu-interrupt-controller
 
   '#interrupt-cells':
     const: 1
@@ -28,7 +28,7 @@ required:
 examples:
   - |
     interrupt-controller {
-      compatible = "loongarch,cpu-interrupt-controller";
+      compatible = "loongson,cpu-interrupt-controller";
       #interrupt-cells = <1>;
       interrupt-controller;
     };
index 3fe981b..5473636 100644 (file)
@@ -76,6 +76,13 @@ properties:
       If "broken-flash-reset" is present then having this property does not
       make any difference.
 
+  spi-cpol: true
+  spi-cpha: true
+
+dependencies:
+  spi-cpol: [ spi-cpha ]
+  spi-cpha: [ spi-cpol ]
+
 unevaluatedProperties: false
 
 examples:
index 5e90051..8f60a91 100644 (file)
@@ -96,9 +96,11 @@ $defs:
           2: Lower Slew rate (slower edges)
           3: Reserved (No adjustments)
 
+      bias-bus-hold: true
       bias-pull-down: true
       bias-pull-up: true
       bias-disable: true
+      input-enable: true
       output-high: true
       output-low: true
 
index 1989bd6..54e4f41 100644 (file)
@@ -92,7 +92,7 @@ properties:
           - description: Error interrupt
           - description: Receive buffer full interrupt
           - description: Transmit buffer empty interrupt
-          - description: Transmit End interrupt
+          - description: Break interrupt
       - items:
           - description: Error interrupt
           - description: Receive buffer full interrupt
@@ -107,7 +107,7 @@ properties:
           - const: eri
           - const: rxi
           - const: txi
-          - const: tei
+          - const: bri
       - items:
           - const: eri
           - const: rxi
index bfb5168..c3851fe 100644 (file)
@@ -171,6 +171,10 @@ Getting Help
 Getting LLVM
 -------------
 
+We provide prebuilt stable versions of LLVM on `kernel.org <https://kernel.org/pub/tools/llvm/>`_.
+Below are links that may be useful for building LLVM from source or procuring
+it through a distribution's package manager.
+
 - https://releases.llvm.org/download.html
 - https://github.com/llvm/llvm-project
 - https://llvm.org/docs/GettingStarted.html
index 87dd1c5..58a78a3 100644 (file)
@@ -340,6 +340,8 @@ tcp_app_win - INTEGER
        Reserve max(window/2^tcp_app_win, mss) of window for application
        buffer. Value 0 is special, it means that nothing is reserved.
 
+       Possible values are [0, 31], inclusive.
+
        Default: 31
 
 tcp_autocorking - BOOLEAN
index cb6abcb..deb8235 100644 (file)
@@ -138,7 +138,7 @@ required reading:
     philosophy and is very important for people moving to Linux from
     development on other Operating Systems.
 
-  :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`
+  :ref:`Documentation/process/security-bugs.rst <securitybugs>`
     If you feel you have found a security problem in the Linux kernel,
     please follow the steps in this document to help notify the kernel
     developers, and help solve the issue.
index d4b6217..565df59 100644 (file)
@@ -35,6 +35,14 @@ Below are the essential guides that every developer should read.
    kernel-enforcement-statement
    kernel-driver-statement
 
+For security issues, see:
+
+.. toctree::
+   :maxdepth: 1
+
+   security-bugs
+   embargoed-hardware-issues
+
 Other guides to the community that are of interest to most developers are:
 
 .. toctree::
@@ -47,7 +55,6 @@ Other guides to the community that are of interest to most developers are:
    submit-checklist
    kernel-docs
    deprecated
-   embargoed-hardware-issues
    maintainers
    researcher-guidelines
 
index afc944e..9fcfed3 100644 (file)
@@ -68,7 +68,7 @@ Before contributing, carefully read the appropriate documentation:
 * Documentation/process/development-process.rst
 * Documentation/process/submitting-patches.rst
 * Documentation/admin-guide/reporting-issues.rst
-* Documentation/admin-guide/security-bugs.rst
+* Documentation/process/security-bugs.rst
 
 Then send a patch (including a commit log with all the details listed
 below) and follow up on any feedback from other developers.
index 2fd8aa5..51df119 100644 (file)
@@ -39,7 +39,7 @@ Procedure for submitting patches to the -stable tree
 
    Security patches should not be handled (solely) by the -stable review
    process but should follow the procedures in
-   :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`.
+   :ref:`Documentation/process/security-bugs.rst <securitybugs>`.
 
 For all other submissions, choose one of the following procedures
 -----------------------------------------------------------------
index 69ce64e..828997b 100644 (file)
@@ -254,7 +254,7 @@ If you have a patch that fixes an exploitable security bug, send that patch
 to security@kernel.org.  For severe bugs, a short embargo may be considered
 to allow distributors to get the patch out to users; in such cases,
 obviously, the patch should not be sent to any public lists. See also
-Documentation/admin-guide/security-bugs.rst.
+Documentation/process/security-bugs.rst.
 
 Patches that fix a severe bug in a released kernel should be directed
 toward the stable maintainers by putting a line like this::
index 3be44e7..5462c84 100644 (file)
@@ -47,7 +47,7 @@ RISC-V Linux Kernel SV39
                                                               | Kernel-space virtual memory, shared between all processes:
   ____________________________________________________________|___________________________________________________________
                     |            |                  |         |
-   ffffffc6fee00000 | -228    GB | ffffffc6feffffff |    2 MB | fixmap
+   ffffffc6fea00000 | -228    GB | ffffffc6feffffff |    6 MB | fixmap
    ffffffc6ff000000 | -228    GB | ffffffc6ffffffff |   16 MB | PCI io
    ffffffc700000000 | -228    GB | ffffffc7ffffffff |    4 GB | vmemmap
    ffffffc800000000 | -224    GB | ffffffd7ffffffff |   64 GB | vmalloc/ioremap space
@@ -83,7 +83,7 @@ RISC-V Linux Kernel SV48
                                                               | Kernel-space virtual memory, shared between all processes:
   ____________________________________________________________|___________________________________________________________
                     |            |                  |         |
-   ffff8d7ffee00000 |  -114.5 TB | ffff8d7ffeffffff |    2 MB | fixmap
+   ffff8d7ffea00000 |  -114.5 TB | ffff8d7ffeffffff |    6 MB | fixmap
    ffff8d7fff000000 |  -114.5 TB | ffff8d7fffffffff |   16 MB | PCI io
    ffff8d8000000000 |  -114.5 TB | ffff8f7fffffffff |    2 TB | vmemmap
    ffff8f8000000000 |  -112.5 TB | ffffaf7fffffffff |   32 TB | vmalloc/ioremap space
@@ -119,7 +119,7 @@ RISC-V Linux Kernel SV57
                                                               | Kernel-space virtual memory, shared between all processes:
   ____________________________________________________________|___________________________________________________________
                     |            |                  |         |
-   ff1bfffffee00000 | -57     PB | ff1bfffffeffffff |    2 MB | fixmap
+   ff1bfffffea00000 | -57     PB | ff1bfffffeffffff |    6 MB | fixmap
    ff1bffffff000000 | -57     PB | ff1bffffffffffff |   16 MB | PCI io
    ff1c000000000000 | -57     PB | ff1fffffffffffff |    1 PB | vmemmap
    ff20000000000000 | -56     PB | ff5fffffffffffff |   16 PB | vmalloc/ioremap space
index 9b52f50..1204304 100644 (file)
@@ -704,7 +704,7 @@ ref
 no-jd
     BIOS setup but without jack-detection
 intel
-    Intel DG45* mobos
+    Intel D*45* mobos
 dell-m6-amic
     Dell desktops/laptops with analog mics
 dell-m6-dmic
index 18a5822..20994f4 100644 (file)
@@ -1,6 +1,6 @@
 .. include:: ../disclaimer-ita.rst
 
-:Original: :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`
+:Original: :ref:`Documentation/process/security-bugs.rst <securitybugs>`
 
 .. _it_securitybugs:
 
index c2cfa09..167fce8 100644 (file)
@@ -272,7 +272,7 @@ embargo potrebbe essere preso in considerazione per dare il tempo alle
 distribuzioni di prendere la patch e renderla disponibile ai loro utenti;
 in questo caso, ovviamente, la patch non dovrebbe essere inviata su alcuna
 lista di discussione pubblica. Leggete anche
-Documentation/admin-guide/security-bugs.rst.
+Documentation/process/security-bugs.rst.
 
 Patch che correggono bachi importanti su un kernel già rilasciato, dovrebbero
 essere inviate ai manutentori dei kernel stabili aggiungendo la seguente riga::
index 9b0b343..8d856eb 100644 (file)
@@ -167,7 +167,7 @@ linux-api@vger.kernel.org に送ることを勧めます。
     このドキュメントは Linux 開発の思想を理解するのに非常に重要です。
     そして、他のOSでの開発者が Linux に移る時にとても重要です。
 
-  :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`
+  :ref:`Documentation/process/security-bugs.rst <securitybugs>`
     もし Linux カーネルでセキュリティ問題を発見したように思ったら、こ
     のドキュメントのステップに従ってカーネル開発者に連絡し、問題解決を
     支援してください。
index 969e91a..34f1489 100644 (file)
@@ -157,7 +157,7 @@ mtk.manpages@gmail.com의 메인테이너에게 보낼 것을 권장한다.
     리눅스로 전향하는 사람들에게는 매우 중요하다.
 
 
-  :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`
+  :ref:`Documentation/process/security-bugs.rst <securitybugs>`
     여러분들이 리눅스 커널의 보안 문제를 발견했다고 생각한다면 이 문서에
     나온 단계에 따라서 커널 개발자들에게 알리고 그 문제를 해결할 수 있도록
     도와 달라.
index f9818d6..f162973 100644 (file)
@@ -135,7 +135,7 @@ de obligada lectura:
      de Linux y es muy importante para las personas que se mudan a Linux
      tras desarrollar otros sistemas operativos.
 
-  :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`
+  :ref:`Documentation/process/security-bugs.rst <securitybugs>`
     Si cree que ha encontrado un problema de seguridad en el kernel de
     Linux, siga los pasos de este documento para ayudar a notificar a los
     desarrolladores del kernel y ayudar a resolver el problema.
index bf95ceb..c2757d9 100644 (file)
@@ -276,7 +276,7 @@ parche a security@kernel.org. Para errores graves, se debe mantener un
 poco de discreción y permitir que los distribuidores entreguen el parche a
 los usuarios; en esos casos, obviamente, el parche no debe enviarse a
 ninguna lista pública. Revise también
-Documentation/admin-guide/security-bugs.rst.
+Documentation/process/security-bugs.rst.
 
 Los parches que corrigen un error grave en un kernel en uso deben dirigirse
 hacia los maintainers estables poniendo una línea como esta::
index b812039..d6b8f8a 100644 (file)
@@ -1,6 +1,6 @@
 .. include:: ../disclaimer-zh_CN.rst
 
-:Original: :doc:`../../../admin-guide/security-bugs`
+:Original: :doc:`../../../process/security-bugs`
 
 :译者:
 
index 1025475..cc47be3 100644 (file)
@@ -125,7 +125,7 @@ Linux内核代码中包含有大量的文档。这些文档对于学习如何与
     这篇文档对于理解Linux的开发哲学至关重要。对于将开发平台从其他操作系
     统转移到Linux的人来说也很重要。
 
-  :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`
+  :ref:`Documentation/process/security-bugs.rst <securitybugs>`
     如果你认为自己发现了Linux内核的安全性问题,请根据这篇文档中的步骤来
     提醒其他内核开发者并帮助解决这个问题。
 
index eed260e..15f8e90 100644 (file)
@@ -2,7 +2,7 @@
 
 .. include:: ../disclaimer-zh_TW.rst
 
-:Original: :doc:`../../../admin-guide/security-bugs`
+:Original: :doc:`../../../process/security-bugs`
 
 :譯者:
 
index 8fb8edc..ea2f468 100644 (file)
@@ -128,7 +128,7 @@ Linux內核代碼中包含有大量的文檔。這些文檔對於學習如何與
     這篇文檔對於理解Linux的開發哲學至關重要。對於將開發平台從其他操作系
     統轉移到Linux的人來說也很重要。
 
-  :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`
+  :ref:`Documentation/process/security-bugs.rst <securitybugs>`
     如果你認爲自己發現了Linux內核的安全性問題,請根據這篇文檔中的步驟來
     提醒其他內核開發者並幫助解決這個問題。
 
index 62de076..a5c803f 100644 (file)
@@ -8296,11 +8296,11 @@ ENOSYS for the others.
 8.35 KVM_CAP_PMU_CAPABILITY
 ---------------------------
 
-:Capability KVM_CAP_PMU_CAPABILITY
+:Capability: KVM_CAP_PMU_CAPABILITY
 :Architectures: x86
 :Type: vm
 :Parameters: arg[0] is bitmask of PMU virtualization capabilities.
-:Returns 0 on success, -EINVAL when arg[0] contains invalid bits
+:Returns: 0 on success, -EINVAL when arg[0] contains invalid bits
 
 This capability alters PMU virtualization in KVM.
 
index 1dc8bd2..0e64787 100644 (file)
@@ -73,7 +73,7 @@ Tips for patch submitters
        and ideally, should come with a patch proposal. Please do not send
        automated reports to this list either. Such bugs will be handled
        better and faster in the usual public places. See
-       Documentation/admin-guide/security-bugs.rst for details.
+       Documentation/process/security-bugs.rst for details.
 
 8.     Happy hacking.
 
@@ -224,13 +224,13 @@ S:        Orphan / Obsolete
 F:     drivers/net/ethernet/8390/
 
 9P FILE SYSTEM
-M:     Eric Van Hensbergen <ericvh@gmail.com>
+M:     Eric Van Hensbergen <ericvh@kernel.org>
 M:     Latchesar Ionkov <lucho@ionkov.net>
 M:     Dominique Martinet <asmadeus@codewreck.org>
 R:     Christian Schoenebeck <linux_oss@crudebyte.com>
-L:     v9fs-developer@lists.sourceforge.net
+L:     v9fs@lists.linux.dev
 S:     Maintained
-W:     http://swik.net/v9fs
+W:     http://github.com/v9fs
 Q:     http://patchwork.kernel.org/project/v9fs-devel/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ericvh/v9fs.git
 T:     git git://github.com/martinetd/linux.git
@@ -4461,14 +4461,14 @@ F:      Documentation/devicetree/bindings/net/ieee802154/ca8210.txt
 F:     drivers/net/ieee802154/ca8210.c
 
 CANAAN/KENDRYTE K210 SOC FPIOA DRIVER
-M:     Damien Le Moal <damien.lemoal@wdc.com>
+M:     Damien Le Moal <dlemoal@kernel.org>
 L:     linux-riscv@lists.infradead.org
 L:     linux-gpio@vger.kernel.org (pinctrl driver)
 F:     Documentation/devicetree/bindings/pinctrl/canaan,k210-fpioa.yaml
 F:     drivers/pinctrl/pinctrl-k210.c
 
 CANAAN/KENDRYTE K210 SOC RESET CONTROLLER DRIVER
-M:     Damien Le Moal <damien.lemoal@wdc.com>
+M:     Damien Le Moal <dlemoal@kernel.org>
 L:     linux-kernel@vger.kernel.org
 L:     linux-riscv@lists.infradead.org
 S:     Maintained
@@ -4476,7 +4476,7 @@ F:        Documentation/devicetree/bindings/reset/canaan,k210-rst.yaml
 F:     drivers/reset/reset-k210.c
 
 CANAAN/KENDRYTE K210 SOC SYSTEM CONTROLLER DRIVER
-M:     Damien Le Moal <damien.lemoal@wdc.com>
+M:     Damien Le Moal <dlemoal@kernel.org>
 L:     linux-riscv@lists.infradead.org
 S:     Maintained
 F:      Documentation/devicetree/bindings/mfd/canaan,k210-sysctl.yaml
@@ -8216,6 +8216,7 @@ F:        drivers/net/ethernet/freescale/dpaa
 
 FREESCALE QORIQ DPAA FMAN DRIVER
 M:     Madalin Bucur <madalin.bucur@nxp.com>
+R:     Sean Anderson <sean.anderson@seco.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/fsl-fman.txt
@@ -11757,7 +11758,7 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 F:     drivers/ata/sata_promise.*
 
 LIBATA SUBSYSTEM (Serial and Parallel ATA drivers)
-M:     Damien Le Moal <damien.lemoal@opensource.wdc.com>
+M:     Damien Le Moal <dlemoal@kernel.org>
 L:     linux-ide@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/libata.git
@@ -14656,10 +14657,8 @@ F:     net/ipv4/nexthop.c
 
 NFC SUBSYSTEM
 M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
-L:     linux-nfc@lists.01.org (subscribers-only)
 L:     netdev@vger.kernel.org
 S:     Maintained
-B:     mailto:linux-nfc@lists.01.org
 F:     Documentation/devicetree/bindings/net/nfc/
 F:     drivers/nfc/
 F:     include/linux/platform_data/nfcmrvl.h
@@ -14670,7 +14669,6 @@ F:      net/nfc/
 NFC VIRTUAL NCI DEVICE DRIVER
 M:     Bongsu Jeon <bongsu.jeon@samsung.com>
 L:     netdev@vger.kernel.org
-L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Supported
 F:     drivers/nfc/virtual_ncidev.c
 F:     tools/testing/selftests/nci/
@@ -15042,7 +15040,6 @@ F:      Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml
 F:     sound/soc/codecs/tfa989x.c
 
 NXP-NCI NFC DRIVER
-L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Orphan
 F:     Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml
 F:     drivers/nfc/nxp-nci
@@ -18291,8 +18288,9 @@ F:      drivers/s390/block/dasd*
 F:     include/linux/dasd_mod.h
 
 S390 IOMMU (PCI)
+M:     Niklas Schnelle <schnelle@linux.ibm.com>
 M:     Matthew Rosato <mjrosato@linux.ibm.com>
-M:     Gerald Schaefer <gerald.schaefer@linux.ibm.com>
+R:     Gerald Schaefer <gerald.schaefer@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
 F:     drivers/iommu/s390-iommu.c
@@ -18487,7 +18485,6 @@ F:      include/media/drv-intf/s3c_camif.h
 
 SAMSUNG S3FWRN5 NFC DRIVER
 M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
-L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
 F:     drivers/nfc/s3fwrn5
@@ -18802,7 +18799,7 @@ F:      include/uapi/linux/sed*
 SECURITY CONTACT
 M:     Security Officers <security@kernel.org>
 S:     Supported
-F:     Documentation/admin-guide/security-bugs.rst
+F:     Documentation/process/security-bugs.rst
 
 SECURITY SUBSYSTEM
 M:     Paul Moore <paul@paul-moore.com>
@@ -20645,7 +20642,6 @@ F:      sound/soc/codecs/tscs*.h
 TENSILICA XTENSA PORT (xtensa)
 M:     Chris Zankel <chris@zankel.net>
 M:     Max Filippov <jcmvbkbc@gmail.com>
-L:     linux-xtensa@linux-xtensa.org
 S:     Maintained
 T:     git https://github.com/jcmvbkbc/linux-xtensa.git
 F:     arch/xtensa/
@@ -20981,7 +20977,6 @@ F:      drivers/iio/magnetometer/tmag5273.c
 TI TRF7970A NFC DRIVER
 M:     Mark Greer <mgreer@animalcreek.com>
 L:     linux-wireless@vger.kernel.org
-L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Supported
 F:     Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
 F:     drivers/nfc/trf7970a.c
@@ -23038,7 +23033,6 @@ F:      drivers/gpio/gpio-xra1403.c
 
 XTENSA XTFPGA PLATFORM SUPPORT
 M:     Max Filippov <jcmvbkbc@gmail.com>
-L:     linux-xtensa@linux-xtensa.org
 S:     Maintained
 F:     drivers/spi/spi-xtensa-xtfpga.c
 F:     sound/soc/xtensa/xtfpga-i2s.c
@@ -23121,7 +23115,7 @@ S:      Maintained
 F:     arch/x86/kernel/cpu/zhaoxin.c
 
 ZONEFS FILESYSTEM
-M:     Damien Le Moal <damien.lemoal@opensource.wdc.com>
+M:     Damien Le Moal <dlemoal@kernel.org>
 M:     Naohiro Aota <naohiro.aota@wdc.com>
 R:     Johannes Thumshirn <jth@kernel.org>
 L:     linux-fsdevel@vger.kernel.org
index da2586d..b5c48e3 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc7
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
index bf64ba8..fde8a19 100644 (file)
                self-powered;
                type = "micro";
 
-               ports {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-
-                       port@0 {
-                               reg = <0>;
-                               usb_dr_connector: endpoint {
-                                       remote-endpoint = <&usb1_drd_sw>;
-                               };
+               port {
+                       usb_dr_connector: endpoint {
+                               remote-endpoint = <&usb1_drd_sw>;
                        };
                };
        };
index 8b2f11e..427f8d0 100644 (file)
                reg = <0x62>;
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_epdpmic>;
-               #address-cells = <1>;
-               #size-cells = <0>;
                #thermal-sensor-cells = <0>;
                epd-pwr-good-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>;
 
index 2ca76b6..511ca86 100644 (file)
                status = "disabled";
        };
 
-       spdif: sound@ff88b0000 {
+       spdif: sound@ff8b0000 {
                compatible = "rockchip,rk3288-spdif", "rockchip,rk3066-spdif";
                reg = <0x0 0xff8b0000 0x0 0x10000>;
                #sound-dai-cells = <0>;
index 6dc6fed..8d002c6 100644 (file)
@@ -76,7 +76,7 @@ CONFIG_RFKILL=y
 CONFIG_RFKILL_INPUT=y
 CONFIG_PCI=y
 CONFIG_PCI_MSI=y
-CONFIG_PCI_IMX6=y
+CONFIG_PCI_IMX6_HOST=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
index 123a56f..feb27a0 100644 (file)
 
                        dmc: bus@38000 {
                                compatible = "simple-bus";
-                               reg = <0x0 0x38000 0x0 0x400>;
                                #address-cells = <2>;
                                #size-cells = <2>;
-                               ranges = <0x0 0x0 0x0 0x38000 0x0 0x400>;
+                               ranges = <0x0 0x0 0x0 0x38000 0x0 0x2000>;
 
                                canvas: video-lut@48 {
                                        compatible = "amlogic,canvas";
                                        reg = <0x0 0x48 0x0 0x14>;
                                };
+
+                               pmu: pmu@80 {
+                                       reg = <0x0 0x80 0x0 0x40>,
+                                             <0x0 0xc00 0x0 0x40>;
+                                       interrupts = <GIC_SPI 52 IRQ_TYPE_EDGE_RISING>;
+                               };
                        };
 
                        usb2_phy1: phy@3a000 {
                        };
                };
 
-               pmu: pmu@ff638000 {
-                       reg = <0x0 0xff638000 0x0 0x100>,
-                             <0x0 0xff638c00 0x0 0x100>;
-                       interrupts = <GIC_SPI 52 IRQ_TYPE_EDGE_RISING>;
-               };
-
                aobus: bus@ff800000 {
                        compatible = "simple-bus";
                        reg = <0x0 0xff800000 0x0 0x100000>;
index d1a6390..3f9dfd4 100644 (file)
                rohm,reset-snvs-powered;
 
                #clock-cells = <0>;
-               clocks = <&osc_32k 0>;
+               clocks = <&osc_32k>;
                clock-output-names = "clk-32k-out";
 
                regulators {
index 88321b5..6f08115 100644 (file)
@@ -99,7 +99,7 @@
                compatible = "regulator-fixed";
                enable-active-high;
                gpio = <&gpio2 20 GPIO_ACTIVE_HIGH>; /* PMIC_EN_ETH */
-               off-on-delay = <500000>;
+               off-on-delay-us = <500000>;
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_reg_eth>;
                regulator-always-on;
                enable-active-high;
                /* Verdin SD_1_PWR_EN (SODIMM 76) */
                gpio = <&gpio3 5 GPIO_ACTIVE_HIGH>;
-               off-on-delay = <100000>;
+               off-on-delay-us = <100000>;
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_usdhc2_pwr_en>;
                regulator-max-microvolt = <3300000>;
index 361426c..c296225 100644 (file)
@@ -10,7 +10,7 @@
                compatible = "regulator-fixed";
                enable-active-high;
                gpio = <&gpio_expander_21 4 GPIO_ACTIVE_HIGH>; /* ETH_PWR_EN */
-               off-on-delay = <500000>;
+               off-on-delay-us = <500000>;
                regulator-max-microvolt = <3300000>;
                regulator-min-microvolt = <3300000>;
                regulator-name = "+V3.3_ETH";
index 0dd6180..1608775 100644 (file)
@@ -87,7 +87,7 @@
                compatible = "regulator-fixed";
                enable-active-high;
                gpio = <&gpio2 20 GPIO_ACTIVE_HIGH>; /* PMIC_EN_ETH */
-               off-on-delay = <500000>;
+               off-on-delay-us = <500000>;
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_reg_eth>;
                regulator-always-on;
                enable-active-high;
                /* Verdin SD_1_PWR_EN (SODIMM 76) */
                gpio = <&gpio4 22 GPIO_ACTIVE_HIGH>;
-               off-on-delay = <100000>;
+               off-on-delay-us = <100000>;
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_usdhc2_pwr_en>;
                regulator-max-microvolt = <3300000>;
index 2dd60e3..a237275 100644 (file)
 
                        lcdif2: display-controller@32e90000 {
                                compatible = "fsl,imx8mp-lcdif";
-                               reg = <0x32e90000 0x238>;
+                               reg = <0x32e90000 0x10000>;
                                interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX8MP_CLK_MEDIA_DISP2_PIX_ROOT>,
                                         <&clk IMX8MP_CLK_MEDIA_APB_ROOT>,
index ca3f966..5cf07ca 100644 (file)
        perst-gpios = <&tlmm 58 GPIO_ACTIVE_LOW>;
 };
 
-&pcie_phy0 {
+&pcie_qmp0 {
        status = "okay";
 };
 
-&pcie_phy1 {
+&pcie_qmp1 {
        status = "okay";
 };
 
index 651a231..1b8379b 100644 (file)
        perst-gpios = <&tlmm 61 GPIO_ACTIVE_LOW>;
 };
 
-&pcie_phy0 {
+&pcie_qmp0 {
        status = "okay";
 };
 
-&pcie_phy1 {
+&pcie_qmp1 {
        status = "okay";
 };
 
index aa0a7bd..dd92433 100644 (file)
        left_spkr: speaker@0,3 {
                compatible = "sdw10217211000";
                reg = <0 3>;
-               powerdown-gpios = <&tlmm 130 GPIO_ACTIVE_HIGH>;
+               powerdown-gpios = <&tlmm 130 GPIO_ACTIVE_LOW>;
                #thermal-sensor-cells = <0>;
                sound-name-prefix = "SpkrLeft";
                #sound-dai-cells = <0>;
        right_spkr: speaker@0,4 {
                compatible = "sdw10217211000";
                reg = <0 4>;
-               powerdown-gpios = <&tlmm 130 GPIO_ACTIVE_HIGH>;
+               powerdown-gpios = <&tlmm 130 GPIO_ACTIVE_LOW>;
                #thermal-sensor-cells = <0>;
                sound-name-prefix = "SpkrRight";
                #sound-dai-cells = <0>;
index b613781..313083e 100644 (file)
@@ -464,7 +464,7 @@ ap_i2c_tpm: &i2c14 {
 
 &mdss_dp_out {
        data-lanes = <0 1>;
-       link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000 8100000000>;
+       link-frequencies = /bits/ 64 <1620000000 2700000000 5400000000>;
 };
 
 &mdss_mdp {
index df7d28f..be446eb 100644 (file)
@@ -59,8 +59,9 @@
                #size-cells = <0>;
 
                pmk8280_pon: pon@1300 {
-                       compatible = "qcom,pm8998-pon";
-                       reg = <0x1300>;
+                       compatible = "qcom,pmk8350-pon";
+                       reg = <0x1300>, <0x800>;
+                       reg-names = "hlos", "pbs";
 
                        pmk8280_pon_pwrkey: pwrkey {
                                compatible = "qcom,pmk8350-pwrkey";
index 67d2a66..5c688cb 100644 (file)
                left_spkr: speaker@0,3 {
                        compatible = "sdw10217211000";
                        reg = <0 3>;
-                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
+                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_LOW>;
                        #thermal-sensor-cells = <0>;
                        sound-name-prefix = "SpkrLeft";
                        #sound-dai-cells = <0>;
 
                right_spkr: speaker@0,4 {
                        compatible = "sdw10217211000";
-                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
+                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_LOW>;
                        reg = <0 4>;
                        #thermal-sensor-cells = <0>;
                        sound-name-prefix = "SpkrRight";
index 9850140..41f59e3 100644 (file)
                left_spkr: speaker@0,3 {
                        compatible = "sdw10217211000";
                        reg = <0 3>;
-                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
+                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_LOW>;
                        #thermal-sensor-cells = <0>;
                        sound-name-prefix = "SpkrLeft";
                        #sound-dai-cells = <0>;
 
                right_spkr: speaker@0,4 {
                        compatible = "sdw10217211000";
-                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
+                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_LOW>;
                        reg = <0 4>;
                        #thermal-sensor-cells = <0>;
                        sound-name-prefix = "SpkrRight";
index e54cdc8..4c9de23 100644 (file)
        left_spkr: speaker@0,3 {
                compatible = "sdw10217211000";
                reg = <0 3>;
-               powerdown-gpios = <&tlmm 26 GPIO_ACTIVE_HIGH>;
+               powerdown-gpios = <&tlmm 26 GPIO_ACTIVE_LOW>;
                #thermal-sensor-cells = <0>;
                sound-name-prefix = "SpkrLeft";
                #sound-dai-cells = <0>;
        right_spkr: speaker@0,4 {
                compatible = "sdw10217211000";
                reg = <0 4>;
-               powerdown-gpios = <&tlmm 127 GPIO_ACTIVE_HIGH>;
+               powerdown-gpios = <&tlmm 127 GPIO_ACTIVE_LOW>;
                #thermal-sensor-cells = <0>;
                sound-name-prefix = "SpkrRight";
                #sound-dai-cells = <0>;
index 61b3168..ce318e0 100644 (file)
@@ -24,6 +24,8 @@
 
 &internal_display {
        compatible = "elida,kd35t133";
+       iovcc-supply = <&vcc_lcd>;
+       vdd-supply = <&vcc_lcd>;
 };
 
 &pwm0 {
index 04eba43..80fc53c 100644 (file)
        internal_display: panel@0 {
                reg = <0>;
                backlight = <&backlight>;
-               iovcc-supply = <&vcc_lcd>;
                reset-gpios = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>;
                rotation = <270>;
-               vdd-supply = <&vcc_lcd>;
 
                port {
                        mipi_in_panel: endpoint {
index 139c898..d94ac81 100644 (file)
@@ -83,6 +83,8 @@
 
 &internal_display {
        compatible = "elida,kd35t133";
+       iovcc-supply = <&vcc_lcd>;
+       vdd-supply = <&vcc_lcd>;
 };
 
 &rk817 {
index 4702183..aa6f5b1 100644 (file)
@@ -59,6 +59,8 @@
 
 &internal_display {
        compatible = "elida,kd35t133";
+       iovcc-supply = <&vcc_lcd>;
+       vdd-supply = <&vcc_lcd>;
 };
 
 &rk817_charger {
index 083452c..e47d139 100644 (file)
@@ -61,7 +61,6 @@
                pinctrl-names = "default";
                pinctrl-0 = <&bl_en>;
                pwms = <&pwm0 0 1000000 PWM_POLARITY_INVERTED>;
-               pwm-delay-us = <10000>;
        };
 
        emmc_pwrseq: emmc-pwrseq {
index ee6095b..5c1929d 100644 (file)
                power-supply = <&pp3300_disp>;
                pinctrl-names = "default";
                pinctrl-0 = <&bl_en>;
-               pwm-delay-us = <10000>;
        };
 
        gpio_keys: gpio-keys {
index a47d9f7..c5e7de6 100644 (file)
                pinctrl-names = "default";
                pinctrl-0 = <&bl_en>;
                pwms = <&pwm1 0 1000000 0>;
-               pwm-delay-us = <10000>;
        };
 
        dmic: dmic {
index 194e48c..ddd45de 100644 (file)
                pinctrl-0 = <&panel_en_pin>;
                power-supply = <&vcc3v3_panel>;
 
-               ports {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-
-                       port@0 {
-                               reg = <0>;
-                               #address-cells = <1>;
-                               #size-cells = <0>;
-
-                               panel_in_edp: endpoint@0 {
-                                       reg = <0>;
-                                       remote-endpoint = <&edp_out_panel>;
-                               };
+               port {
+                       panel_in_edp: endpoint {
+                               remote-endpoint = <&edp_out_panel>;
                        };
                };
        };
        disable-wp;
        pinctrl-names = "default";
        pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>;
-       sd-uhs-sdr104;
+       sd-uhs-sdr50;
        vmmc-supply = <&vcc3v0_sd>;
        vqmmc-supply = <&vcc_sdio>;
        status = "okay";
index 7815752..bca2b50 100644 (file)
                avdd-supply = <&avdd>;
                backlight = <&backlight>;
                dvdd-supply = <&vcc3v3_s0>;
-               ports {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
 
-                       port@0 {
-                               reg = <0>;
-
-                               mipi_in_panel: endpoint {
-                                       remote-endpoint = <&mipi_out_panel>;
-                               };
+               port {
+                       mipi_in_panel: endpoint {
+                               remote-endpoint = <&mipi_out_panel>;
                        };
                };
        };
index 1881b4b..40e7c4a 100644 (file)
                      <0x0 0xfff10000 0 0x10000>, /* GICH */
                      <0x0 0xfff20000 0 0x10000>; /* GICV */
                interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH 0>;
-               its: interrupt-controller@fee20000 {
+               its: msi-controller@fee20000 {
                        compatible = "arm,gic-v3-its";
                        msi-controller;
                        #msi-cells = <1>;
index 65a80d1..9a0e217 100644 (file)
 };
 
 &cru {
-       assigned-clocks = <&cru PLL_GPLL>, <&pmucru PLL_PPLL>, <&cru PLL_VPLL>;
-       assigned-clock-rates = <1200000000>, <200000000>, <241500000>;
+       assigned-clocks = <&pmucru CLK_RTC_32K>, <&cru PLL_GPLL>,
+                         <&pmucru PLL_PPLL>, <&cru PLL_VPLL>;
+       assigned-clock-rates = <32768>, <1200000000>,
+                              <200000000>, <241500000>;
 };
 
 &gpio_keys_control {
index b4b2df8..c763c7f 100644 (file)
 };
 
 &cru {
-       assigned-clocks = <&cru PLL_GPLL>, <&pmucru PLL_PPLL>, <&cru PLL_VPLL>;
-       assigned-clock-rates = <1200000000>, <200000000>, <500000000>;
+       assigned-clocks = <&pmucru CLK_RTC_32K>, <&cru PLL_GPLL>,
+                         <&pmucru PLL_PPLL>, <&cru PLL_VPLL>;
+       assigned-clock-rates = <32768>, <1200000000>,
+                              <200000000>, <500000000>;
 };
 
 &dsi_dphy0 {
index ce7165d..102e448 100644 (file)
        non-removable;
        pinctrl-names = "default";
        pinctrl-0 = <&sdmmc1_bus4 &sdmmc1_cmd &sdmmc1_clk>;
-       sd-uhs-sdr104;
+       sd-uhs-sdr50;
        vmmc-supply = <&vcc3v3_sys>;
        vqmmc-supply = <&vcc_1v8>;
        status = "okay";
index 005cde6..a506948 100644 (file)
                        cache-size = <131072>;
                        cache-line-size = <64>;
                        cache-sets = <512>;
+                       cache-level = <2>;
                        next-level-cache = <&l3_cache>;
                };
 
                        cache-size = <131072>;
                        cache-line-size = <64>;
                        cache-sets = <512>;
+                       cache-level = <2>;
                        next-level-cache = <&l3_cache>;
                };
 
                        cache-size = <131072>;
                        cache-line-size = <64>;
                        cache-sets = <512>;
+                       cache-level = <2>;
                        next-level-cache = <&l3_cache>;
                };
 
                        cache-size = <131072>;
                        cache-line-size = <64>;
                        cache-sets = <512>;
+                       cache-level = <2>;
                        next-level-cache = <&l3_cache>;
                };
 
                        cache-size = <524288>;
                        cache-line-size = <64>;
                        cache-sets = <1024>;
+                       cache-level = <2>;
                        next-level-cache = <&l3_cache>;
                };
 
                        cache-size = <524288>;
                        cache-line-size = <64>;
                        cache-sets = <1024>;
+                       cache-level = <2>;
                        next-level-cache = <&l3_cache>;
                };
 
                        cache-size = <524288>;
                        cache-line-size = <64>;
                        cache-sets = <1024>;
+                       cache-level = <2>;
                        next-level-cache = <&l3_cache>;
                };
 
                        cache-size = <524288>;
                        cache-line-size = <64>;
                        cache-sets = <1024>;
+                       cache-level = <2>;
                        next-level-cache = <&l3_cache>;
                };
 
                        cache-size = <3145728>;
                        cache-line-size = <64>;
                        cache-sets = <4096>;
+                       cache-level = <3>;
                };
        };
 
index 5edec2f..deff21b 100644 (file)
@@ -314,36 +314,32 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs)
        int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
        unsigned int type;
        u32 instr = 0;
-       u16 tinstr = 0;
        int isize = 4;
        int thumb2_32b = 0;
-       int fault;
 
        instrptr = instruction_pointer(regs);
 
        if (compat_thumb_mode(regs)) {
                __le16 __user *ptr = (__le16 __user *)(instrptr & ~1);
+               u16 tinstr, tinst2;
 
-               fault = alignment_get_thumb(regs, ptr, &tinstr);
-               if (!fault) {
-                       if (IS_T32(tinstr)) {
-                               /* Thumb-2 32-bit */
-                               u16 tinst2;
-                               fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
-                               instr = ((u32)tinstr << 16) | tinst2;
-                               thumb2_32b = 1;
-                       } else {
-                               isize = 2;
-                               instr = thumb2arm(tinstr);
-                       }
+               if (alignment_get_thumb(regs, ptr, &tinstr))
+                       return 1;
+
+               if (IS_T32(tinstr)) { /* Thumb-2 32-bit */
+                       if (alignment_get_thumb(regs, ptr + 1, &tinst2))
+                               return 1;
+                       instr = ((u32)tinstr << 16) | tinst2;
+                       thumb2_32b = 1;
+               } else {
+                       isize = 2;
+                       instr = thumb2arm(tinstr);
                }
        } else {
-               fault = alignment_get_arm(regs, (__le32 __user *)instrptr, &instr);
+               if (alignment_get_arm(regs, (__le32 __user *)instrptr, &instr))
+                       return 1;
        }
 
-       if (fault)
-               return 1;
-
        switch (CODING_BITS(instr)) {
        case 0x00000000:        /* 3.13.4 load/store instruction extensions */
                if (LDSTHD_I_BIT(instr))
index 3bd732e..4b2e16e 100644 (file)
@@ -220,6 +220,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_VCPU_ATTRIBUTES:
        case KVM_CAP_PTP_KVM:
        case KVM_CAP_ARM_SYSTEM_SUSPEND:
+       case KVM_CAP_IRQFD_RESAMPLE:
                r = 1;
                break;
        case KVM_CAP_SET_GUEST_DEBUG2:
@@ -1889,9 +1890,33 @@ static int __init do_pkvm_init(u32 hyp_va_bits)
        return ret;
 }
 
+static u64 get_hyp_id_aa64pfr0_el1(void)
+{
+       /*
+        * Track whether the system isn't affected by spectre/meltdown in the
+        * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
+        * Although this is per-CPU, we make it global for simplicity, e.g., not
+        * to have to worry about vcpu migration.
+        *
+        * Unlike for non-protected VMs, userspace cannot override this for
+        * protected VMs.
+        */
+       u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+       val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
+                ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
+
+       val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
+                         arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
+       val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
+                         arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
+
+       return val;
+}
+
 static void kvm_hyp_init_symbols(void)
 {
-       kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+       kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
        kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
        kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
        kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
index 07edfc7..37440e1 100644 (file)
  * Allow for protected VMs:
  * - Floating-point and Advanced SIMD
  * - Data Independent Timing
+ * - Spectre/Meltdown Mitigation
  */
 #define PVM_ID_AA64PFR0_ALLOW (\
        ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
        ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
-       ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \
+       ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \
+       ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \
+       ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) \
        )
 
 /*
index 08d2b00..edd969a 100644 (file)
@@ -85,19 +85,12 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val,
 
 static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
 {
-       const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
        u64 set_mask = 0;
        u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
 
        set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
                PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
 
-       /* Spectre and Meltdown mitigation in KVM */
-       set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
-                              (u64)kvm->arch.pfr0_csv2);
-       set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
-                              (u64)kvm->arch.pfr0_csv3);
-
        return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
 }
 
index 7113587..3b9d4d2 100644 (file)
@@ -666,14 +666,33 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
                                   CONFIG_PGTABLE_LEVELS),
                .mm_ops         = &kvm_user_mm_ops,
        };
+       unsigned long flags;
        kvm_pte_t pte = 0;      /* Keep GCC quiet... */
        u32 level = ~0;
        int ret;
 
+       /*
+        * Disable IRQs so that we hazard against a concurrent
+        * teardown of the userspace page tables (which relies on
+        * IPI-ing threads).
+        */
+       local_irq_save(flags);
        ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
-       VM_BUG_ON(ret);
-       VM_BUG_ON(level >= KVM_PGTABLE_MAX_LEVELS);
-       VM_BUG_ON(!(pte & PTE_VALID));
+       local_irq_restore(flags);
+
+       if (ret)
+               return ret;
+
+       /*
+        * Not seeing an error, but not updating level? Something went
+        * deeply wrong...
+        */
+       if (WARN_ON(level >= KVM_PGTABLE_MAX_LEVELS))
+               return -EFAULT;
+
+       /* Oops, the userspace PTs are gone... Replay the fault */
+       if (!kvm_pte_valid(pte))
+               return -EAGAIN;
 
        return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
 }
@@ -1079,7 +1098,7 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
  *
  * Returns the size of the mapping.
  */
-static unsigned long
+static long
 transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
                            unsigned long hva, kvm_pfn_t *pfnp,
                            phys_addr_t *ipap)
@@ -1091,8 +1110,15 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
         * sure that the HVA and IPA are sufficiently aligned and that the
         * block map is contained within the memslot.
         */
-       if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
-           get_user_mapping_size(kvm, hva) >= PMD_SIZE) {
+       if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
+               int sz = get_user_mapping_size(kvm, hva);
+
+               if (sz < 0)
+                       return sz;
+
+               if (sz < PMD_SIZE)
+                       return PAGE_SIZE;
+
                /*
                 * The address we faulted on is backed by a transparent huge
                 * page.  However, because we map the compound huge page and
@@ -1192,7 +1218,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 {
        int ret = 0;
        bool write_fault, writable, force_pte = false;
-       bool exec_fault;
+       bool exec_fault, mte_allowed;
        bool device = false;
        unsigned long mmu_seq;
        struct kvm *kvm = vcpu->kvm;
@@ -1203,7 +1229,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        kvm_pfn_t pfn;
        bool logging_active = memslot_is_logging(memslot);
        unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
-       unsigned long vma_pagesize, fault_granule;
+       long vma_pagesize, fault_granule;
        enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
        struct kvm_pgtable *pgt;
 
@@ -1218,6 +1244,20 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        }
 
        /*
+        * Permission faults just need to update the existing leaf entry,
+        * and so normally don't require allocations from the memcache. The
+        * only exception to this is when dirty logging is enabled at runtime
+        * and a write fault needs to collapse a block entry into a table.
+        */
+       if (fault_status != ESR_ELx_FSC_PERM ||
+           (logging_active && write_fault)) {
+               ret = kvm_mmu_topup_memory_cache(memcache,
+                                                kvm_mmu_cache_min_pages(kvm));
+               if (ret)
+                       return ret;
+       }
+
+       /*
         * Let's check if we will get back a huge page backed by hugetlbfs, or
         * get block mapping for device MMIO region.
         */
@@ -1269,37 +1309,21 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                fault_ipa &= ~(vma_pagesize - 1);
 
        gfn = fault_ipa >> PAGE_SHIFT;
-       mmap_read_unlock(current->mm);
+       mte_allowed = kvm_vma_mte_allowed(vma);
 
-       /*
-        * Permission faults just need to update the existing leaf entry,
-        * and so normally don't require allocations from the memcache. The
-        * only exception to this is when dirty logging is enabled at runtime
-        * and a write fault needs to collapse a block entry into a table.
-        */
-       if (fault_status != ESR_ELx_FSC_PERM ||
-           (logging_active && write_fault)) {
-               ret = kvm_mmu_topup_memory_cache(memcache,
-                                                kvm_mmu_cache_min_pages(kvm));
-               if (ret)
-                       return ret;
-       }
+       /* Don't use the VMA after the unlock -- it may have vanished */
+       vma = NULL;
 
-       mmu_seq = vcpu->kvm->mmu_invalidate_seq;
        /*
-        * Ensure the read of mmu_invalidate_seq happens before we call
-        * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
-        * the page we just got a reference to gets unmapped before we have a
-        * chance to grab the mmu_lock, which ensure that if the page gets
-        * unmapped afterwards, the call to kvm_unmap_gfn will take it away
-        * from us again properly. This smp_rmb() interacts with the smp_wmb()
-        * in kvm_mmu_notifier_invalidate_<page|range_end>.
+        * Read mmu_invalidate_seq so that KVM can detect if the results of
+        * vma_lookup() or __gfn_to_pfn_memslot() become stale prior to
+        * acquiring kvm->mmu_lock.
         *
-        * Besides, __gfn_to_pfn_memslot() instead of gfn_to_pfn_prot() is
-        * used to avoid unnecessary overhead introduced to locate the memory
-        * slot because it's always fixed even @gfn is adjusted for huge pages.
+        * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
+        * with the smp_wmb() in kvm_mmu_invalidate_end().
         */
-       smp_rmb();
+       mmu_seq = vcpu->kvm->mmu_invalidate_seq;
+       mmap_read_unlock(current->mm);
 
        pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
                                   write_fault, &writable, NULL);
@@ -1350,11 +1374,16 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
                                                                   hva, &pfn,
                                                                   &fault_ipa);
+
+               if (vma_pagesize < 0) {
+                       ret = vma_pagesize;
+                       goto out_unlock;
+               }
        }
 
        if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) {
                /* Check the VMM hasn't introduced a new disallowed VMA */
-               if (kvm_vma_mte_allowed(vma)) {
+               if (mte_allowed) {
                        sanitise_mte_tags(kvm, pfn, vma_pagesize);
                } else {
                        ret = -EFAULT;
index 2490840..5eca0cd 100644 (file)
@@ -538,7 +538,8 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
        if (!kvm_pmu_is_3p5(vcpu))
                val &= ~ARMV8_PMU_PMCR_LP;
 
-       __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+       /* The reset bits don't indicate any state, and shouldn't be saved. */
+       __vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
 
        if (val & ARMV8_PMU_PMCR_E) {
                kvm_pmu_enable_counter_mask(vcpu,
@@ -557,6 +558,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
                for_each_set_bit(i, &mask, 32)
                        kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
        }
+       kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
index 53749d3..3468891 100644 (file)
@@ -794,7 +794,6 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                if (!kvm_supports_32bit_el0())
                        val |= ARMV8_PMU_PMCR_LC;
                kvm_pmu_handle_pmcr(vcpu, val);
-               kvm_vcpu_pmu_restore_guest(vcpu);
        } else {
                /* PMCR.P & PMCR.C are RAZ */
                val = __vcpu_sys_reg(vcpu, PMCR_EL0)
@@ -856,6 +855,22 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
        return true;
 }
 
+static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+                         u64 *val)
+{
+       u64 idx;
+
+       if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
+               /* PMCCNTR_EL0 */
+               idx = ARMV8_PMU_CYCLE_IDX;
+       else
+               /* PMEVCNTRn_EL0 */
+               idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
+
+       *val = kvm_pmu_get_counter_value(vcpu, idx);
+       return 0;
+}
+
 static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
                              struct sys_reg_params *p,
                              const struct sys_reg_desc *r)
@@ -1072,7 +1087,7 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 /* Macro to expand the PMEVCNTRn_EL0 register */
 #define PMU_PMEVCNTR_EL0(n)                                            \
        { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),                            \
-         .reset = reset_pmevcntr,                                      \
+         .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,          \
          .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
 
 /* Macro to expand the PMEVTYPERn_EL0 register */
@@ -1982,7 +1997,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        { PMU_SYS_REG(SYS_PMCEID1_EL0),
          .access = access_pmceid, .reset = NULL },
        { PMU_SYS_REG(SYS_PMCCNTR_EL0),
-         .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
+         .access = access_pmu_evcntr, .reset = reset_unknown,
+         .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
        { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
          .access = access_pmu_evtyper, .reset = NULL },
        { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
index a6acb94..c2edadb 100644 (file)
 /* DMB */
 #define A64_DMB_ISH aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH)
 
+/* ADR */
+#define A64_ADR(Rd, offset) \
+       aarch64_insn_gen_adr(0, offset, Rd, AARCH64_INSN_ADR_TYPE_ADR)
+
 #endif /* _BPF_JIT_H */
index 62f805f..b26da8e 100644 (file)
@@ -1900,7 +1900,8 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
                restore_args(ctx, args_off, nargs);
                /* call original func */
                emit(A64_LDR64I(A64_R(10), A64_SP, retaddr_off), ctx);
-               emit(A64_BLR(A64_R(10)), ctx);
+               emit(A64_ADR(A64_LR, AARCH64_INSN_SIZE * 2), ctx);
+               emit(A64_RET(A64_R(10)), ctx);
                /* store return value */
                emit(A64_STR64I(A64_R(0), A64_SP, retval_off), ctx);
                /* reserve a nop for bpf_tramp_image_put */
index 7fd5125..3ddde33 100644 (file)
@@ -447,6 +447,22 @@ config ARCH_IOREMAP
          protection support. However, you can enable LoongArch DMW-based
          ioremap() for better performance.
 
+config ARCH_WRITECOMBINE
+       bool "Enable WriteCombine (WUC) for ioremap()"
+       help
+         LoongArch maintains cache coherency in hardware, but when paired
+         with LS7A chipsets the WUC attribute (Weak-ordered UnCached, which
+         is similar to WriteCombine) is out of the scope of cache coherency
+         mechanism for PCIe devices (this is a PCIe protocol violation, which
+         may be fixed in newer chipsets).
+
+         This means WUC can only be used for write-only memory regions now, so
+         this option is disabled by default, making WUC silently fall back to
+         SUC for ioremap(). You can enable this option if the kernel is guaranteed
+         to run on hardware without this bug.
+
+         You can override this setting via writecombine=on/off boot parameter.
+
 config ARCH_STRICT_ALIGN
        bool "Enable -mstrict-align to prevent unaligned accesses" if EXPERT
        default y
index 4198753..976a810 100644 (file)
@@ -41,8 +41,11 @@ extern void loongarch_suspend_enter(void);
 
 static inline unsigned long acpi_get_wakeup_address(void)
 {
+#ifdef CONFIG_SUSPEND
        extern void loongarch_wakeup_start(void);
        return (unsigned long)loongarch_wakeup_start;
+#endif
+       return 0UL;
 }
 
 #endif /* _ASM_LOONGARCH_ACPI_H */
index 8fb699b..5c9c03b 100644 (file)
@@ -71,9 +71,9 @@ extern unsigned long vm_map_base;
 #define _ATYPE32_      int
 #define _ATYPE64_      __s64
 #ifdef CONFIG_64BIT
-#define _CONST64_(x)   x ## L
+#define _CONST64_(x)   x ## UL
 #else
-#define _CONST64_(x)   x ## LL
+#define _CONST64_(x)   x ## ULL
 #endif
 #endif
 
index 0051b52..c607968 100644 (file)
@@ -13,7 +13,6 @@ const char *get_system_type(void);
 extern void init_environ(void);
 extern void memblock_init(void);
 extern void platform_init(void);
-extern void plat_swiotlb_setup(void);
 extern int __init init_numa_memory(void);
 
 struct loongson_board_info {
index b079742..f6177f1 100644 (file)
@@ -42,6 +42,7 @@
 #define cpu_has_fpu            cpu_opt(LOONGARCH_CPU_FPU)
 #define cpu_has_lsx            cpu_opt(LOONGARCH_CPU_LSX)
 #define cpu_has_lasx           cpu_opt(LOONGARCH_CPU_LASX)
+#define cpu_has_crc32          cpu_opt(LOONGARCH_CPU_CRC32)
 #define cpu_has_complex                cpu_opt(LOONGARCH_CPU_COMPLEX)
 #define cpu_has_crypto         cpu_opt(LOONGARCH_CPU_CRYPTO)
 #define cpu_has_lvz            cpu_opt(LOONGARCH_CPU_LVZ)
index c3da917..88773d8 100644 (file)
@@ -78,25 +78,26 @@ enum cpu_type_enum {
 #define CPU_FEATURE_FPU                        3       /* CPU has FPU */
 #define CPU_FEATURE_LSX                        4       /* CPU has LSX (128-bit SIMD) */
 #define CPU_FEATURE_LASX               5       /* CPU has LASX (256-bit SIMD) */
-#define CPU_FEATURE_COMPLEX            6       /* CPU has Complex instructions */
-#define CPU_FEATURE_CRYPTO             7       /* CPU has Crypto instructions */
-#define CPU_FEATURE_LVZ                        8       /* CPU has Virtualization extension */
-#define CPU_FEATURE_LBT_X86            9       /* CPU has X86 Binary Translation */
-#define CPU_FEATURE_LBT_ARM            10      /* CPU has ARM Binary Translation */
-#define CPU_FEATURE_LBT_MIPS           11      /* CPU has MIPS Binary Translation */
-#define CPU_FEATURE_TLB                        12      /* CPU has TLB */
-#define CPU_FEATURE_CSR                        13      /* CPU has CSR */
-#define CPU_FEATURE_WATCH              14      /* CPU has watchpoint registers */
-#define CPU_FEATURE_VINT               15      /* CPU has vectored interrupts */
-#define CPU_FEATURE_CSRIPI             16      /* CPU has CSR-IPI */
-#define CPU_FEATURE_EXTIOI             17      /* CPU has EXT-IOI */
-#define CPU_FEATURE_PREFETCH           18      /* CPU has prefetch instructions */
-#define CPU_FEATURE_PMP                        19      /* CPU has perfermance counter */
-#define CPU_FEATURE_SCALEFREQ          20      /* CPU supports cpufreq scaling */
-#define CPU_FEATURE_FLATMODE           21      /* CPU has flat mode */
-#define CPU_FEATURE_EIODECODE          22      /* CPU has EXTIOI interrupt pin decode mode */
-#define CPU_FEATURE_GUESTID            23      /* CPU has GuestID feature */
-#define CPU_FEATURE_HYPERVISOR         24      /* CPU has hypervisor (running in VM) */
+#define CPU_FEATURE_CRC32              6       /* CPU has CRC32 instructions */
+#define CPU_FEATURE_COMPLEX            7       /* CPU has Complex instructions */
+#define CPU_FEATURE_CRYPTO             8       /* CPU has Crypto instructions */
+#define CPU_FEATURE_LVZ                        9       /* CPU has Virtualization extension */
+#define CPU_FEATURE_LBT_X86            10      /* CPU has X86 Binary Translation */
+#define CPU_FEATURE_LBT_ARM            11      /* CPU has ARM Binary Translation */
+#define CPU_FEATURE_LBT_MIPS           12      /* CPU has MIPS Binary Translation */
+#define CPU_FEATURE_TLB                        13      /* CPU has TLB */
+#define CPU_FEATURE_CSR                        14      /* CPU has CSR */
+#define CPU_FEATURE_WATCH              15      /* CPU has watchpoint registers */
+#define CPU_FEATURE_VINT               16      /* CPU has vectored interrupts */
+#define CPU_FEATURE_CSRIPI             17      /* CPU has CSR-IPI */
+#define CPU_FEATURE_EXTIOI             18      /* CPU has EXT-IOI */
+#define CPU_FEATURE_PREFETCH           19      /* CPU has prefetch instructions */
+#define CPU_FEATURE_PMP                        20      /* CPU has performance counter */
+#define CPU_FEATURE_SCALEFREQ          21      /* CPU supports cpufreq scaling */
+#define CPU_FEATURE_FLATMODE           22      /* CPU has flat mode */
+#define CPU_FEATURE_EIODECODE          23      /* CPU has EXTIOI interrupt pin decode mode */
+#define CPU_FEATURE_GUESTID            24      /* CPU has GuestID feature */
+#define CPU_FEATURE_HYPERVISOR         25      /* CPU has hypervisor (running in VM) */
 
 #define LOONGARCH_CPU_CPUCFG           BIT_ULL(CPU_FEATURE_CPUCFG)
 #define LOONGARCH_CPU_LAM              BIT_ULL(CPU_FEATURE_LAM)
@@ -104,6 +105,7 @@ enum cpu_type_enum {
 #define LOONGARCH_CPU_FPU              BIT_ULL(CPU_FEATURE_FPU)
 #define LOONGARCH_CPU_LSX              BIT_ULL(CPU_FEATURE_LSX)
 #define LOONGARCH_CPU_LASX             BIT_ULL(CPU_FEATURE_LASX)
+#define LOONGARCH_CPU_CRC32            BIT_ULL(CPU_FEATURE_CRC32)
 #define LOONGARCH_CPU_COMPLEX          BIT_ULL(CPU_FEATURE_COMPLEX)
 #define LOONGARCH_CPU_CRYPTO           BIT_ULL(CPU_FEATURE_CRYPTO)
 #define LOONGARCH_CPU_LVZ              BIT_ULL(CPU_FEATURE_LVZ)
index 402a7d9..545e270 100644 (file)
@@ -54,8 +54,10 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
  * @offset:    bus address of the memory
  * @size:      size of the resource to map
  */
+extern pgprot_t pgprot_wc;
+
 #define ioremap_wc(offset, size)       \
-       ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_WUC))
+       ioremap_prot((offset), (size), pgprot_val(pgprot_wc))
 
 #define ioremap_cache(offset, size)    \
        ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL))
index 65b7dcd..83da5d2 100644 (file)
@@ -117,7 +117,7 @@ static inline u32 read_cpucfg(u32 reg)
 #define  CPUCFG1_EP                    BIT(22)
 #define  CPUCFG1_RPLV                  BIT(23)
 #define  CPUCFG1_HUGEPG                        BIT(24)
-#define  CPUCFG1_IOCSRBRD              BIT(25)
+#define  CPUCFG1_CRC32                 BIT(25)
 #define  CPUCFG1_MSGINT                        BIT(26)
 
 #define LOONGARCH_CPUCFG2              0x2
@@ -423,9 +423,9 @@ static __always_inline void iocsr_write64(u64 val, u32 reg)
 #define  CSR_ASID_ASID_WIDTH           10
 #define  CSR_ASID_ASID                 (_ULCAST_(0x3ff) << CSR_ASID_ASID_SHIFT)
 
-#define LOONGARCH_CSR_PGDL             0x19    /* Page table base address when VA[47] = 0 */
+#define LOONGARCH_CSR_PGDL             0x19    /* Page table base address when VA[VALEN-1] = 0 */
 
-#define LOONGARCH_CSR_PGDH             0x1a    /* Page table base address when VA[47] = 1 */
+#define LOONGARCH_CSR_PGDH             0x1a    /* Page table base address when VA[VALEN-1] = 1 */
 
 #define LOONGARCH_CSR_PGD              0x1b    /* Page table base */
 
index 438f09d..88554f9 100644 (file)
@@ -2,8 +2,8 @@
 /* Copyright (C) 2020-2022 Loongson Technology Corporation Limited */
 SECTIONS {
        . = ALIGN(4);
-       .got : { BYTE(0) }
-       .plt : { BYTE(0) }
-       .plt.idx : { BYTE(0) }
-       .ftrace_trampoline : { BYTE(0) }
+       .got : { BYTE(0) }
+       .plt : { BYTE(0) }
+       .plt.idx : { BYTE(0) }
+       .ftrace_trampoline : { BYTE(0) }
 }
index cc48ed2..82d811b 100644 (file)
@@ -47,11 +47,12 @@ struct user_fp_state {
 };
 
 struct user_watch_state {
-       uint16_t dbg_info;
+       uint64_t dbg_info;
        struct {
                uint64_t    addr;
                uint64_t    mask;
                uint32_t    ctrl;
+               uint32_t    pad;
        } dbg_regs[8];
 };
 
index 3a3fce2..5adf0f7 100644 (file)
@@ -60,7 +60,7 @@ static inline void set_elf_platform(int cpu, const char *plat)
 
 /* MAP BASE */
 unsigned long vm_map_base;
-EXPORT_SYMBOL_GPL(vm_map_base);
+EXPORT_SYMBOL(vm_map_base);
 
 static void cpu_probe_addrbits(struct cpuinfo_loongarch *c)
 {
@@ -94,13 +94,18 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
        c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
                     LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
 
-       elf_hwcap = HWCAP_LOONGARCH_CPUCFG | HWCAP_LOONGARCH_CRC32;
+       elf_hwcap = HWCAP_LOONGARCH_CPUCFG;
 
        config = read_cpucfg(LOONGARCH_CPUCFG1);
        if (config & CPUCFG1_UAL) {
                c->options |= LOONGARCH_CPU_UAL;
                elf_hwcap |= HWCAP_LOONGARCH_UAL;
        }
+       if (config & CPUCFG1_CRC32) {
+               c->options |= LOONGARCH_CPU_CRC32;
+               elf_hwcap |= HWCAP_LOONGARCH_CRC32;
+       }
+
 
        config = read_cpucfg(LOONGARCH_CPUCFG2);
        if (config & CPUCFG2_LAM) {
index 5c67cc4..0d82907 100644 (file)
@@ -76,6 +76,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        if (cpu_has_fpu)        seq_printf(m, " fpu");
        if (cpu_has_lsx)        seq_printf(m, " lsx");
        if (cpu_has_lasx)       seq_printf(m, " lasx");
+       if (cpu_has_crc32)      seq_printf(m, " crc32");
        if (cpu_has_complex)    seq_printf(m, " complex");
        if (cpu_has_crypto)     seq_printf(m, " crypto");
        if (cpu_has_lvz)        seq_printf(m, " lvz");
index 06bceae..5fcffb4 100644 (file)
@@ -391,10 +391,10 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
        return 0;
 }
 
-static int ptrace_hbp_get_resource_info(unsigned int note_type, u16 *info)
+static int ptrace_hbp_get_resource_info(unsigned int note_type, u64 *info)
 {
        u8 num;
-       u16 reg = 0;
+       u64 reg = 0;
 
        switch (note_type) {
        case NT_LOONGARCH_HW_BREAK:
@@ -524,15 +524,16 @@ static int ptrace_hbp_set_addr(unsigned int note_type,
        return modify_user_hw_breakpoint(bp, &attr);
 }
 
-#define PTRACE_HBP_CTRL_SZ     sizeof(u32)
 #define PTRACE_HBP_ADDR_SZ     sizeof(u64)
 #define PTRACE_HBP_MASK_SZ     sizeof(u64)
+#define PTRACE_HBP_CTRL_SZ     sizeof(u32)
+#define PTRACE_HBP_PAD_SZ      sizeof(u32)
 
 static int hw_break_get(struct task_struct *target,
                        const struct user_regset *regset,
                        struct membuf to)
 {
-       u16 info;
+       u64 info;
        u32 ctrl;
        u64 addr, mask;
        int ret, idx = 0;
@@ -545,7 +546,7 @@ static int hw_break_get(struct task_struct *target,
 
        membuf_write(&to, &info, sizeof(info));
 
-       /* (address, ctrl) registers */
+       /* (address, mask, ctrl) registers */
        while (to.left) {
                ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
                if (ret)
@@ -562,6 +563,7 @@ static int hw_break_get(struct task_struct *target,
                membuf_store(&to, addr);
                membuf_store(&to, mask);
                membuf_store(&to, ctrl);
+               membuf_zero(&to, sizeof(u32));
                idx++;
        }
 
@@ -582,7 +584,7 @@ static int hw_break_set(struct task_struct *target,
        offset = offsetof(struct user_watch_state, dbg_regs);
        user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
 
-       /* (address, ctrl) registers */
+       /* (address, mask, ctrl) registers */
        limit = regset->n * regset->size;
        while (count && offset < limit) {
                if (count < PTRACE_HBP_ADDR_SZ)
@@ -602,7 +604,7 @@ static int hw_break_set(struct task_struct *target,
                        break;
 
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
-                                        offset, offset + PTRACE_HBP_ADDR_SZ);
+                                        offset, offset + PTRACE_HBP_MASK_SZ);
                if (ret)
                        return ret;
 
@@ -611,8 +613,8 @@ static int hw_break_set(struct task_struct *target,
                        return ret;
                offset += PTRACE_HBP_MASK_SZ;
 
-               ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
-                                        offset, offset + PTRACE_HBP_MASK_SZ);
+               ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
+                                        offset, offset + PTRACE_HBP_CTRL_SZ);
                if (ret)
                        return ret;
 
@@ -620,6 +622,11 @@ static int hw_break_set(struct task_struct *target,
                if (ret)
                        return ret;
                offset += PTRACE_HBP_CTRL_SZ;
+
+               user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+                                         offset, offset + PTRACE_HBP_PAD_SZ);
+               offset += PTRACE_HBP_PAD_SZ;
+
                idx++;
        }
 
index bae84cc..4444b13 100644 (file)
@@ -160,6 +160,27 @@ static void __init smbios_parse(void)
        dmi_walk(find_tokens, NULL);
 }
 
+#ifdef CONFIG_ARCH_WRITECOMBINE
+pgprot_t pgprot_wc = PAGE_KERNEL_WUC;
+#else
+pgprot_t pgprot_wc = PAGE_KERNEL_SUC;
+#endif
+
+EXPORT_SYMBOL(pgprot_wc);
+
+static int __init setup_writecombine(char *p)
+{
+       if (!strcmp(p, "on"))
+               pgprot_wc = PAGE_KERNEL_WUC;
+       else if (!strcmp(p, "off"))
+               pgprot_wc = PAGE_KERNEL_SUC;
+       else
+               pr_warn("Unknown writecombine setting \"%s\".\n", p);
+
+       return 0;
+}
+early_param("writecombine", setup_writecombine);
+
 static int usermem __initdata;
 
 static int __init early_parse_mem(char *p)
@@ -368,8 +389,8 @@ static void __init arch_mem_init(char **cmdline_p)
        /*
         * In order to reduce the possibility of kernel panic when failed to
         * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
-        * low memory as small as possible before plat_swiotlb_setup(), so
-        * make sparse_init() using top-down allocation.
+        * low memory as small as possible before swiotlb_init(), so make
+        * sparse_init() using top-down allocation.
         */
        memblock_set_bottom_up(false);
        sparse_init();
index 3a690f9..2463d2f 100644 (file)
@@ -30,7 +30,7 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
 
        regs->regs[1] = 0;
        for (unwind_start(&state, task, regs);
-             !unwind_done(&state); unwind_next_frame(&state)) {
+            !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
                addr = unwind_get_return_address(&state);
                if (!addr || !consume_entry(cookie, addr))
                        break;
index a463d69..ba324ba 100644 (file)
@@ -28,5 +28,6 @@ bool default_next_frame(struct unwind_state *state)
 
        } while (!get_stack_info(state->sp, state->task, info));
 
+       state->error = true;
        return false;
 }
index 9095fde..55afc27 100644 (file)
@@ -211,7 +211,7 @@ static bool next_frame(struct unwind_state *state)
                        pc = regs->csr_era;
 
                        if (user_mode(regs) || !__kernel_text_address(pc))
-                               return false;
+                               goto out;
 
                        state->first = true;
                        state->pc = pc;
@@ -226,6 +226,8 @@ static bool next_frame(struct unwind_state *state)
 
        } while (!get_stack_info(state->sp, state->task, info));
 
+out:
+       state->error = true;
        return false;
 }
 
index e018aed..3b7d812 100644 (file)
@@ -41,7 +41,7 @@
  * don't have to care about aliases on other CPUs.
  */
 unsigned long empty_zero_page, zero_page_mask;
-EXPORT_SYMBOL_GPL(empty_zero_page);
+EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(zero_page_mask);
 
 void setup_zero_pages(void)
@@ -270,7 +270,7 @@ pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
 #endif
 #ifndef __PAGETABLE_PMD_FOLDED
 pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
-EXPORT_SYMBOL_GPL(invalid_pmd_table);
+EXPORT_SYMBOL(invalid_pmd_table);
 #endif
 pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
 EXPORT_SYMBOL(invalid_pte_table);
index 288003a..d586df4 100644 (file)
@@ -1022,6 +1022,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
                emit_atomic(insn, ctx);
                break;
 
+       /* Speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               break;
+
        default:
                pr_err("bpf_jit: unknown opcode %02x\n", code);
                return -EINVAL;
index 90da899..e2fc3b4 100644 (file)
@@ -80,6 +80,10 @@ SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL)
 
        JUMP_VIRT_ADDR  t0, t1
 
+       /* Enable PG */
+       li.w            t0, 0xb0                # PLV=0, IE=0, PG=1
+       csrwr           t0, LOONGARCH_CSR_CRMD
+
        la.pcrel        t0, acpi_saved_sp
        ld.d            sp, t0, 0
        SETUP_WAKEUP
index 3378866..3779e78 100644 (file)
@@ -5,6 +5,8 @@
 #include <asm/bmips.h>
 #include <asm/io.h>
 
+bool bmips_rac_flush_disable;
+
 void arch_sync_dma_for_cpu_all(void)
 {
        void __iomem *cbr = BMIPS_GET_CBR();
@@ -15,6 +17,9 @@ void arch_sync_dma_for_cpu_all(void)
            boot_cpu_type() != CPU_BMIPS4380)
                return;
 
+       if (unlikely(bmips_rac_flush_disable))
+               return;
+
        /* Flush stale data out of the readahead cache */
        cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
        __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
index e95b3f7..549a639 100644 (file)
@@ -35,6 +35,8 @@
 #define REG_BCM6328_OTP                ((void __iomem *)CKSEG1ADDR(0x1000062c))
 #define BCM6328_TP1_DISABLED   BIT(9)
 
+extern bool bmips_rac_flush_disable;
+
 static const unsigned long kbase = VMLINUX_LOAD_ADDRESS & 0xfff00000;
 
 struct bmips_quirk {
@@ -104,6 +106,12 @@ static void bcm6358_quirks(void)
         * disable SMP for now
         */
        bmips_smp_enabled = 0;
+
+       /*
+        * RAC flush causes kernel panics on BCM6358 when booting from TP1
+        * because the bootloader is not initializing it properly.
+        */
+       bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
 }
 
 static void bcm6368_quirks(void)
index 2bbc0fc..5e26c7f 100644 (file)
@@ -148,6 +148,11 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
         */
 }
 
+static inline bool __pte_protnone(unsigned long pte)
+{
+       return (pte & (pgprot_val(PAGE_NONE) | _PAGE_RWX)) == pgprot_val(PAGE_NONE);
+}
+
 static inline bool __pte_flags_need_flush(unsigned long oldval,
                                          unsigned long newval)
 {
@@ -164,8 +169,8 @@ static inline bool __pte_flags_need_flush(unsigned long oldval,
        /*
         * We do not expect kernel mappings or non-PTEs or not-present PTEs.
         */
-       VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
-       VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
+       VM_WARN_ON_ONCE(!__pte_protnone(oldval) && oldval & _PAGE_PRIVILEGED);
+       VM_WARN_ON_ONCE(!__pte_protnone(newval) && newval & _PAGE_PRIVILEGED);
        VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
        VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
        VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
index 2087a78..5fff0d0 100644 (file)
@@ -290,6 +290,9 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
 static int ppr_get(struct task_struct *target, const struct user_regset *regset,
                   struct membuf to)
 {
+       if (!target->thread.regs)
+               return -EINVAL;
+
        return membuf_write(&to, &target->thread.regs->ppr, sizeof(u64));
 }
 
@@ -297,6 +300,9 @@ static int ppr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count, const void *kbuf,
                   const void __user *ubuf)
 {
+       if (!target->thread.regs)
+               return -EINVAL;
+
        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.regs->ppr, 0, sizeof(u64));
 }
index 4c5405f..d23e25e 100644 (file)
@@ -576,6 +576,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                break;
 #endif
 
+#ifdef CONFIG_HAVE_KVM_IRQFD
+       case KVM_CAP_IRQFD_RESAMPLE:
+               r = !xive_enabled();
+               break;
+#endif
+
        case KVM_CAP_PPC_ALLOC_HTAB:
                r = hv_enabled;
                break;
index b44ce71..16cfe56 100644 (file)
@@ -366,6 +366,7 @@ void update_numa_distance(struct device_node *node)
        WARN(numa_distance_table[nid][nid] == -1,
             "NUMA distance details for node %d not provided\n", nid);
 }
+EXPORT_SYMBOL_GPL(update_numa_distance);
 
 /*
  * ibm,numa-lookup-index-table= {N, domainid1, domainid2, ..... domainidN}
index 2f83855..1a53e04 100644 (file)
@@ -1428,6 +1428,13 @@ static int papr_scm_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       /*
+        * open firmware platform device create won't update the NUMA 
+        * distance table. For PAPR SCM devices we use numa_map_to_online_node()
+        * to find the nearest online NUMA node and that requires correct
+        * distance table information.
+        */
+       update_numa_distance(dn);
 
        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
index 5591123..5131804 100644 (file)
@@ -856,6 +856,13 @@ int pseries_vas_dlpar_cpu(void)
 {
        int new_nr_creds, rc;
 
+       /*
+        * NX-GZIP is not enabled. Nothing to do for DLPAR event
+        */
+       if (!copypaste_feat)
+               return 0;
+
+
        rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES,
                                      vascaps[VAS_GZIP_DEF_FEAT_TYPE].feat,
                                      (u64)virt_to_phys(&hv_cop_caps));
@@ -1012,6 +1019,7 @@ static int __init pseries_vas_init(void)
         * Linux supports user space COPY/PASTE only with Radix
         */
        if (!radix_enabled()) {
+               copypaste_feat = false;
                pr_err("API is supported only with radix page tables\n");
                return -ENOTSUPP;
        }
index 5b182d1..eb7f29a 100644 (file)
@@ -126,6 +126,7 @@ config RISCV
        select OF_IRQ
        select PCI_DOMAINS_GENERIC if PCI
        select PCI_MSI if PCI
+       select RISCV_ALTERNATIVE if !XIP_KERNEL
        select RISCV_INTC
        select RISCV_TIMER if RISCV_SBI
        select SIFIVE_PLIC
@@ -401,9 +402,8 @@ config RISCV_ISA_C
 config RISCV_ISA_SVPBMT
        bool "SVPBMT extension support"
        depends on 64BIT && MMU
-       depends on !XIP_KERNEL
+       depends on RISCV_ALTERNATIVE
        default y
-       select RISCV_ALTERNATIVE
        help
           Adds support to dynamically detect the presence of the SVPBMT
           ISA-extension (Supervisor-mode: page-based memory types) and
@@ -428,8 +428,8 @@ config TOOLCHAIN_HAS_ZBB
 config RISCV_ISA_ZBB
        bool "Zbb extension support for bit manipulation instructions"
        depends on TOOLCHAIN_HAS_ZBB
-       depends on !XIP_KERNEL && MMU
-       select RISCV_ALTERNATIVE
+       depends on MMU
+       depends on RISCV_ALTERNATIVE
        default y
        help
           Adds support to dynamically detect the presence of the ZBB
@@ -443,9 +443,9 @@ config RISCV_ISA_ZBB
 
 config RISCV_ISA_ZICBOM
        bool "Zicbom extension support for non-coherent DMA operation"
-       depends on !XIP_KERNEL && MMU
+       depends on MMU
+       depends on RISCV_ALTERNATIVE
        default y
-       select RISCV_ALTERNATIVE
        select RISCV_DMA_NONCOHERENT
        help
           Adds support to dynamically detect the presence of the ZICBOM
index 69621ae..0c8f465 100644 (file)
@@ -2,8 +2,7 @@ menu "CPU errata selection"
 
 config ERRATA_SIFIVE
        bool "SiFive errata"
-       depends on !XIP_KERNEL
-       select RISCV_ALTERNATIVE
+       depends on RISCV_ALTERNATIVE
        help
          All SiFive errata Kconfig depend on this Kconfig. Disabling
          this Kconfig will disable all SiFive errata. Please say "Y"
@@ -35,8 +34,7 @@ config ERRATA_SIFIVE_CIP_1200
 
 config ERRATA_THEAD
        bool "T-HEAD errata"
-       depends on !XIP_KERNEL
-       select RISCV_ALTERNATIVE
+       depends on RISCV_ALTERNATIVE
        help
          All T-HEAD errata Kconfig depend on this Kconfig. Disabling
          this Kconfig will disable all T-HEAD errata. Please say "Y"
index 07e2e26..f87c516 100644 (file)
                                         <&sysclk K210_CLK_APB0>;
                                clock-names = "ssi_clk", "pclk";
                                resets = <&sysrst K210_RST_SPI2>;
-                               spi-max-frequency = <25000000>;
                        };
 
                        i2s0: i2s@50250000 {
index 5c3e7b9..0a55099 100644 (file)
  */
 enum fixed_addresses {
        FIX_HOLE,
+       /*
+        * The fdt fixmap mapping must be PMD aligned and will be mapped
+        * using PMD entries in fixmap_pmd in 64-bit and a PGD entry in 32-bit.
+        */
+       FIX_FDT_END,
+       FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
+
+       /* Below fixmaps will be mapped using fixmap_pte */
        FIX_PTE,
        FIX_PMD,
        FIX_PUD,
index e3021b2..6263a0d 100644 (file)
@@ -57,18 +57,31 @@ struct riscv_isa_ext_data {
        unsigned int isa_ext_id;
 };
 
+unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
+
+#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext)
+
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
+#define riscv_isa_extension_available(isa_bitmap, ext) \
+       __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
+
 static __always_inline bool
 riscv_has_extension_likely(const unsigned long ext)
 {
        compiletime_assert(ext < RISCV_ISA_EXT_MAX,
                           "ext must be < RISCV_ISA_EXT_MAX");
 
-       asm_volatile_goto(
-       ALTERNATIVE("j  %l[l_no]", "nop", 0, %[ext], 1)
-       :
-       : [ext] "i" (ext)
-       :
-       : l_no);
+       if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+               asm_volatile_goto(
+               ALTERNATIVE("j  %l[l_no]", "nop", 0, %[ext], 1)
+               :
+               : [ext] "i" (ext)
+               :
+               : l_no);
+       } else {
+               if (!__riscv_isa_extension_available(NULL, ext))
+                       goto l_no;
+       }
 
        return true;
 l_no:
@@ -81,26 +94,23 @@ riscv_has_extension_unlikely(const unsigned long ext)
        compiletime_assert(ext < RISCV_ISA_EXT_MAX,
                           "ext must be < RISCV_ISA_EXT_MAX");
 
-       asm_volatile_goto(
-       ALTERNATIVE("nop", "j   %l[l_yes]", 0, %[ext], 1)
-       :
-       : [ext] "i" (ext)
-       :
-       : l_yes);
+       if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+               asm_volatile_goto(
+               ALTERNATIVE("nop", "j   %l[l_yes]", 0, %[ext], 1)
+               :
+               : [ext] "i" (ext)
+               :
+               : l_yes);
+       } else {
+               if (__riscv_isa_extension_available(NULL, ext))
+                       goto l_yes;
+       }
 
        return false;
 l_yes:
        return true;
 }
 
-unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
-
-#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext)
-
-bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
-#define riscv_isa_extension_available(isa_bitmap, ext) \
-       __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
-
 #endif
 
 #endif /* _ASM_RISCV_HWCAP_H */
index ab05f89..f641837 100644 (file)
 
 #define FIXADDR_TOP      PCI_IO_START
 #ifdef CONFIG_64BIT
-#define FIXADDR_SIZE     PMD_SIZE
+#define MAX_FDT_SIZE    PMD_SIZE
+#define FIX_FDT_SIZE    (MAX_FDT_SIZE + SZ_2M)
+#define FIXADDR_SIZE     (PMD_SIZE + FIX_FDT_SIZE)
 #else
-#define FIXADDR_SIZE     PGDIR_SIZE
+#define MAX_FDT_SIZE    PGDIR_SIZE
+#define FIX_FDT_SIZE    MAX_FDT_SIZE
+#define FIXADDR_SIZE     (PGDIR_SIZE + FIX_FDT_SIZE)
 #endif
 #define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
 
index 376d282..a059b73 100644 (file)
@@ -278,12 +278,8 @@ void __init setup_arch(char **cmdline_p)
 #if IS_ENABLED(CONFIG_BUILTIN_DTB)
        unflatten_and_copy_device_tree();
 #else
-       if (early_init_dt_verify(__va(XIP_FIXUP(dtb_early_pa))))
-               unflatten_device_tree();
-       else
-               pr_err("No DTB found in kernel mappings\n");
+       unflatten_device_tree();
 #endif
-       early_init_fdt_scan_reserved_mem();
        misc_mem_init();
 
        init_resources();
index bfb2afa..dee66c9 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/signal32.h>
 #include <asm/switch_to.h>
 #include <asm/csr.h>
+#include <asm/cacheflush.h>
 
 extern u32 __user_rt_sigreturn[2];
 
@@ -181,6 +182,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 {
        struct rt_sigframe __user *frame;
        long err = 0;
+       unsigned long __maybe_unused addr;
 
        frame = get_sigframe(ksig, regs, sizeof(*frame));
        if (!access_ok(frame, sizeof(*frame)))
@@ -209,7 +211,12 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
        if (copy_to_user(&frame->sigreturn_code, __user_rt_sigreturn,
                         sizeof(frame->sigreturn_code)))
                return -EFAULT;
-       regs->ra = (unsigned long)&frame->sigreturn_code;
+
+       addr = (unsigned long)&frame->sigreturn_code;
+       /* Make sure the two instructions are pushed to icache. */
+       flush_icache_range(addr, addr + sizeof(frame->sigreturn_code));
+
+       regs->ra = addr;
 #endif /* CONFIG_MMU */
 
        /*
index ad34519..3ac2ff6 100644 (file)
@@ -147,10 +147,8 @@ static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
                return;
 
        delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
-       if (delta_ns) {
-               hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
-               t->next_set = true;
-       }
+       hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
+       t->next_set = true;
 }
 
 static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
index 478d676..0f14f4a 100644 (file)
@@ -57,7 +57,6 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
 EXPORT_SYMBOL(empty_zero_page);
 
 extern char _start[];
-#define DTB_EARLY_BASE_VA      PGDIR_SIZE
 void *_dtb_early_va __initdata;
 uintptr_t _dtb_early_pa __initdata;
 
@@ -236,31 +235,22 @@ static void __init setup_bootmem(void)
        set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
 
        reserve_initrd_mem();
+
+       /*
+        * No allocation should be done before reserving the memory as defined
+        * in the device tree, otherwise the allocation could end up in a
+        * reserved region.
+        */
+       early_init_fdt_scan_reserved_mem();
+
        /*
         * If DTB is built in, no need to reserve its memblock.
         * Otherwise, do reserve it but avoid using
         * early_init_fdt_reserve_self() since __pa() does
         * not work for DTB pointers that are fixmap addresses
         */
-       if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) {
-               /*
-                * In case the DTB is not located in a memory region we won't
-                * be able to locate it later on via the linear mapping and
-                * get a segfault when accessing it via __va(dtb_early_pa).
-                * To avoid this situation copy DTB to a memory region.
-                * Note that memblock_phys_alloc will also reserve DTB region.
-                */
-               if (!memblock_is_memory(dtb_early_pa)) {
-                       size_t fdt_size = fdt_totalsize(dtb_early_va);
-                       phys_addr_t new_dtb_early_pa = memblock_phys_alloc(fdt_size, PAGE_SIZE);
-                       void *new_dtb_early_va = early_memremap(new_dtb_early_pa, fdt_size);
-
-                       memcpy(new_dtb_early_va, dtb_early_va, fdt_size);
-                       early_memunmap(new_dtb_early_va, fdt_size);
-                       _dtb_early_pa = new_dtb_early_pa;
-               } else
-                       memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
-       }
+       if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
+               memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
 
        dma_contiguous_reserve(dma32_phys_limit);
        if (IS_ENABLED(CONFIG_64BIT))
@@ -279,9 +269,6 @@ pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
 
 pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
-static p4d_t __maybe_unused early_dtb_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
-static pud_t __maybe_unused early_dtb_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
-static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
 
 #ifdef CONFIG_XIP_KERNEL
 #define pt_ops                 (*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
@@ -626,9 +613,6 @@ static void __init create_p4d_mapping(p4d_t *p4dp,
 #define trampoline_pgd_next    (pgtable_l5_enabled ?                   \
                (uintptr_t)trampoline_p4d : (pgtable_l4_enabled ?       \
                (uintptr_t)trampoline_pud : (uintptr_t)trampoline_pmd))
-#define early_dtb_pgd_next     (pgtable_l5_enabled ?                   \
-               (uintptr_t)early_dtb_p4d : (pgtable_l4_enabled ?        \
-               (uintptr_t)early_dtb_pud : (uintptr_t)early_dtb_pmd))
 #else
 #define pgd_next_t             pte_t
 #define alloc_pgd_next(__va)   pt_ops.alloc_pte(__va)
@@ -636,7 +620,6 @@ static void __init create_p4d_mapping(p4d_t *p4dp,
 #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)     \
        create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
 #define fixmap_pgd_next                ((uintptr_t)fixmap_pte)
-#define early_dtb_pgd_next     ((uintptr_t)early_dtb_pmd)
 #define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
 #define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
 #define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
@@ -860,32 +843,28 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
  * this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR
  * entry.
  */
-static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa)
+static void __init create_fdt_early_page_table(pgd_t *pgdir,
+                                              uintptr_t fix_fdt_va,
+                                              uintptr_t dtb_pa)
 {
-#ifndef CONFIG_BUILTIN_DTB
        uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
 
-       create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
-                          IS_ENABLED(CONFIG_64BIT) ? early_dtb_pgd_next : pa,
-                          PGDIR_SIZE,
-                          IS_ENABLED(CONFIG_64BIT) ? PAGE_TABLE : PAGE_KERNEL);
-
-       if (pgtable_l5_enabled)
-               create_p4d_mapping(early_dtb_p4d, DTB_EARLY_BASE_VA,
-                                  (uintptr_t)early_dtb_pud, P4D_SIZE, PAGE_TABLE);
-
-       if (pgtable_l4_enabled)
-               create_pud_mapping(early_dtb_pud, DTB_EARLY_BASE_VA,
-                                  (uintptr_t)early_dtb_pmd, PUD_SIZE, PAGE_TABLE);
+#ifndef CONFIG_BUILTIN_DTB
+       /* Make sure the fdt fixmap address is always aligned on PMD size */
+       BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE));
 
-       if (IS_ENABLED(CONFIG_64BIT)) {
-               create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
+       /* In 32-bit only, the fdt lies in its own PGD */
+       if (!IS_ENABLED(CONFIG_64BIT)) {
+               create_pgd_mapping(early_pg_dir, fix_fdt_va,
+                                  pa, MAX_FDT_SIZE, PAGE_KERNEL);
+       } else {
+               create_pmd_mapping(fixmap_pmd, fix_fdt_va,
                                   pa, PMD_SIZE, PAGE_KERNEL);
-               create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
+               create_pmd_mapping(fixmap_pmd, fix_fdt_va + PMD_SIZE,
                                   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
        }
 
-       dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
+       dtb_early_va = (void *)fix_fdt_va + (dtb_pa & (PMD_SIZE - 1));
 #else
        /*
         * For 64-bit kernel, __va can't be used since it would return a linear
@@ -1055,7 +1034,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
        create_kernel_page_table(early_pg_dir, true);
 
        /* Setup early mapping for FDT early scan */
-       create_fdt_early_page_table(early_pg_dir, dtb_pa);
+       create_fdt_early_page_table(early_pg_dir,
+                                   __fix_to_virt(FIX_FDT), dtb_pa);
 
        /*
         * Bootime fixmap only can handle PMD_SIZE mapping. Thus, boot-ioremap
@@ -1097,6 +1077,16 @@ static void __init setup_vm_final(void)
        u64 i;
 
        /* Setup swapper PGD for fixmap */
+#if !defined(CONFIG_64BIT)
+       /*
+        * In 32-bit, the device tree lies in a pgd entry, so it must be copied
+        * directly in swapper_pg_dir in addition to the pgd entry that points
+        * to fixmap_pte.
+        */
+       unsigned long idx = pgd_index(__fix_to_virt(FIX_FDT));
+
+       set_pgd(&swapper_pg_dir[idx], early_pg_dir[idx]);
+#endif
        create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
                           __pa_symbol(fixmap_pgd_next),
                           PGDIR_SIZE, PAGE_TABLE);
index d16bf71..5730797 100644 (file)
@@ -84,12 +84,7 @@ CFLAGS_string.o                      += $(PURGATORY_CFLAGS)
 CFLAGS_REMOVE_ctype.o          += $(PURGATORY_CFLAGS_REMOVE)
 CFLAGS_ctype.o                 += $(PURGATORY_CFLAGS)
 
-AFLAGS_REMOVE_entry.o          += -Wa,-gdwarf-2
-AFLAGS_REMOVE_memcpy.o         += -Wa,-gdwarf-2
-AFLAGS_REMOVE_memset.o         += -Wa,-gdwarf-2
-AFLAGS_REMOVE_strcmp.o         += -Wa,-gdwarf-2
-AFLAGS_REMOVE_strlen.o         += -Wa,-gdwarf-2
-AFLAGS_REMOVE_strncmp.o                += -Wa,-gdwarf-2
+asflags-remove-y               += $(foreach x, -g -gdwarf-4 -gdwarf-5, $(x) -Wa,$(x))
 
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
                $(call if_changed,ld)
index b3235ab..ed646c5 100644 (file)
@@ -162,7 +162,7 @@ vdso_prepare: prepare0
 
 ifdef CONFIG_EXPOLINE_EXTERN
 modules_prepare: expoline_prepare
-expoline_prepare:
+expoline_prepare: scripts
        $(Q)$(MAKE) $(build)=arch/s390/lib/expoline arch/s390/lib/expoline/expoline.o
 endif
 endif
index cf9659e..ea244a7 100644 (file)
@@ -474,9 +474,7 @@ long arch_ptrace(struct task_struct *child, long request,
                }
                return 0;
        case PTRACE_GET_LAST_BREAK:
-               put_user(child->thread.last_break,
-                        (unsigned long __user *) data);
-               return 0;
+               return put_user(child->thread.last_break, (unsigned long __user *)data);
        case PTRACE_ENABLE_TE:
                if (!MACHINE_HAS_TE)
                        return -EIO;
@@ -824,9 +822,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                }
                return 0;
        case PTRACE_GET_LAST_BREAK:
-               put_user(child->thread.last_break,
-                        (unsigned int __user *) data);
-               return 0;
+               return put_user(child->thread.last_break, (unsigned int __user *)data);
        }
        return compat_ptrace_request(child, request, addr, data);
 }
index 0ee02da..2cda8d9 100644 (file)
@@ -271,10 +271,18 @@ static int handle_prog(struct kvm_vcpu *vcpu)
  * handle_external_interrupt - used for external interruption interceptions
  * @vcpu: virtual cpu
  *
- * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
- * the new PSW does not have external interrupts disabled. In the first case,
- * we've got to deliver the interrupt manually, and in the second case, we
- * drop to userspace to handle the situation there.
+ * This interception occurs if:
+ * - the CPUSTAT_EXT_INT bit was already set when the external interrupt
+ *   occurred. In this case, the interrupt needs to be injected manually to
+ *   preserve interrupt priority.
+ * - the external new PSW has external interrupts enabled, which will cause an
+ *   interruption loop. We drop to userspace in this case.
+ *
+ * The latter case can be detected by inspecting the external mask bit in the
+ * external new psw.
+ *
+ * Under PV, only the latter case can occur, since interrupt priorities are
+ * handled in the ultravisor.
  */
 static int handle_external_interrupt(struct kvm_vcpu *vcpu)
 {
@@ -285,10 +293,18 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
 
        vcpu->stat.exit_external_interrupt++;
 
-       rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
-       if (rc)
-               return rc;
-       /* We can not handle clock comparator or timer interrupt with bad PSW */
+       if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+               newpsw = vcpu->arch.sie_block->gpsw;
+       } else {
+               rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
+               if (rc)
+                       return rc;
+       }
+
+       /*
+        * Clock comparator or timer interrupt with external interrupt enabled
+        * will cause interrupt loop. Drop to userspace.
+        */
        if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
            (newpsw.mask & PSW_MASK_EXT))
                return -EOPNOTSUPP;
index 39b3656..1eeb9ae 100644 (file)
@@ -573,6 +573,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_S390_VCPU_RESETS:
        case KVM_CAP_SET_GUEST_DEBUG:
        case KVM_CAP_S390_DIAG318:
+       case KVM_CAP_IRQFD_RESAMPLE:
                r = 1;
                break;
        case KVM_CAP_SET_GUEST_DEBUG2:
index 720036f..d442140 100644 (file)
@@ -172,7 +172,7 @@ unsigned long __clear_user(void __user *to, unsigned long size)
                "4: slgr  %0,%0\n"
                "5:\n"
                EX_TABLE(0b,2b) EX_TABLE(6b,2b) EX_TABLE(3b,5b) EX_TABLE(7b,5b)
-               : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
+               : "+&a" (size), "+&a" (to), "+a" (tmp1), "=&a" (tmp2)
                : "a" (empty_zero_page), [spec] "d" (spec.val)
                : "cc", "memory", "0");
        return size;
index b70559b..2106a2b 100644 (file)
@@ -3,9 +3,14 @@ core-y += arch/x86/crypto/
 
 #
 # Disable SSE and other FP/SIMD instructions to match normal x86
+# This is required to work around issues in older LLVM versions, but breaks
+# GCC versions < 11. See:
+# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99652
 #
+ifeq ($(CONFIG_CC_IS_CLANG),y)
 KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
 KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
+endif
 
 ifeq ($(CONFIG_X86_32),y)
 START := 0x8048000
index cbaf174..b3af2d4 100644 (file)
 
 #define INTEL_FAM6_LUNARLAKE_M         0xBD
 
+#define INTEL_FAM6_ARROWLAKE           0xC6
+
 /* "Small Core" Processors (Atom/E-Core) */
 
 #define INTEL_FAM6_ATOM_BONNELL                0x1C /* Diamondville, Pineview */
index 1c38174..0dac4ab 100644 (file)
@@ -146,7 +146,11 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
 
                pr_debug("Local APIC address 0x%08x\n", madt->address);
        }
-       if (madt->header.revision >= 5)
+
+       /* ACPI 6.3 and newer support the online capable bit. */
+       if (acpi_gbl_FADT.header.revision > 6 ||
+           (acpi_gbl_FADT.header.revision == 6 &&
+            acpi_gbl_FADT.minor_revision >= 3))
                acpi_support_online_capable = true;
 
        default_acpi_madt_oem_check(madt->header.oem_id,
@@ -193,7 +197,8 @@ static bool __init acpi_is_processor_usable(u32 lapic_flags)
        if (lapic_flags & ACPI_MADT_ENABLED)
                return true;
 
-       if (acpi_support_online_capable && (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
+       if (!acpi_support_online_capable ||
+           (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
                return true;
 
        return false;
index f36dc2f..f119736 100644 (file)
@@ -358,12 +358,16 @@ static void __init ms_hyperv_init_platform(void)
         * To mirror what Windows does we should extract CPU management
         * features and use the ReservedIdentityBit to detect if Linux is the
         * root partition. But that requires negotiating CPU management
-        * interface (a process to be finalized).
+        * interface (a process to be finalized). For now, use the privilege
+        * flag as the indicator for running as root.
         *
-        * For now, use the privilege flag as the indicator for running as
-        * root.
+        * Hyper-V should never specify running as root and as a Confidential
+        * VM. But to protect against a compromised/malicious Hyper-V trying
+        * to exploit root behavior to expose Confidential VM memory, ignore
+        * the root partition setting if also a Confidential VM.
         */
-       if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_CPU_MANAGEMENT) {
+       if ((ms_hyperv.priv_high & HV_CPU_MANAGEMENT) &&
+           !(ms_hyperv.priv_high & HV_ISOLATION)) {
                hv_root_partition = true;
                pr_info("Hyper-V: running as root partition\n");
        }
index ef80d36..10622cf 100644 (file)
@@ -33,8 +33,8 @@ static int __init iommu_init_noop(void) { return 0; }
 static void iommu_shutdown_noop(void) { }
 bool __init bool_x86_init_noop(void) { return false; }
 void x86_op_int_noop(int cpu) { }
-static __init int set_rtc_noop(const struct timespec64 *now) { return -EINVAL; }
-static __init void get_rtc_noop(struct timespec64 *now) { }
+static int set_rtc_noop(const struct timespec64 *now) { return -EINVAL; }
+static void get_rtc_noop(struct timespec64 *now) { }
 
 static __initconst const struct of_device_id of_cmos_match[] = {
        { .compatible = "motorola,mc146818" },
index 042dee5..995eb50 100644 (file)
@@ -368,9 +368,39 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
                mask_after = e->fields.mask;
                if (mask_before != mask_after)
                        kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
-               if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
-                   && ioapic->irr & (1 << index))
-                       ioapic_service(ioapic, index, false);
+               if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
+                   ioapic->irr & (1 << index) && !e->fields.mask && !e->fields.remote_irr) {
+                       /*
+                        * Pending status in irr may be outdated: the IRQ line may have
+                        * already been deasserted by a device while the IRQ was masked.
+                        * This occurs, for instance, if the interrupt is handled in a
+                        * Linux guest as a oneshot interrupt (IRQF_ONESHOT). In this
+                        * case the guest acknowledges the interrupt to the device in
+                        * its threaded irq handler, i.e. after the EOI but before
+                        * unmasking, so at the time of unmasking the IRQ line is
+                        * already down but our pending irr bit is still set. In such
+                        * cases, injecting this pending interrupt to the guest is
+                        * buggy: the guest will receive an extra unwanted interrupt.
+                        *
+                        * So we need to check here if the IRQ is actually still pending.
+                        * As we are generally not able to probe the IRQ line status
+                        * directly, we do it through irqfd resampler. Namely, we clear
+                        * the pending status and notify the resampler that this interrupt
+                        * is done, without actually injecting it into the guest. If the
+                        * IRQ line is actually already deasserted, we are done. If it is
+                        * still asserted, a new interrupt will be shortly triggered
+                        * through irqfd and injected into the guest.
+                        *
+                        * If, however, it's not possible to resample (no irqfd resampler
+                        * registered for this irq), then unconditionally inject this
+                        * pending interrupt into the guest, so the guest will not miss
+                        * an interrupt, although may get an extra unwanted interrupt.
+                        */
+                       if (kvm_notify_irqfd_resampler(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index))
+                               ioapic->irr &= ~(1 << index);
+                       else
+                               ioapic_service(ioapic, index, false);
+               }
                if (e->fields.delivery_mode == APIC_DM_FIXED) {
                        struct kvm_lapic_irq irq;
 
index 287e98e..6272dab 100644 (file)
@@ -12,6 +12,11 @@ int hv_remote_flush_tlb_with_range(struct kvm *kvm,
 int hv_remote_flush_tlb(struct kvm *kvm);
 void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp);
 #else /* !CONFIG_HYPERV */
+static inline int hv_remote_flush_tlb(struct kvm *kvm)
+{
+       return -EOPNOTSUPP;
+}
+
 static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
 {
 }
index 252e7f3..f25bc3c 100644 (file)
@@ -3729,7 +3729,7 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
        svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
 }
 
-static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
+static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
@@ -3753,6 +3753,37 @@ static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
                svm->current_vmcb->asid_generation--;
 }
 
+static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
+{
+       hpa_t root_tdp = vcpu->arch.mmu->root.hpa;
+
+       /*
+        * When running on Hyper-V with EnlightenedNptTlb enabled, explicitly
+        * flush the NPT mappings via hypercall as flushing the ASID only
+        * affects virtual to physical mappings, it does not invalidate guest
+        * physical to host physical mappings.
+        */
+       if (svm_hv_is_enlightened_tlb_enabled(vcpu) && VALID_PAGE(root_tdp))
+               hyperv_flush_guest_mapping(root_tdp);
+
+       svm_flush_tlb_asid(vcpu);
+}
+
+static void svm_flush_tlb_all(struct kvm_vcpu *vcpu)
+{
+       /*
+        * When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB
+        * flushes should be routed to hv_remote_flush_tlb() without requesting
+        * a "regular" remote flush.  Reaching this point means either there's
+        * a KVM bug or a prior hv_remote_flush_tlb() call failed, both of
+        * which might be fatal to the guest.  Yell, but try to recover.
+        */
+       if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu)))
+               hv_remote_flush_tlb(vcpu->kvm);
+
+       svm_flush_tlb_asid(vcpu);
+}
+
 static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -4745,10 +4776,10 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .set_rflags = svm_set_rflags,
        .get_if_flag = svm_get_if_flag,
 
-       .flush_tlb_all = svm_flush_tlb_current,
+       .flush_tlb_all = svm_flush_tlb_all,
        .flush_tlb_current = svm_flush_tlb_current,
        .flush_tlb_gva = svm_flush_tlb_gva,
-       .flush_tlb_guest = svm_flush_tlb_current,
+       .flush_tlb_guest = svm_flush_tlb_asid,
 
        .vcpu_pre_run = svm_vcpu_pre_run,
        .vcpu_run = svm_vcpu_run,
index cff838f..786d46d 100644 (file)
@@ -6,6 +6,8 @@
 #ifndef __ARCH_X86_KVM_SVM_ONHYPERV_H__
 #define __ARCH_X86_KVM_SVM_ONHYPERV_H__
 
+#include <asm/mshyperv.h>
+
 #if IS_ENABLED(CONFIG_HYPERV)
 
 #include "kvm_onhyperv.h"
@@ -15,6 +17,14 @@ static struct kvm_x86_ops svm_x86_ops;
 
 int svm_hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu);
 
+static inline bool svm_hv_is_enlightened_tlb_enabled(struct kvm_vcpu *vcpu)
+{
+       struct hv_vmcb_enlightenments *hve = &to_svm(vcpu)->vmcb->control.hv_enlightenments;
+
+       return ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB &&
+              !!hve->hv_enlightenments_control.enlightened_npt_tlb;
+}
+
 static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
 {
        struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments;
@@ -80,6 +90,11 @@ static inline void svm_hv_update_vp_id(struct vmcb *vmcb, struct kvm_vcpu *vcpu)
 }
 #else
 
+static inline bool svm_hv_is_enlightened_tlb_enabled(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+
 static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
 {
 }
index 1bc2b80..7684876 100644 (file)
@@ -3868,7 +3868,12 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu)
                exit_qual = 0;
        }
 
-       if (ex->has_error_code) {
+       /*
+        * Unlike AMD's Paged Real Mode, which reports an error code on #PF
+        * VM-Exits even if the CPU is in Real Mode, Intel VMX never sets the
+        * "has error code" flag on VM-Exit if the CPU is in Real Mode.
+        */
+       if (ex->has_error_code && is_protmode(vcpu)) {
                /*
                 * Intel CPUs do not generate error codes with bits 31:16 set,
                 * and more importantly VMX disallows setting bits 31:16 in the
index 7713420..3d852ce 100644 (file)
@@ -4432,6 +4432,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_VAPIC:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES:
+       case KVM_CAP_IRQFD_RESAMPLE:
                r = 1;
                break;
        case KVM_CAP_EXIT_HYPERCALL:
@@ -8903,6 +8904,8 @@ restart:
        }
 
        if (ctxt->have_exception) {
+               WARN_ON_ONCE(vcpu->mmio_needed && !vcpu->mmio_is_write);
+               vcpu->mmio_needed = false;
                r = 1;
                inject_emulated_exception(vcpu);
        } else if (vcpu->arch.pio.count) {
@@ -9906,13 +9909,20 @@ int kvm_check_nested_events(struct kvm_vcpu *vcpu)
 
 static void kvm_inject_exception(struct kvm_vcpu *vcpu)
 {
+       /*
+        * Suppress the error code if the vCPU is in Real Mode, as Real Mode
+        * exceptions don't report error codes.  The presence of an error code
+        * is carried with the exception and only stripped when the exception
+        * is injected, as intercepted #PF VM-Exits for AMD's Paged Real Mode do
+        * report an error code despite the CPU being in Real Mode.
+        */
+       vcpu->arch.exception.has_error_code &= is_protmode(vcpu);
+
        trace_kvm_inj_exception(vcpu->arch.exception.vector,
                                vcpu->arch.exception.has_error_code,
                                vcpu->arch.exception.error_code,
                                vcpu->arch.exception.injected);
 
-       if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
-               vcpu->arch.exception.error_code = false;
        static_call(kvm_x86_inject_exception)(vcpu);
 }
 
index 615a76d..bf5161d 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/dmi.h>
 #include <linux/pci.h>
 #include <linux/vgaarb.h>
+#include <asm/amd_nb.h>
 #include <asm/hpet.h>
 #include <asm/pci_x86.h>
 
@@ -824,3 +825,23 @@ static void rs690_fix_64bit_dma(struct pci_dev *pdev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
 
 #endif
+
+#ifdef CONFIG_AMD_NB
+
+#define AMD_15B8_RCC_DEV2_EPF0_STRAP2                                  0x10136008
+#define AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK       0x00000080L
+
+static void quirk_clear_strap_no_soft_reset_dev2_f0(struct pci_dev *dev)
+{
+       u32 data;
+
+       if (!amd_smn_read(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, &data)) {
+               data &= ~AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK;
+               if (amd_smn_write(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, data))
+                       pci_err(dev, "Failed to write data 0x%x\n", data);
+       } else {
+               pci_err(dev, "Failed to read data\n");
+       }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b8, quirk_clear_strap_no_soft_reset_dev2_f0);
+#endif
index 17f09dc..82fec66 100644 (file)
@@ -69,8 +69,7 @@ CFLAGS_sha256.o                       += $(PURGATORY_CFLAGS)
 CFLAGS_REMOVE_string.o         += $(PURGATORY_CFLAGS_REMOVE)
 CFLAGS_string.o                        += $(PURGATORY_CFLAGS)
 
-AFLAGS_REMOVE_setup-x86_$(BITS).o      += -Wa,-gdwarf-2
-AFLAGS_REMOVE_entry64.o                        += -Wa,-gdwarf-2
+asflags-remove-y               += $(foreach x, -g -gdwarf-4 -gdwarf-5, $(x) -Wa,$(x))
 
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
                $(call if_changed,ld)
index cd98366..f0a7d1c 100644 (file)
@@ -539,7 +539,7 @@ static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
 
 void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
 {
-       size_t len;
+       size_t len, off = 0;
 
        if (!sp)
                sp = stack_pointer(task);
@@ -548,9 +548,17 @@ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
                  kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
 
        printk("%sStack:\n", loglvl);
-       print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
-                      STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
-                      sp, len, false);
+       while (off < len) {
+               u8 line[STACK_DUMP_LINE_SIZE];
+               size_t line_len = len - off > STACK_DUMP_LINE_SIZE ?
+                       STACK_DUMP_LINE_SIZE : len - off;
+
+               __memcpy(line, (u8 *)sp + off, line_len);
+               print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
+                              STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
+                              line, line_len, false);
+               off += STACK_DUMP_LINE_SIZE;
+       }
        show_trace(task, sp, loglvl);
 }
 
index cf1a39a..f0ea9dc 100644 (file)
@@ -1359,8 +1359,6 @@ bool blk_rq_is_poll(struct request *rq)
                return false;
        if (rq->mq_hctx->type != HCTX_TYPE_POLL)
                return false;
-       if (WARN_ON_ONCE(!rq->bio))
-               return false;
        return true;
 }
 EXPORT_SYMBOL_GPL(blk_rq_is_poll);
@@ -1368,7 +1366,7 @@ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
 {
        do {
-               bio_poll(rq->bio, NULL, 0);
+               blk_mq_poll(rq->q, blk_rq_to_qc(rq), NULL, 0);
                cond_resched();
        } while (!completion_done(wait));
 }
index 02d9cfb..7f87473 100644 (file)
@@ -368,7 +368,6 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
        if (disk->open_partitions)
                return -EBUSY;
 
-       set_bit(GD_NEED_PART_SCAN, &disk->state);
        /*
         * If the device is opened exclusively by current thread already, it's
         * safe to scan partitons, otherwise, use bd_prepare_to_claim() to
@@ -381,12 +380,19 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
                        return ret;
        }
 
+       set_bit(GD_NEED_PART_SCAN, &disk->state);
        bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL);
        if (IS_ERR(bdev))
                ret =  PTR_ERR(bdev);
        else
                blkdev_put(bdev, mode & ~FMODE_EXCL);
 
+       /*
+        * If blkdev_get_by_dev() failed early, GD_NEED_PART_SCAN is still set,
+        * and this will cause a later re-assembly of a partitioned raid
+        * device to create partitions for the underlying disk.
+        */
+       clear_bit(GD_NEED_PART_SCAN, &disk->state);
        if (!(mode & FMODE_EXCL))
                bd_abort_claiming(disk->part0, disk_scan_partitions);
        return ret;
index 231f29b..6a320a7 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/pci.h>
 
 #include <drm/drm_accel.h>
-#include <drm/drm_drv.h>
 #include <drm/drm_file.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_ioctl.h>
@@ -118,6 +117,10 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
        struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
        struct drm_ivpu_param *args = data;
        int ret = 0;
+       int idx;
+
+       if (!drm_dev_enter(dev, &idx))
+               return -ENODEV;
 
        switch (args->param) {
        case DRM_IVPU_PARAM_DEVICE_ID:
@@ -171,6 +174,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
                break;
        }
 
+       drm_dev_exit(idx);
        return ret;
 }
 
@@ -470,8 +474,8 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
 
        vdev->hw->ops = &ivpu_hw_mtl_ops;
        vdev->platform = IVPU_PLATFORM_INVALID;
-       vdev->context_xa_limit.min = IVPU_GLOBAL_CONTEXT_MMU_SSID + 1;
-       vdev->context_xa_limit.max = IVPU_CONTEXT_LIMIT;
+       vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
+       vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
        atomic64_set(&vdev->unique_id_counter, 0);
        xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
        xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
@@ -565,6 +569,8 @@ err_mmu_gctx_fini:
        ivpu_mmu_global_context_fini(vdev);
 err_power_down:
        ivpu_hw_power_down(vdev);
+       if (IVPU_WA(d3hot_after_power_off))
+               pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
 err_xa_destroy:
        xa_destroy(&vdev->submitted_jobs_xa);
        xa_destroy(&vdev->context_xa);
@@ -575,7 +581,11 @@ static void ivpu_dev_fini(struct ivpu_device *vdev)
 {
        ivpu_pm_disable(vdev);
        ivpu_shutdown(vdev);
+       if (IVPU_WA(d3hot_after_power_off))
+               pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
        ivpu_job_done_thread_fini(vdev);
+       ivpu_pm_cancel_recovery(vdev);
+
        ivpu_ipc_fini(vdev);
        ivpu_fw_fini(vdev);
        ivpu_mmu_global_context_fini(vdev);
@@ -622,7 +632,7 @@ static void ivpu_remove(struct pci_dev *pdev)
 {
        struct ivpu_device *vdev = pci_get_drvdata(pdev);
 
-       drm_dev_unregister(&vdev->drm);
+       drm_dev_unplug(&vdev->drm);
        ivpu_dev_fini(vdev);
 }
 
index f47b496..d3013fb 100644 (file)
@@ -7,6 +7,7 @@
 #define __IVPU_DRV_H__
 
 #include <drm/drm_device.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_managed.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_print.h>
 #define PCI_DEVICE_ID_MTL   0x7d1d
 
 #define IVPU_GLOBAL_CONTEXT_MMU_SSID 0
-#define IVPU_CONTEXT_LIMIT          64
+/* SSID 1 is used by the VPU to represent invalid context */
+#define IVPU_USER_CONTEXT_MIN_SSID   2
+#define IVPU_USER_CONTEXT_MAX_SSID   (IVPU_USER_CONTEXT_MIN_SSID + 63)
+
 #define IVPU_NUM_ENGINES            2
 
 #define IVPU_PLATFORM_SILICON 0
@@ -70,6 +74,7 @@
 struct ivpu_wa_table {
        bool punit_disabled;
        bool clear_runtime_mem;
+       bool d3hot_after_power_off;
 };
 
 struct ivpu_hw_info;
index 62bfaa9..382ec12 100644 (file)
 #include "ivpu_mmu.h"
 #include "ivpu_pm.h"
 
-#define TILE_FUSE_ENABLE_BOTH       0x0
-#define TILE_FUSE_ENABLE_UPPER      0x1
-#define TILE_FUSE_ENABLE_LOWER      0x2
-
-#define TILE_SKU_BOTH_MTL           0x3630
-#define TILE_SKU_LOWER_MTL          0x3631
-#define TILE_SKU_UPPER_MTL          0x3632
+#define TILE_FUSE_ENABLE_BOTH        0x0
+#define TILE_SKU_BOTH_MTL            0x3630
 
 /* Work point configuration values */
-#define WP_CONFIG_1_TILE_5_3_RATIO   0x0101
-#define WP_CONFIG_1_TILE_4_3_RATIO   0x0102
-#define WP_CONFIG_2_TILE_5_3_RATIO   0x0201
-#define WP_CONFIG_2_TILE_4_3_RATIO   0x0202
-#define WP_CONFIG_0_TILE_PLL_OFF     0x0000
+#define CONFIG_1_TILE                0x01
+#define CONFIG_2_TILE                0x02
+#define PLL_RATIO_5_3                0x01
+#define PLL_RATIO_4_3                0x02
+#define WP_CONFIG(tile, ratio)       (((tile) << 8) | (ratio))
+#define WP_CONFIG_1_TILE_5_3_RATIO   WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_5_3)
+#define WP_CONFIG_1_TILE_4_3_RATIO   WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_4_3)
+#define WP_CONFIG_2_TILE_5_3_RATIO   WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_5_3)
+#define WP_CONFIG_2_TILE_4_3_RATIO   WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_4_3)
+#define WP_CONFIG_0_TILE_PLL_OFF     WP_CONFIG(0, 0)
 
 #define PLL_REF_CLK_FREQ            (50 * 1000000)
 #define PLL_SIMULATION_FREQ         (10 * 1000000)
-#define PLL_RATIO_TO_FREQ(x)        ((x) * PLL_REF_CLK_FREQ)
 #define PLL_DEFAULT_EPP_VALUE       0x80
 
 #define TIM_SAFE_ENABLE                     0xf1d0dead
@@ -101,6 +100,7 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
 {
        vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
        vdev->wa.clear_runtime_mem = false;
+       vdev->wa.d3hot_after_power_off = true;
 }
 
 static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
@@ -218,7 +218,8 @@ static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
                config = 0;
        }
 
-       ivpu_dbg(vdev, PM, "PLL workpoint request: %d Hz\n", PLL_RATIO_TO_FREQ(target_ratio));
+       ivpu_dbg(vdev, PM, "PLL workpoint request: config 0x%04x pll ratio 0x%x\n",
+                config, target_ratio);
 
        ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, target_ratio, config);
        if (ret) {
@@ -403,11 +404,6 @@ static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
        return ivpu_boot_host_ss_axi_drive(vdev, true);
 }
 
-static int ivpu_boot_host_ss_axi_disable(struct ivpu_device *vdev)
-{
-       return ivpu_boot_host_ss_axi_drive(vdev, false);
-}
-
 static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
 {
        int ret;
@@ -441,11 +437,6 @@ static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
        return ivpu_boot_host_ss_top_noc_drive(vdev, true);
 }
 
-static int ivpu_boot_host_ss_top_noc_disable(struct ivpu_device *vdev)
-{
-       return ivpu_boot_host_ss_top_noc_drive(vdev, false);
-}
-
 static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
 {
        u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
@@ -504,16 +495,6 @@ static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
        REGV_WR32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, val);
 }
 
-static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
-{
-       ivpu_boot_dpu_active_drive(vdev, false);
-       ivpu_boot_pwr_island_isolation_drive(vdev, true);
-       ivpu_boot_pwr_island_trickle_drive(vdev, false);
-       ivpu_boot_pwr_island_drive(vdev, false);
-
-       return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
-}
-
 static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
 {
        int ret;
@@ -629,34 +610,10 @@ static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
 static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev)
 {
        struct ivpu_hw_info *hw = vdev->hw;
-       u32 tile_fuse;
-
-       tile_fuse = REGB_RD32(MTL_BUTTRESS_TILE_FUSE);
-       if (!REG_TEST_FLD(MTL_BUTTRESS_TILE_FUSE, VALID, tile_fuse))
-               ivpu_warn(vdev, "Tile Fuse: Invalid (0x%x)\n", tile_fuse);
-
-       hw->tile_fuse = REG_GET_FLD(MTL_BUTTRESS_TILE_FUSE, SKU, tile_fuse);
-       switch (hw->tile_fuse) {
-       case TILE_FUSE_ENABLE_LOWER:
-               hw->sku = TILE_SKU_LOWER_MTL;
-               hw->config = WP_CONFIG_1_TILE_5_3_RATIO;
-               ivpu_dbg(vdev, MISC, "Tile Fuse: Enable Lower\n");
-               break;
-       case TILE_FUSE_ENABLE_UPPER:
-               hw->sku = TILE_SKU_UPPER_MTL;
-               hw->config = WP_CONFIG_1_TILE_4_3_RATIO;
-               ivpu_dbg(vdev, MISC, "Tile Fuse: Enable Upper\n");
-               break;
-       case TILE_FUSE_ENABLE_BOTH:
-               hw->sku = TILE_SKU_BOTH_MTL;
-               hw->config = WP_CONFIG_2_TILE_5_3_RATIO;
-               ivpu_dbg(vdev, MISC, "Tile Fuse: Enable Both\n");
-               break;
-       default:
-               hw->config = WP_CONFIG_0_TILE_PLL_OFF;
-               ivpu_dbg(vdev, MISC, "Tile Fuse: Disable\n");
-               break;
-       }
+
+       hw->tile_fuse = TILE_FUSE_ENABLE_BOTH;
+       hw->sku = TILE_SKU_BOTH_MTL;
+       hw->config = WP_CONFIG_2_TILE_4_3_RATIO;
 
        ivpu_pll_init_frequency_ratios(vdev);
 
@@ -797,21 +754,8 @@ static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
 {
        int ret = 0;
 
-       /* FPGA requires manual clearing of IP_Reset bit by enabling quiescent state */
-       if (ivpu_is_fpga(vdev)) {
-               if (ivpu_boot_host_ss_top_noc_disable(vdev)) {
-                       ivpu_err(vdev, "Failed to disable TOP NOC\n");
-                       ret = -EIO;
-               }
-
-               if (ivpu_boot_host_ss_axi_disable(vdev)) {
-                       ivpu_err(vdev, "Failed to disable AXI\n");
-                       ret = -EIO;
-               }
-       }
-
-       if (ivpu_boot_pwr_domain_disable(vdev)) {
-               ivpu_err(vdev, "Failed to disable power domain\n");
+       if (ivpu_hw_mtl_reset(vdev)) {
+               ivpu_err(vdev, "Failed to reset the VPU\n");
                ret = -EIO;
        }
 
@@ -844,6 +788,19 @@ static void ivpu_hw_mtl_wdt_disable(struct ivpu_device *vdev)
        REGV_WR32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, val);
 }
 
+static u32 ivpu_hw_mtl_pll_to_freq(u32 ratio, u32 config)
+{
+       u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
+       u32 cpu_clock;
+
+       if ((config & 0xff) == PLL_RATIO_4_3)
+               cpu_clock = pll_clock * 2 / 4;
+       else
+               cpu_clock = pll_clock * 2 / 5;
+
+       return cpu_clock;
+}
+
 /* Register indirect accesses */
 static u32 ivpu_hw_mtl_reg_pll_freq_get(struct ivpu_device *vdev)
 {
@@ -855,7 +812,7 @@ static u32 ivpu_hw_mtl_reg_pll_freq_get(struct ivpu_device *vdev)
        if (!ivpu_is_silicon(vdev))
                return PLL_SIMULATION_FREQ;
 
-       return PLL_RATIO_TO_FREQ(pll_curr_ratio);
+       return ivpu_hw_mtl_pll_to_freq(pll_curr_ratio, vdev->hw->config);
 }
 
 static u32 ivpu_hw_mtl_reg_telemetry_offset_get(struct ivpu_device *vdev)
index 9838202..68f5b66 100644 (file)
@@ -21,7 +21,7 @@ struct ivpu_bo;
 #define IVPU_IPC_ALIGNMENT        64
 
 #define IVPU_IPC_HDR_FREE         0
-#define IVPU_IPC_HDR_ALLOCATED    0
+#define IVPU_IPC_HDR_ALLOCATED    1
 
 /**
  * struct ivpu_ipc_hdr - The IPC message header structure, exchanged
index 94068ae..3c6f1e1 100644 (file)
@@ -461,26 +461,22 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
 
        job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;
 
-       ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
-                                       &acquire_ctx);
+       ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx);
        if (ret) {
                ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
                return ret;
        }
 
-       for (i = 0; i < buf_count; i++) {
-               ret = dma_resv_reserve_fences(job->bos[i]->base.resv, 1);
-               if (ret) {
-                       ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
-                       goto unlock_reservations;
-               }
+       ret = dma_resv_reserve_fences(bo->base.resv, 1);
+       if (ret) {
+               ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
+               goto unlock_reservations;
        }
 
-       for (i = 0; i < buf_count; i++)
-               dma_resv_add_fence(job->bos[i]->base.resv, job->done_fence, DMA_RESV_USAGE_WRITE);
+       dma_resv_add_fence(bo->base.resv, job->done_fence, DMA_RESV_USAGE_WRITE);
 
 unlock_reservations:
-       drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx);
+       drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx);
 
        wmb(); /* Flush write combining buffers */
 
@@ -489,12 +485,12 @@ unlock_reservations:
 
 int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
-       int ret = 0;
        struct ivpu_file_priv *file_priv = file->driver_priv;
        struct ivpu_device *vdev = file_priv->vdev;
        struct drm_ivpu_submit *params = data;
        struct ivpu_job *job;
        u32 *buf_handles;
+       int idx, ret;
 
        if (params->engine > DRM_IVPU_ENGINE_COPY)
                return -EINVAL;
@@ -523,6 +519,11 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                goto free_handles;
        }
 
+       if (!drm_dev_enter(&vdev->drm, &idx)) {
+               ret = -ENODEV;
+               goto free_handles;
+       }
+
        ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
                 file_priv->ctx.id, params->buffer_count);
 
@@ -530,7 +531,7 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        if (!job) {
                ivpu_err(vdev, "Failed to create job\n");
                ret = -ENOMEM;
-               goto free_handles;
+               goto dev_exit;
        }
 
        ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
@@ -548,6 +549,8 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
 job_put:
        job_put(job);
+dev_exit:
+       drm_dev_exit(idx);
 free_handles:
        kfree(buf_handles);
 
index 553bcbd..bde42d6 100644 (file)
@@ -98,12 +98,18 @@ retry:
 static void ivpu_pm_recovery_work(struct work_struct *work)
 {
        struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
-       struct ivpu_device *vdev =  pm->vdev;
+       struct ivpu_device *vdev = pm->vdev;
        char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
        int ret;
 
-       ret = pci_reset_function(to_pci_dev(vdev->drm.dev));
-       if (ret)
+retry:
+       ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
+       if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) {
+               cond_resched();
+               goto retry;
+       }
+
+       if (ret && ret != -EAGAIN)
                ivpu_err(vdev, "Failed to reset VPU: %d\n", ret);
 
        kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
@@ -134,32 +140,28 @@ int ivpu_pm_suspend_cb(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
        struct ivpu_device *vdev = to_ivpu_device(drm);
-       int ret;
+       unsigned long timeout;
 
        ivpu_dbg(vdev, PM, "Suspend..\n");
 
-       ret = ivpu_suspend(vdev);
-       if (ret && vdev->pm->suspend_reschedule_counter) {
-               ivpu_dbg(vdev, PM, "Failed to enter idle, rescheduling suspend, retries left %d\n",
-                        vdev->pm->suspend_reschedule_counter);
-               pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend);
-               vdev->pm->suspend_reschedule_counter--;
-               return -EBUSY;
-       } else if (!vdev->pm->suspend_reschedule_counter) {
-               ivpu_warn(vdev, "Failed to enter idle, force suspend\n");
-               ivpu_pm_prepare_cold_boot(vdev);
-       } else {
-               ivpu_pm_prepare_warm_boot(vdev);
+       timeout = jiffies + msecs_to_jiffies(vdev->timeout.tdr);
+       while (!ivpu_hw_is_idle(vdev)) {
+               cond_resched();
+               if (time_after_eq(jiffies, timeout)) {
+                       ivpu_err(vdev, "Failed to enter idle on system suspend\n");
+                       return -EBUSY;
+               }
        }
 
-       vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
+       ivpu_suspend(vdev);
+       ivpu_pm_prepare_warm_boot(vdev);
 
        pci_save_state(to_pci_dev(dev));
        pci_set_power_state(to_pci_dev(dev), PCI_D3hot);
 
        ivpu_dbg(vdev, PM, "Suspend done.\n");
 
-       return ret;
+       return 0;
 }
 
 int ivpu_pm_resume_cb(struct device *dev)
@@ -306,6 +308,11 @@ int ivpu_pm_init(struct ivpu_device *vdev)
        return 0;
 }
 
+void ivpu_pm_cancel_recovery(struct ivpu_device *vdev)
+{
+       cancel_work_sync(&vdev->pm->recovery_work);
+}
+
 void ivpu_pm_enable(struct ivpu_device *vdev)
 {
        struct device *dev = vdev->drm.dev;
index dc1b375..baca981 100644 (file)
@@ -21,6 +21,7 @@ struct ivpu_pm_info {
 int ivpu_pm_init(struct ivpu_device *vdev);
 void ivpu_pm_enable(struct ivpu_device *vdev);
 void ivpu_pm_disable(struct ivpu_device *vdev);
+void ivpu_pm_cancel_recovery(struct ivpu_device *vdev);
 
 int ivpu_pm_suspend_cb(struct device *dev);
 int ivpu_pm_resume_cb(struct device *dev);
index 97b711e..c7a6d0b 100644 (file)
@@ -1984,6 +1984,7 @@ static int instance;
 static int acpi_video_bus_add(struct acpi_device *device)
 {
        struct acpi_video_bus *video;
+       bool auto_detect;
        int error;
        acpi_status status;
 
@@ -2045,10 +2046,20 @@ static int acpi_video_bus_add(struct acpi_device *device)
        mutex_unlock(&video_list_lock);
 
        /*
-        * The userspace visible backlight_device gets registered separately
-        * from acpi_video_register_backlight().
+        * If backlight-type auto-detection is used then a native backlight may
+        * show up later and this may change the result from video to native.
+        * Therefore, normally the userspace-visible /sys/class/backlight device
+        * gets registered separately by the GPU driver calling
+        * acpi_video_register_backlight() when an internal panel is detected.
+        * Register the backlight now when not using auto-detection, so that
+        * when the kernel cmdline or DMI-quirks are used the backlight will
+        * get registered even if acpi_video_register_backlight() is not called.
         */
        acpi_video_run_bcl_for_osi(video);
+       if (__acpi_video_get_backlight_type(false, &auto_detect) == acpi_backlight_video &&
+           !auto_detect)
+               acpi_video_bus_register_backlight(video);
+
        acpi_video_bus_add_notify_handler(video);
 
        return 0;
index 9531dd0..a96da65 100644 (file)
@@ -459,85 +459,67 @@ out_free:
                              Notification Handling
    -------------------------------------------------------------------------- */
 
-/*
- * acpi_bus_notify
- * ---------------
- * Callback for all 'system-level' device notifications (values 0x00-0x7F).
+/**
+ * acpi_bus_notify - Global system-level (0x00-0x7F) notifications handler
+ * @handle: Target ACPI object.
+ * @type: Notification type.
+ * @data: Ignored.
+ *
+ * This only handles notifications related to device hotplug.
  */
 static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
 {
        struct acpi_device *adev;
-       u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
-       bool hotplug_event = false;
 
        switch (type) {
        case ACPI_NOTIFY_BUS_CHECK:
                acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
-               hotplug_event = true;
                break;
 
        case ACPI_NOTIFY_DEVICE_CHECK:
                acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
-               hotplug_event = true;
                break;
 
        case ACPI_NOTIFY_DEVICE_WAKE:
                acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_WAKE event\n");
-               break;
+               return;
 
        case ACPI_NOTIFY_EJECT_REQUEST:
                acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
-               hotplug_event = true;
                break;
 
        case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
                acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK_LIGHT event\n");
                /* TBD: Exactly what does 'light' mean? */
-               break;
+               return;
 
        case ACPI_NOTIFY_FREQUENCY_MISMATCH:
                acpi_handle_err(handle, "Device cannot be configured due "
                                "to a frequency mismatch\n");
-               break;
+               return;
 
        case ACPI_NOTIFY_BUS_MODE_MISMATCH:
                acpi_handle_err(handle, "Device cannot be configured due "
                                "to a bus mode mismatch\n");
-               break;
+               return;
 
        case ACPI_NOTIFY_POWER_FAULT:
                acpi_handle_err(handle, "Device has suffered a power fault\n");
-               break;
+               return;
 
        default:
                acpi_handle_debug(handle, "Unknown event type 0x%x\n", type);
-               break;
+               return;
        }
 
        adev = acpi_get_acpi_dev(handle);
-       if (!adev)
-               goto err;
-
-       if (adev->dev.driver) {
-               struct acpi_driver *driver = to_acpi_driver(adev->dev.driver);
-
-               if (driver && driver->ops.notify &&
-                   (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
-                       driver->ops.notify(adev, type);
-       }
-
-       if (!hotplug_event) {
-               acpi_put_acpi_dev(adev);
-               return;
-       }
 
-       if (ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
+       if (adev && ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
                return;
 
        acpi_put_acpi_dev(adev);
 
- err:
-       acpi_evaluate_ost(handle, type, ost_code, NULL);
+       acpi_evaluate_ost(handle, type, ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
 }
 
 static void acpi_notify_device(acpi_handle handle, u32 event, void *data)
@@ -562,42 +544,51 @@ static u32 acpi_device_fixed_event(void *data)
        return ACPI_INTERRUPT_HANDLED;
 }
 
-static int acpi_device_install_notify_handler(struct acpi_device *device)
+static int acpi_device_install_notify_handler(struct acpi_device *device,
+                                             struct acpi_driver *acpi_drv)
 {
        acpi_status status;
 
-       if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
+       if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
                status =
                    acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
                                                     acpi_device_fixed_event,
                                                     device);
-       else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
+       } else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) {
                status =
                    acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
                                                     acpi_device_fixed_event,
                                                     device);
-       else
-               status = acpi_install_notify_handler(device->handle,
-                                                    ACPI_DEVICE_NOTIFY,
+       } else {
+               u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
+                               ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
+
+               status = acpi_install_notify_handler(device->handle, type,
                                                     acpi_notify_device,
                                                     device);
+       }
 
        if (ACPI_FAILURE(status))
                return -EINVAL;
        return 0;
 }
 
-static void acpi_device_remove_notify_handler(struct acpi_device *device)
+static void acpi_device_remove_notify_handler(struct acpi_device *device,
+                                             struct acpi_driver *acpi_drv)
 {
-       if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
+       if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
                acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
                                                acpi_device_fixed_event);
-       else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
+       } else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) {
                acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
                                                acpi_device_fixed_event);
-       else
-               acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+       } else {
+               u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
+                               ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
+
+               acpi_remove_notify_handler(device->handle, type,
                                           acpi_notify_device);
+       }
 }
 
 /* Handle events targeting \_SB device (at present only graceful shutdown) */
@@ -1039,7 +1030,7 @@ static int acpi_device_probe(struct device *dev)
                 acpi_drv->name, acpi_dev->pnp.bus_id);
 
        if (acpi_drv->ops.notify) {
-               ret = acpi_device_install_notify_handler(acpi_dev);
+               ret = acpi_device_install_notify_handler(acpi_dev, acpi_drv);
                if (ret) {
                        if (acpi_drv->ops.remove)
                                acpi_drv->ops.remove(acpi_dev);
@@ -1062,7 +1053,7 @@ static void acpi_device_remove(struct device *dev)
        struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
 
        if (acpi_drv->ops.notify)
-               acpi_device_remove_notify_handler(acpi_dev);
+               acpi_device_remove_notify_handler(acpi_dev, acpi_drv);
 
        if (acpi_drv->ops.remove)
                acpi_drv->ops.remove(acpi_dev);
index 7b4801c..e8492b3 100644 (file)
@@ -440,6 +440,13 @@ static const struct dmi_system_id asus_laptop[] = {
                },
        },
        {
+               .ident = "Asus ExpertBook B1502CBA",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "B1502CBA"),
+               },
+       },
+       {
                .ident = "Asus ExpertBook B2402CBA",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
index fd7cbce..e85729f 100644 (file)
@@ -277,6 +277,43 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
        },
 
        /*
+        * Models which need acpi_video backlight control where the GPU drivers
+        * do not call acpi_video_register_backlight() because no internal panel
+        * is detected. Typically these are all-in-ones (monitors with builtin
+        * PC) where the panel connection shows up as regular DP instead of eDP.
+        */
+       {
+        .callback = video_detect_force_video,
+        /* Apple iMac14,1 */
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,1"),
+               },
+       },
+       {
+        .callback = video_detect_force_video,
+        /* Apple iMac14,2 */
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,2"),
+               },
+       },
+
+       /*
+        * Older models with nvidia GPU which need acpi_video backlight
+        * control and where the old nvidia binary driver series does not
+        * call acpi_video_register_backlight().
+        */
+       {
+        .callback = video_detect_force_video,
+        /* ThinkPad W530 */
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
+               },
+       },
+
+       /*
         * These models have a working acpi_video backlight control, and using
         * native backlight causes a regression where backlight does not work
         * when userspace is not handling brightness key events. Disable
@@ -782,7 +819,7 @@ static bool prefer_native_over_acpi_video(void)
  * Determine which type of backlight interface to use on this system,
  * First check cmdline, then dmi quirks, then do autodetect.
  */
-static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto_detect)
 {
        static DEFINE_MUTEX(init_mutex);
        static bool nvidia_wmi_ec_present;
@@ -807,6 +844,9 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
                native_available = true;
        mutex_unlock(&init_mutex);
 
+       if (auto_detect)
+               *auto_detect = false;
+
        /*
         * The below heuristics / detection steps are in order of descending
         * presedence. The commandline takes presedence over anything else.
@@ -818,6 +858,9 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
        if (acpi_backlight_dmi != acpi_backlight_undef)
                return acpi_backlight_dmi;
 
+       if (auto_detect)
+               *auto_detect = true;
+
        /* Special cases such as nvidia_wmi_ec and apple gmux. */
        if (nvidia_wmi_ec_present)
                return acpi_backlight_nvidia_wmi_ec;
@@ -837,15 +880,4 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
        /* No ACPI video/native (old hw), use vendor specific fw methods. */
        return acpi_backlight_vendor;
 }
-
-enum acpi_backlight_type acpi_video_get_backlight_type(void)
-{
-       return __acpi_video_get_backlight_type(false);
-}
-EXPORT_SYMBOL(acpi_video_get_backlight_type);
-
-bool acpi_video_backlight_use_native(void)
-{
-       return __acpi_video_get_backlight_type(true) == acpi_backlight_native;
-}
-EXPORT_SYMBOL(acpi_video_backlight_use_native);
+EXPORT_SYMBOL(__acpi_video_get_backlight_type);
index da57270..ba420a2 100644 (file)
@@ -213,6 +213,7 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
       disk in the system.
  */
 static const struct x86_cpu_id storage_d3_cpu_ids[] = {
+       X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 24, NULL),  /* Picasso */
        X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL),  /* Renoir */
        X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
        X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL),  /* Cezanne */
index f6573c3..f3903d0 100644 (file)
@@ -474,12 +474,18 @@ int detect_cache_attributes(unsigned int cpu)
 
 populate_leaves:
        /*
-        * populate_cache_leaves() may completely setup the cache leaves and
-        * shared_cpu_map or it may leave it partially setup.
+        * If LLC is valid the cache leaves were already populated so just go to
+        * update the cpu map.
         */
-       ret = populate_cache_leaves(cpu);
-       if (ret)
-               goto free_ci;
+       if (!last_level_cache_is_valid(cpu)) {
+               /*
+                * populate_cache_leaves() may completely setup the cache leaves and
+                * shared_cpu_map or it may leave it partially setup.
+                */
+               ret = populate_cache_leaves(cpu);
+               if (ret)
+                       goto free_ci;
+       }
 
        /*
         * For systems using DT for cache hierarchy, fw_token
index 28eb59f..bc31bb7 100644 (file)
@@ -1010,9 +1010,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
        /* This is safe, since we have a reference from open(). */
        __module_get(THIS_MODULE);
 
-       /* suppress uevents while reconfiguring the device */
-       dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
-
        /*
         * If we don't hold exclusive handle for the device, upgrade to it
         * here to avoid changing device under exclusive owner.
@@ -1067,6 +1064,9 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
                }
        }
 
+       /* suppress uevents while reconfiguring the device */
+       dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
+
        disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
        set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
 
@@ -1109,17 +1109,17 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
        if (partscan)
                clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
 
+       /* enable and uncork uevent now that we are done */
+       dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
+
        loop_global_unlock(lo, is_loop);
        if (partscan)
                loop_reread_partitions(lo);
+
        if (!(mode & FMODE_EXCL))
                bd_abort_claiming(bdev, loop_configure);
 
-       error = 0;
-done:
-       /* enable and uncork uevent now that we are done */
-       dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
-       return error;
+       return 0;
 
 out_unlock:
        loop_global_unlock(lo, is_loop);
@@ -1130,7 +1130,7 @@ out_putf:
        fput(file);
        /* This is safe: open() is still holding a reference. */
        module_put(THIS_MODULE);
-       goto done;
+       return error;
 }
 
 static void __loop_clr_fd(struct loop_device *lo, bool release)
index c73cc57..604c1a1 100644 (file)
@@ -246,7 +246,7 @@ static int ublk_validate_params(const struct ublk_device *ub)
        if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
                const struct ublk_param_basic *p = &ub->params.basic;
 
-               if (p->logical_bs_shift > PAGE_SHIFT)
+               if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
                        return -EINVAL;
 
                if (p->logical_bs_shift > p->physical_bs_shift)
@@ -1261,9 +1261,10 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
        ublk_queue_cmd(ubq, req);
 }
 
-static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
+                              unsigned int issue_flags,
+                              struct ublksrv_io_cmd *ub_cmd)
 {
-       struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
        struct ublk_device *ub = cmd->file->private_data;
        struct ublk_queue *ubq;
        struct ublk_io *io;
@@ -1362,6 +1363,23 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
        return -EIOCBQUEUED;
 }
 
+static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+       struct ublksrv_io_cmd *ub_src = (struct ublksrv_io_cmd *) cmd->cmd;
+       struct ublksrv_io_cmd ub_cmd;
+
+       /*
+        * Not necessary for async retry, but let's keep it simple and always
+        * copy the values to avoid any potential reuse.
+        */
+       ub_cmd.q_id = READ_ONCE(ub_src->q_id);
+       ub_cmd.tag = READ_ONCE(ub_src->tag);
+       ub_cmd.result = READ_ONCE(ub_src->result);
+       ub_cmd.addr = READ_ONCE(ub_src->addr);
+
+       return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
+}
+
 static const struct file_operations ublk_ch_fops = {
        .owner = THIS_MODULE,
        .open = ublk_ch_open,
@@ -1952,6 +1970,8 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
                /* clear all we don't support yet */
                ub->params.types &= UBLK_PARAM_TYPE_ALL;
                ret = ublk_validate_params(ub);
+               if (ret)
+                       ub->params.types = 0;
        }
        mutex_unlock(&ub->mutex);
 
index 2723eed..2b918e2 100644 (file)
@@ -96,16 +96,14 @@ struct virtblk_req {
 
                /*
                 * The zone append command has an extended in header.
-                * The status field in zone_append_in_hdr must have
-                * the same offset in virtblk_req as the non-zoned
-                * status field above.
+                * The status field in zone_append_in_hdr must always
+                * be the last byte.
                 */
                struct {
+                       __virtio64 sector;
                        u8 status;
-                       u8 reserved[7];
-                       __le64 append_sector;
-               } zone_append_in_hdr;
-       };
+               } zone_append;
+       } in_hdr;
 
        size_t in_hdr_len;
 
@@ -154,7 +152,7 @@ static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
                        sgs[num_out + num_in++] = vbr->sg_table.sgl;
        }
 
-       sg_init_one(&in_hdr, &vbr->status, vbr->in_hdr_len);
+       sg_init_one(&in_hdr, &vbr->in_hdr.status, vbr->in_hdr_len);
        sgs[num_out + num_in++] = &in_hdr;
 
        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
@@ -242,11 +240,14 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
                                      struct request *req,
                                      struct virtblk_req *vbr)
 {
-       size_t in_hdr_len = sizeof(vbr->status);
+       size_t in_hdr_len = sizeof(vbr->in_hdr.status);
        bool unmap = false;
        u32 type;
        u64 sector = 0;
 
+       if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && op_is_zone_mgmt(req_op(req)))
+               return BLK_STS_NOTSUPP;
+
        /* Set fields for all request types */
        vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
 
@@ -287,7 +288,7 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
        case REQ_OP_ZONE_APPEND:
                type = VIRTIO_BLK_T_ZONE_APPEND;
                sector = blk_rq_pos(req);
-               in_hdr_len = sizeof(vbr->zone_append_in_hdr);
+               in_hdr_len = sizeof(vbr->in_hdr.zone_append);
                break;
        case REQ_OP_ZONE_RESET:
                type = VIRTIO_BLK_T_ZONE_RESET;
@@ -297,7 +298,10 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
                type = VIRTIO_BLK_T_ZONE_RESET_ALL;
                break;
        case REQ_OP_DRV_IN:
-               /* Out header already filled in, nothing to do */
+               /*
+                * Out header has already been prepared by the caller (virtblk_get_id()
+                * or virtblk_submit_zone_report()), nothing to do here.
+                */
                return 0;
        default:
                WARN_ON_ONCE(1);
@@ -318,16 +322,28 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
        return 0;
 }
 
+/*
+ * The status byte is always the last byte of the virtblk request
+ * in-header. This helper fetches its value for all in-header formats
+ * that are currently defined.
+ */
+static inline u8 virtblk_vbr_status(struct virtblk_req *vbr)
+{
+       return *((u8 *)&vbr->in_hdr + vbr->in_hdr_len - 1);
+}
+
 static inline void virtblk_request_done(struct request *req)
 {
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
-       blk_status_t status = virtblk_result(vbr->status);
+       blk_status_t status = virtblk_result(virtblk_vbr_status(vbr));
+       struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
 
        virtblk_unmap_data(req, vbr);
        virtblk_cleanup_cmd(req);
 
        if (req_op(req) == REQ_OP_ZONE_APPEND)
-               req->__sector = le64_to_cpu(vbr->zone_append_in_hdr.append_sector);
+               req->__sector = virtio64_to_cpu(vblk->vdev,
+                                               vbr->in_hdr.zone_append.sector);
 
        blk_mq_end_request(req, status);
 }
@@ -355,7 +371,7 @@ static int virtblk_handle_req(struct virtio_blk_vq *vq,
 
                if (likely(!blk_should_fake_timeout(req->q)) &&
                    !blk_mq_complete_request_remote(req) &&
-                   !blk_mq_add_to_batch(req, iob, vbr->status,
+                   !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
                                         virtblk_complete_batch))
                        virtblk_request_done(req);
                req_done++;
@@ -550,7 +566,6 @@ static void virtio_queue_rqs(struct request **rqlist)
 #ifdef CONFIG_BLK_DEV_ZONED
 static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk,
                                          unsigned int nr_zones,
-                                         unsigned int zone_sectors,
                                          size_t *buflen)
 {
        struct request_queue *q = vblk->disk->queue;
@@ -558,7 +573,7 @@ static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk,
        void *buf;
 
        nr_zones = min_t(unsigned int, nr_zones,
-                        get_capacity(vblk->disk) >> ilog2(zone_sectors));
+                        get_capacity(vblk->disk) >> ilog2(vblk->zone_sectors));
 
        bufsize = sizeof(struct virtio_blk_zone_report) +
                nr_zones * sizeof(struct virtio_blk_zone_descriptor);
@@ -592,7 +607,7 @@ static int virtblk_submit_zone_report(struct virtio_blk *vblk,
                return PTR_ERR(req);
 
        vbr = blk_mq_rq_to_pdu(req);
-       vbr->in_hdr_len = sizeof(vbr->status);
+       vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
        vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
        vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);
 
@@ -601,7 +616,7 @@ static int virtblk_submit_zone_report(struct virtio_blk *vblk,
                goto out;
 
        blk_execute_rq(req, false);
-       err = blk_status_to_errno(virtblk_result(vbr->status));
+       err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
 out:
        blk_mq_free_request(req);
        return err;
@@ -609,29 +624,72 @@ out:
 
 static int virtblk_parse_zone(struct virtio_blk *vblk,
                               struct virtio_blk_zone_descriptor *entry,
-                              unsigned int idx, unsigned int zone_sectors,
-                              report_zones_cb cb, void *data)
+                              unsigned int idx, report_zones_cb cb, void *data)
 {
        struct blk_zone zone = { };
 
-       if (entry->z_type != VIRTIO_BLK_ZT_SWR &&
-           entry->z_type != VIRTIO_BLK_ZT_SWP &&
-           entry->z_type != VIRTIO_BLK_ZT_CONV) {
-               dev_err(&vblk->vdev->dev, "invalid zone type %#x\n",
-                       entry->z_type);
-               return -EINVAL;
+       zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start);
+       if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk))
+               zone.len = vblk->zone_sectors;
+       else
+               zone.len = get_capacity(vblk->disk) - zone.start;
+       zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap);
+       zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp);
+
+       switch (entry->z_type) {
+       case VIRTIO_BLK_ZT_SWR:
+               zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
+               break;
+       case VIRTIO_BLK_ZT_SWP:
+               zone.type = BLK_ZONE_TYPE_SEQWRITE_PREF;
+               break;
+       case VIRTIO_BLK_ZT_CONV:
+               zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
+               break;
+       default:
+               dev_err(&vblk->vdev->dev, "zone %llu: invalid type %#x\n",
+                       zone.start, entry->z_type);
+               return -EIO;
        }
 
-       zone.type = entry->z_type;
-       zone.cond = entry->z_state;
-       zone.len = zone_sectors;
-       zone.capacity = le64_to_cpu(entry->z_cap);
-       zone.start = le64_to_cpu(entry->z_start);
-       if (zone.cond == BLK_ZONE_COND_FULL)
+       switch (entry->z_state) {
+       case VIRTIO_BLK_ZS_EMPTY:
+               zone.cond = BLK_ZONE_COND_EMPTY;
+               break;
+       case VIRTIO_BLK_ZS_CLOSED:
+               zone.cond = BLK_ZONE_COND_CLOSED;
+               break;
+       case VIRTIO_BLK_ZS_FULL:
+               zone.cond = BLK_ZONE_COND_FULL;
                zone.wp = zone.start + zone.len;
-       else
-               zone.wp = le64_to_cpu(entry->z_wp);
+               break;
+       case VIRTIO_BLK_ZS_EOPEN:
+               zone.cond = BLK_ZONE_COND_EXP_OPEN;
+               break;
+       case VIRTIO_BLK_ZS_IOPEN:
+               zone.cond = BLK_ZONE_COND_IMP_OPEN;
+               break;
+       case VIRTIO_BLK_ZS_NOT_WP:
+               zone.cond = BLK_ZONE_COND_NOT_WP;
+               break;
+       case VIRTIO_BLK_ZS_RDONLY:
+               zone.cond = BLK_ZONE_COND_READONLY;
+               zone.wp = ULONG_MAX;
+               break;
+       case VIRTIO_BLK_ZS_OFFLINE:
+               zone.cond = BLK_ZONE_COND_OFFLINE;
+               zone.wp = ULONG_MAX;
+               break;
+       default:
+               dev_err(&vblk->vdev->dev, "zone %llu: invalid condition %#x\n",
+                       zone.start, entry->z_state);
+               return -EIO;
+       }
 
+       /*
+        * The callback below checks the validity of the reported
+        * entry data, no need to further validate it here.
+        */
        return cb(&zone, idx, data);
 }
 
@@ -641,39 +699,47 @@ static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
 {
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_blk_zone_report *report;
-       unsigned int zone_sectors = vblk->zone_sectors;
-       unsigned int nz, i;
-       int ret, zone_idx = 0;
+       unsigned long long nz, i;
        size_t buflen;
+       unsigned int zone_idx = 0;
+       int ret;
 
        if (WARN_ON_ONCE(!vblk->zone_sectors))
                return -EOPNOTSUPP;
 
-       report = virtblk_alloc_report_buffer(vblk, nr_zones,
-                                            zone_sectors, &buflen);
+       report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen);
        if (!report)
                return -ENOMEM;
 
+       mutex_lock(&vblk->vdev_mutex);
+
+       if (!vblk->vdev) {
+               ret = -ENXIO;
+               goto fail_report;
+       }
+
        while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) {
                memset(report, 0, buflen);
 
                ret = virtblk_submit_zone_report(vblk, (char *)report,
                                                 buflen, sector);
-               if (ret) {
-                       if (ret > 0)
-                               ret = -EIO;
-                       goto out_free;
-               }
-               nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones);
+               if (ret)
+                       goto fail_report;
+
+               nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones),
+                          nr_zones);
                if (!nz)
                        break;
 
                for (i = 0; i < nz && zone_idx < nr_zones; i++) {
                        ret = virtblk_parse_zone(vblk, &report->zones[i],
-                                                zone_idx, zone_sectors, cb, data);
+                                                zone_idx, cb, data);
                        if (ret)
-                               goto out_free;
-                       sector = le64_to_cpu(report->zones[i].z_start) + zone_sectors;
+                               goto fail_report;
+
+                       sector = virtio64_to_cpu(vblk->vdev,
+                                                report->zones[i].z_start) +
+                                vblk->zone_sectors;
                        zone_idx++;
                }
        }
@@ -682,7 +748,8 @@ static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
                ret = zone_idx;
        else
                ret = -EINVAL;
-out_free:
+fail_report:
+       mutex_unlock(&vblk->vdev_mutex);
        kvfree(report);
        return ret;
 }
@@ -691,20 +758,28 @@ static void virtblk_revalidate_zones(struct virtio_blk *vblk)
 {
        u8 model;
 
-       if (!vblk->zone_sectors)
-               return;
-
        virtio_cread(vblk->vdev, struct virtio_blk_config,
                     zoned.model, &model);
-       if (!blk_revalidate_disk_zones(vblk->disk, NULL))
-               set_capacity_and_notify(vblk->disk, 0);
+       switch (model) {
+       default:
+               dev_err(&vblk->vdev->dev, "unknown zone model %d\n", model);
+               fallthrough;
+       case VIRTIO_BLK_Z_NONE:
+       case VIRTIO_BLK_Z_HA:
+               disk_set_zoned(vblk->disk, BLK_ZONED_NONE);
+               return;
+       case VIRTIO_BLK_Z_HM:
+               WARN_ON_ONCE(!vblk->zone_sectors);
+               if (!blk_revalidate_disk_zones(vblk->disk, NULL))
+                       set_capacity_and_notify(vblk->disk, 0);
+       }
 }
 
 static int virtblk_probe_zoned_device(struct virtio_device *vdev,
                                       struct virtio_blk *vblk,
                                       struct request_queue *q)
 {
-       u32 v;
+       u32 v, wg;
        u8 model;
        int ret;
 
@@ -713,16 +788,11 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
 
        switch (model) {
        case VIRTIO_BLK_Z_NONE:
+       case VIRTIO_BLK_Z_HA:
+               /* Present the host-aware device as non-zoned */
                return 0;
        case VIRTIO_BLK_Z_HM:
                break;
-       case VIRTIO_BLK_Z_HA:
-               /*
-                * Present the host-aware device as a regular drive.
-                * TODO It is possible to add an option to make it appear
-                * in the system as a zoned drive.
-                */
-               return 0;
        default:
                dev_err(&vdev->dev, "unsupported zone model %d\n", model);
                return -EINVAL;
@@ -735,32 +805,31 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
 
        virtio_cread(vdev, struct virtio_blk_config,
                     zoned.max_open_zones, &v);
-       disk_set_max_open_zones(vblk->disk, le32_to_cpu(v));
-
-       dev_dbg(&vdev->dev, "max open zones = %u\n", le32_to_cpu(v));
+       disk_set_max_open_zones(vblk->disk, v);
+       dev_dbg(&vdev->dev, "max open zones = %u\n", v);
 
        virtio_cread(vdev, struct virtio_blk_config,
                     zoned.max_active_zones, &v);
-       disk_set_max_active_zones(vblk->disk, le32_to_cpu(v));
-       dev_dbg(&vdev->dev, "max active zones = %u\n", le32_to_cpu(v));
+       disk_set_max_active_zones(vblk->disk, v);
+       dev_dbg(&vdev->dev, "max active zones = %u\n", v);
 
        virtio_cread(vdev, struct virtio_blk_config,
-                    zoned.write_granularity, &v);
-       if (!v) {
+                    zoned.write_granularity, &wg);
+       if (!wg) {
                dev_warn(&vdev->dev, "zero write granularity reported\n");
                return -ENODEV;
        }
-       blk_queue_physical_block_size(q, le32_to_cpu(v));
-       blk_queue_io_min(q, le32_to_cpu(v));
+       blk_queue_physical_block_size(q, wg);
+       blk_queue_io_min(q, wg);
 
-       dev_dbg(&vdev->dev, "write granularity = %u\n", le32_to_cpu(v));
+       dev_dbg(&vdev->dev, "write granularity = %u\n", wg);
 
        /*
         * virtio ZBD specification doesn't require zones to be a power of
         * two sectors in size, but the code in this driver expects that.
         */
-       virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors, &v);
-       vblk->zone_sectors = le32_to_cpu(v);
+       virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors,
+                    &vblk->zone_sectors);
        if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) {
                dev_err(&vdev->dev,
                        "zoned device with non power of two zone size %u\n",
@@ -783,36 +852,46 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
                        dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
                        return -ENODEV;
                }
-               blk_queue_max_zone_append_sectors(q, le32_to_cpu(v));
-               dev_dbg(&vdev->dev, "max append sectors = %u\n", le32_to_cpu(v));
+               if ((v << SECTOR_SHIFT) < wg) {
+                       dev_err(&vdev->dev,
+                               "write granularity %u exceeds max_append_sectors %u limit\n",
+                               wg, v);
+                       return -ENODEV;
+               }
+
+               blk_queue_max_zone_append_sectors(q, v);
+               dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
        }
 
        return ret;
 }
 
-static inline bool virtblk_has_zoned_feature(struct virtio_device *vdev)
-{
-       return virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED);
-}
 #else
 
 /*
  * Zoned block device support is not configured in this kernel.
- * We only need to define a few symbols to avoid compilation errors.
+ * Host-managed zoned devices can't be supported, but others are
+ * good to go as regular block devices.
  */
 #define virtblk_report_zones       NULL
+
 static inline void virtblk_revalidate_zones(struct virtio_blk *vblk)
 {
 }
+
 static inline int virtblk_probe_zoned_device(struct virtio_device *vdev,
                        struct virtio_blk *vblk, struct request_queue *q)
 {
-       return -EOPNOTSUPP;
-}
+       u8 model;
 
-static inline bool virtblk_has_zoned_feature(struct virtio_device *vdev)
-{
-       return false;
+       virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model);
+       if (model == VIRTIO_BLK_Z_HM) {
+               dev_err(&vdev->dev,
+                       "virtio_blk: zoned devices are not supported");
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
 }
 #endif /* CONFIG_BLK_DEV_ZONED */
 
@@ -831,7 +910,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
                return PTR_ERR(req);
 
        vbr = blk_mq_rq_to_pdu(req);
-       vbr->in_hdr_len = sizeof(vbr->status);
+       vbr->in_hdr_len = sizeof(vbr->in_hdr.status);
        vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
        vbr->out_hdr.sector = 0;
 
@@ -840,7 +919,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
                goto out;
 
        blk_execute_rq(req, false);
-       err = blk_status_to_errno(virtblk_result(vbr->status));
+       err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status));
 out:
        blk_mq_free_request(req);
        return err;
@@ -1498,15 +1577,16 @@ static int virtblk_probe(struct virtio_device *vdev)
        virtblk_update_capacity(vblk, false);
        virtio_device_ready(vdev);
 
-       if (virtblk_has_zoned_feature(vdev)) {
+       /*
+        * All steps that follow use the VQs therefore they need to be
+        * placed after the virtio_device_ready() call above.
+        */
+       if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
                err = virtblk_probe_zoned_device(vdev, vblk, q);
                if (err)
                        goto out_cleanup_disk;
        }
 
-       dev_info(&vdev->dev, "blk config size: %zu\n",
-               sizeof(struct virtio_blk_config));
-
        err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
        if (err)
                goto out_cleanup_disk;
@@ -1607,10 +1687,7 @@ static unsigned int features[] = {
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
-       VIRTIO_BLK_F_SECURE_ERASE,
-#ifdef CONFIG_BLK_DEV_ZONED
-       VIRTIO_BLK_F_ZONED,
-#endif /* CONFIG_BLK_DEV_ZONED */
+       VIRTIO_BLK_F_SECURE_ERASE, VIRTIO_BLK_F_ZONED,
 };
 
 static struct virtio_driver virtio_blk = {
index 3006e2a..43e98a5 100644 (file)
@@ -511,7 +511,7 @@ static const char *btbcm_get_board_name(struct device *dev)
        len = strlen(tmp) + 1;
        board_type = devm_kzalloc(dev, len, GFP_KERNEL);
        strscpy(board_type, tmp, len);
-       for (i = 0; i < board_type[i]; i++) {
+       for (i = 0; i < len; i++) {
                if (board_type[i] == '/')
                        board_type[i] = '-';
        }
index 0289360..5100032 100644 (file)
@@ -358,6 +358,7 @@ static void btsdio_remove(struct sdio_func *func)
        if (!data)
                return;
 
+       cancel_work_sync(&data->work);
        hdev = data->hdev;
 
        sdio_set_drvdata(func, NULL);
index 36d4248..cf463c1 100644 (file)
@@ -329,6 +329,12 @@ static int of_weim_notify(struct notifier_block *nb, unsigned long action,
                                 "Failed to setup timing for '%pOF'\n", rd->dn);
 
                if (!of_node_check_flag(rd->dn, OF_POPULATED)) {
+                       /*
+                        * Clear the flag before adding the device so that
+                        * fw_devlink doesn't skip adding consumers to this
+                        * device.
+                        */
+                       rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
                        if (!of_platform_device_create(rd->dn, NULL, &pdev->dev)) {
                                dev_err(&pdev->dev,
                                        "Failed to create child device '%pOF'\n",
index f91f305..ff3a52d 100644 (file)
@@ -143,8 +143,9 @@ static int rs9_regmap_i2c_read(void *context,
 static const struct regmap_config rs9_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
-       .cache_type = REGCACHE_NONE,
+       .cache_type = REGCACHE_FLAT,
        .max_register = RS9_REG_BCP,
+       .num_reg_defaults_raw = 0x8,
        .rd_table = &rs9_readable_table,
        .wr_table = &rs9_writeable_table,
        .reg_write = rs9_regmap_i2c_write,
index 2836adb..e3696a8 100644 (file)
@@ -95,14 +95,16 @@ static const struct clk_div_table video_div_table[] = {
        { }
 };
 
-static const char * enet1_ref_sels[] = { "enet1_ref_125m", "enet1_ref_pad", };
+static const char * enet1_ref_sels[] = { "enet1_ref_125m", "enet1_ref_pad", "dummy", "dummy"};
 static const u32 enet1_ref_sels_table[] = { IMX6UL_GPR1_ENET1_TX_CLK_DIR,
-                                           IMX6UL_GPR1_ENET1_CLK_SEL };
+                                           IMX6UL_GPR1_ENET1_CLK_SEL, 0,
+                                           IMX6UL_GPR1_ENET1_TX_CLK_DIR | IMX6UL_GPR1_ENET1_CLK_SEL };
 static const u32 enet1_ref_sels_table_mask = IMX6UL_GPR1_ENET1_TX_CLK_DIR |
                                             IMX6UL_GPR1_ENET1_CLK_SEL;
-static const char * enet2_ref_sels[] = { "enet2_ref_125m", "enet2_ref_pad", };
+static const char * enet2_ref_sels[] = { "enet2_ref_125m", "enet2_ref_pad", "dummy", "dummy"};
 static const u32 enet2_ref_sels_table[] = { IMX6UL_GPR1_ENET2_TX_CLK_DIR,
-                                           IMX6UL_GPR1_ENET2_CLK_SEL };
+                                           IMX6UL_GPR1_ENET2_CLK_SEL, 0,
+                                           IMX6UL_GPR1_ENET2_TX_CLK_DIR | IMX6UL_GPR1_ENET2_CLK_SEL };
 static const u32 enet2_ref_sels_table_mask = IMX6UL_GPR1_ENET2_TX_CLK_DIR |
                                             IMX6UL_GPR1_ENET2_CLK_SEL;
 
index ce81e40..2bfbab8 100644 (file)
@@ -17,7 +17,6 @@ static const struct regmap_config sprdclk_regmap_config = {
        .reg_bits       = 32,
        .reg_stride     = 4,
        .val_bits       = 32,
-       .max_register   = 0xffff,
        .fast_io        = true,
 };
 
@@ -43,6 +42,8 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
        struct device *dev = &pdev->dev;
        struct device_node *node = dev->of_node, *np;
        struct regmap *regmap;
+       struct resource *res;
+       struct regmap_config reg_config = sprdclk_regmap_config;
 
        if (of_find_property(node, "sprd,syscon", NULL)) {
                regmap = syscon_regmap_lookup_by_phandle(node, "sprd,syscon");
@@ -59,12 +60,14 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
                        return PTR_ERR(regmap);
                }
        } else {
-               base = devm_platform_ioremap_resource(pdev, 0);
+               base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
                if (IS_ERR(base))
                        return PTR_ERR(base);
 
+               reg_config.max_register = resource_size(res) - reg_config.reg_stride;
+
                regmap = devm_regmap_init_mmio(&pdev->dev, base,
-                                              &sprdclk_regmap_config);
+                                              &reg_config);
                if (IS_ERR(regmap)) {
                        pr_err("failed to init regmap\n");
                        return PTR_ERR(regmap);
index deed4af..d9cb937 100644 (file)
@@ -97,10 +97,6 @@ struct quad8 {
        struct quad8_reg __iomem *reg;
 };
 
-/* Borrow Toggle flip-flop */
-#define QUAD8_FLAG_BT BIT(0)
-/* Carry Toggle flip-flop */
-#define QUAD8_FLAG_CT BIT(1)
 /* Error flag */
 #define QUAD8_FLAG_E BIT(4)
 /* Up/Down flag */
@@ -133,6 +129,9 @@ struct quad8 {
 #define QUAD8_CMR_QUADRATURE_X2 0x10
 #define QUAD8_CMR_QUADRATURE_X4 0x18
 
+/* Each Counter is 24 bits wide */
+#define LS7267_CNTR_MAX GENMASK(23, 0)
+
 static int quad8_signal_read(struct counter_device *counter,
                             struct counter_signal *signal,
                             enum counter_signal_level *level)
@@ -156,18 +155,10 @@ static int quad8_count_read(struct counter_device *counter,
 {
        struct quad8 *const priv = counter_priv(counter);
        struct channel_reg __iomem *const chan = priv->reg->channel + count->id;
-       unsigned int flags;
-       unsigned int borrow;
-       unsigned int carry;
        unsigned long irqflags;
        int i;
 
-       flags = ioread8(&chan->control);
-       borrow = flags & QUAD8_FLAG_BT;
-       carry = !!(flags & QUAD8_FLAG_CT);
-
-       /* Borrow XOR Carry effectively doubles count range */
-       *val = (unsigned long)(borrow ^ carry) << 24;
+       *val = 0;
 
        spin_lock_irqsave(&priv->lock, irqflags);
 
@@ -191,8 +182,7 @@ static int quad8_count_write(struct counter_device *counter,
        unsigned long irqflags;
        int i;
 
-       /* Only 24-bit values are supported */
-       if (val > 0xFFFFFF)
+       if (val > LS7267_CNTR_MAX)
                return -ERANGE;
 
        spin_lock_irqsave(&priv->lock, irqflags);
@@ -378,7 +368,7 @@ static int quad8_action_read(struct counter_device *counter,
 
        /* Handle Index signals */
        if (synapse->signal->id >= 16) {
-               if (priv->preset_enable[count->id])
+               if (!priv->preset_enable[count->id])
                        *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
                else
                        *action = COUNTER_SYNAPSE_ACTION_NONE;
@@ -806,8 +796,7 @@ static int quad8_count_preset_write(struct counter_device *counter,
        struct quad8 *const priv = counter_priv(counter);
        unsigned long irqflags;
 
-       /* Only 24-bit values are supported */
-       if (preset > 0xFFFFFF)
+       if (preset > LS7267_CNTR_MAX)
                return -ERANGE;
 
        spin_lock_irqsave(&priv->lock, irqflags);
@@ -834,8 +823,7 @@ static int quad8_count_ceiling_read(struct counter_device *counter,
                *ceiling = priv->preset[count->id];
                break;
        default:
-               /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
-               *ceiling = 0x1FFFFFF;
+               *ceiling = LS7267_CNTR_MAX;
                break;
        }
 
@@ -850,8 +838,7 @@ static int quad8_count_ceiling_write(struct counter_device *counter,
        struct quad8 *const priv = counter_priv(counter);
        unsigned long irqflags;
 
-       /* Only 24-bit values are supported */
-       if (ceiling > 0xFFFFFF)
+       if (ceiling > LS7267_CNTR_MAX)
                return -ERANGE;
 
        spin_lock_irqsave(&priv->lock, irqflags);
index 73c7643..8dd46fa 100644 (file)
@@ -840,22 +840,20 @@ static int amd_pstate_update_status(const char *buf, size_t size)
 
        switch(mode_idx) {
        case AMD_PSTATE_DISABLE:
-               if (!current_pstate_driver)
-                       return -EINVAL;
-               if (cppc_state == AMD_PSTATE_ACTIVE)
-                       return -EBUSY;
-               cpufreq_unregister_driver(current_pstate_driver);
-               amd_pstate_driver_cleanup();
+               if (current_pstate_driver) {
+                       cpufreq_unregister_driver(current_pstate_driver);
+                       amd_pstate_driver_cleanup();
+               }
                break;
        case AMD_PSTATE_PASSIVE:
                if (current_pstate_driver) {
                        if (current_pstate_driver == &amd_pstate_driver)
                                return 0;
                        cpufreq_unregister_driver(current_pstate_driver);
-                       cppc_state = AMD_PSTATE_PASSIVE;
-                       current_pstate_driver = &amd_pstate_driver;
                }
 
+               current_pstate_driver = &amd_pstate_driver;
+               cppc_state = AMD_PSTATE_PASSIVE;
                ret = cpufreq_register_driver(current_pstate_driver);
                break;
        case AMD_PSTATE_ACTIVE:
@@ -863,10 +861,10 @@ static int amd_pstate_update_status(const char *buf, size_t size)
                        if (current_pstate_driver == &amd_pstate_epp_driver)
                                return 0;
                        cpufreq_unregister_driver(current_pstate_driver);
-                       current_pstate_driver = &amd_pstate_epp_driver;
-                       cppc_state = AMD_PSTATE_ACTIVE;
                }
 
+               current_pstate_driver = &amd_pstate_epp_driver;
+               cppc_state = AMD_PSTATE_ACTIVE;
                ret = cpufreq_register_driver(current_pstate_driver);
                break;
        default:
index 45deda1..02cc2c3 100644 (file)
@@ -101,25 +101,40 @@ static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
                                      BIT(CXL_CM_CAP_CAP_ID_HDM));
 }
 
-static struct cxl_hdm *devm_cxl_setup_emulated_hdm(struct cxl_port *port,
-                                                  struct cxl_endpoint_dvsec_info *info)
+static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
 {
-       struct device *dev = &port->dev;
        struct cxl_hdm *cxlhdm;
+       void __iomem *hdm;
+       u32 ctrl;
+       int i;
 
-       if (!info->mem_enabled)
-               return ERR_PTR(-ENODEV);
+       if (!info)
+               return false;
 
-       cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
-       if (!cxlhdm)
-               return ERR_PTR(-ENOMEM);
+       cxlhdm = dev_get_drvdata(&info->port->dev);
+       hdm = cxlhdm->regs.hdm_decoder;
 
-       cxlhdm->port = port;
-       cxlhdm->decoder_count = info->ranges;
-       cxlhdm->target_count = info->ranges;
-       dev_set_drvdata(&port->dev, cxlhdm);
+       if (!hdm)
+               return true;
 
-       return cxlhdm;
+       /*
+        * If HDM decoders are present and the driver is in control of
+        * Mem_Enable skip DVSEC based emulation
+        */
+       if (!info->mem_enabled)
+               return false;
+
+       /*
+        * If any decoders are committed already, there should not be any
+        * emulated DVSEC decoders.
+        */
+       for (i = 0; i < cxlhdm->decoder_count; i++) {
+               ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
+               if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
+                       return false;
+       }
+
+       return true;
 }
 
 /**
@@ -138,13 +153,14 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
        cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
        if (!cxlhdm)
                return ERR_PTR(-ENOMEM);
-
        cxlhdm->port = port;
-       crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
-       if (!crb) {
-               if (info && info->mem_enabled)
-                       return devm_cxl_setup_emulated_hdm(port, info);
+       dev_set_drvdata(dev, cxlhdm);
 
+       crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
+       if (!crb && info && info->mem_enabled) {
+               cxlhdm->decoder_count = info->ranges;
+               return cxlhdm;
+       } else if (!crb) {
                dev_err(dev, "No component registers mapped\n");
                return ERR_PTR(-ENXIO);
        }
@@ -160,7 +176,15 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
                return ERR_PTR(-ENXIO);
        }
 
-       dev_set_drvdata(dev, cxlhdm);
+       /*
+        * Now that the hdm capability is parsed, decide if range
+        * register emulation is needed and fixup cxlhdm accordingly.
+        */
+       if (should_emulate_decoders(info)) {
+               dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
+                       info->ranges > 1 ? "s" : "");
+               cxlhdm->decoder_count = info->ranges;
+       }
 
        return cxlhdm;
 }
@@ -714,14 +738,20 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
        return 0;
 }
 
-static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port,
-                                           struct cxl_decoder *cxld, int which,
-                                           struct cxl_endpoint_dvsec_info *info)
+static int cxl_setup_hdm_decoder_from_dvsec(
+       struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
+       int which, struct cxl_endpoint_dvsec_info *info)
 {
+       struct cxl_endpoint_decoder *cxled;
+       u64 len;
+       int rc;
+
        if (!is_cxl_endpoint(port))
                return -EOPNOTSUPP;
 
-       if (!range_len(&info->dvsec_range[which]))
+       cxled = to_cxl_endpoint_decoder(&cxld->dev);
+       len = range_len(&info->dvsec_range[which]);
+       if (!len)
                return -ENOENT;
 
        cxld->target_type = CXL_DECODER_EXPANDER;
@@ -736,40 +766,24 @@ static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port,
        cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
        port->commit_end = cxld->id;
 
-       return 0;
-}
-
-static bool should_emulate_decoders(struct cxl_port *port)
-{
-       struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
-       void __iomem *hdm = cxlhdm->regs.hdm_decoder;
-       u32 ctrl;
-       int i;
-
-       if (!is_cxl_endpoint(cxlhdm->port))
-               return false;
-
-       if (!hdm)
-               return true;
-
-       /*
-        * If any decoders are committed already, there should not be any
-        * emulated DVSEC decoders.
-        */
-       for (i = 0; i < cxlhdm->decoder_count; i++) {
-               ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
-               if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
-                       return false;
+       rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
+       if (rc) {
+               dev_err(&port->dev,
+                       "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)",
+                       port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
+               return rc;
        }
+       *dpa_base += len;
+       cxled->state = CXL_DECODER_STATE_AUTO;
 
-       return true;
+       return 0;
 }
 
 static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                            int *target_map, void __iomem *hdm, int which,
                            u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
 {
-       struct cxl_endpoint_decoder *cxled = NULL;
+       struct cxl_endpoint_decoder *cxled;
        u64 size, base, skip, dpa_size;
        bool committed;
        u32 remainder;
@@ -780,11 +794,9 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                unsigned char target_id[8];
        } target_list;
 
-       if (should_emulate_decoders(port))
-               return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info);
-
-       if (is_endpoint_decoder(&cxld->dev))
-               cxled = to_cxl_endpoint_decoder(&cxld->dev);
+       if (should_emulate_decoders(info))
+               return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
+                                                       which, info);
 
        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
        base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
@@ -806,9 +818,6 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                .end = base + size - 1,
        };
 
-       if (cxled && !committed && range_len(&info->dvsec_range[which]))
-               return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info);
-
        /* decoders are enabled if committed */
        if (committed) {
                cxld->flags |= CXL_DECODER_F_ENABLE;
@@ -846,7 +855,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
        if (rc)
                return rc;
 
-       if (!cxled) {
+       if (!info) {
                target_list.value =
                        ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
                for (i = 0; i < cxld->interleave_ways; i++)
@@ -866,6 +875,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                return -ENXIO;
        }
        skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
+       cxled = to_cxl_endpoint_decoder(&cxld->dev);
        rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
        if (rc) {
                dev_err(&port->dev,
index 7328a25..523d5b9 100644 (file)
@@ -462,7 +462,7 @@ static struct pci_doe_mb *find_cdat_doe(struct device *uport)
        return NULL;
 }
 
-#define CDAT_DOE_REQ(entry_handle)                                     \
+#define CDAT_DOE_REQ(entry_handle) cpu_to_le32                         \
        (FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE,                      \
                    CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) |               \
         FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE,                    \
@@ -475,8 +475,8 @@ static void cxl_doe_task_complete(struct pci_doe_task *task)
 }
 
 struct cdat_doe_task {
-       u32 request_pl;
-       u32 response_pl[32];
+       __le32 request_pl;
+       __le32 response_pl[32];
        struct completion c;
        struct pci_doe_task task;
 };
@@ -510,10 +510,10 @@ static int cxl_cdat_get_length(struct device *dev,
                return rc;
        }
        wait_for_completion(&t.c);
-       if (t.task.rv < sizeof(u32))
+       if (t.task.rv < 2 * sizeof(__le32))
                return -EIO;
 
-       *length = t.response_pl[1];
+       *length = le32_to_cpu(t.response_pl[1]);
        dev_dbg(dev, "CDAT length %zu\n", *length);
 
        return 0;
@@ -524,13 +524,13 @@ static int cxl_cdat_read_table(struct device *dev,
                               struct cxl_cdat *cdat)
 {
        size_t length = cdat->length;
-       u32 *data = cdat->table;
+       __le32 *data = cdat->table;
        int entry_handle = 0;
 
        do {
                DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t);
+               struct cdat_entry_header *entry;
                size_t entry_dw;
-               u32 *entry;
                int rc;
 
                rc = pci_doe_submit_task(cdat_doe, &t.task);
@@ -539,26 +539,34 @@ static int cxl_cdat_read_table(struct device *dev,
                        return rc;
                }
                wait_for_completion(&t.c);
-               /* 1 DW header + 1 DW data min */
-               if (t.task.rv < (2 * sizeof(u32)))
+
+               /* 1 DW Table Access Response Header + CDAT entry */
+               entry = (struct cdat_entry_header *)(t.response_pl + 1);
+               if ((entry_handle == 0 &&
+                    t.task.rv != sizeof(__le32) + sizeof(struct cdat_header)) ||
+                   (entry_handle > 0 &&
+                    (t.task.rv < sizeof(__le32) + sizeof(*entry) ||
+                     t.task.rv != sizeof(__le32) + le16_to_cpu(entry->length))))
                        return -EIO;
 
                /* Get the CXL table access header entry handle */
                entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
-                                        t.response_pl[0]);
-               entry = t.response_pl + 1;
-               entry_dw = t.task.rv / sizeof(u32);
+                                        le32_to_cpu(t.response_pl[0]));
+               entry_dw = t.task.rv / sizeof(__le32);
                /* Skip Header */
                entry_dw -= 1;
-               entry_dw = min(length / sizeof(u32), entry_dw);
+               entry_dw = min(length / sizeof(__le32), entry_dw);
                /* Prevent length < 1 DW from causing a buffer overflow */
                if (entry_dw) {
-                       memcpy(data, entry, entry_dw * sizeof(u32));
-                       length -= entry_dw * sizeof(u32);
+                       memcpy(data, entry, entry_dw * sizeof(__le32));
+                       length -= entry_dw * sizeof(__le32);
                        data += entry_dw;
                }
        } while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);
 
+       /* Length in CDAT header may exceed concatenation of CDAT entries */
+       cdat->length -= length;
+
        return 0;
 }
 
index c2e4b10..f8c38d9 100644 (file)
@@ -62,9 +62,9 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
        return is_cxl_nvdimm_bridge(dev);
 }
 
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *start)
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd)
 {
-       struct cxl_port *port = find_cxl_root(start);
+       struct cxl_port *port = find_cxl_root(dev_get_drvdata(&cxlmd->dev));
        struct device *dev;
 
        if (!port)
@@ -253,7 +253,7 @@ int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd)
        struct device *dev;
        int rc;
 
-       cxl_nvb = cxl_find_nvdimm_bridge(&cxlmd->dev);
+       cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
        if (!cxl_nvb)
                return -ENODEV;
 
index 8ee6b6e..4d1f9c5 100644 (file)
@@ -823,41 +823,17 @@ static bool dev_is_cxl_root_child(struct device *dev)
        return false;
 }
 
-/* Find a 2nd level CXL port that has a dport that is an ancestor of @match */
-static int match_root_child(struct device *dev, const void *match)
+struct cxl_port *find_cxl_root(struct cxl_port *port)
 {
-       const struct device *iter = NULL;
-       struct cxl_dport *dport;
-       struct cxl_port *port;
-
-       if (!dev_is_cxl_root_child(dev))
-               return 0;
-
-       port = to_cxl_port(dev);
-       iter = match;
-       while (iter) {
-               dport = cxl_find_dport_by_dev(port, iter);
-               if (dport)
-                       break;
-               iter = iter->parent;
-       }
-
-       return !!iter;
-}
+       struct cxl_port *iter = port;
 
-struct cxl_port *find_cxl_root(struct device *dev)
-{
-       struct device *port_dev;
-       struct cxl_port *root;
+       while (iter && !is_cxl_root(iter))
+               iter = to_cxl_port(iter->dev.parent);
 
-       port_dev = bus_find_device(&cxl_bus_type, NULL, dev, match_root_child);
-       if (!port_dev)
+       if (!iter)
                return NULL;
-
-       root = to_cxl_port(port_dev->parent);
-       get_device(&root->dev);
-       put_device(port_dev);
-       return root;
+       get_device(&iter->dev);
+       return iter;
 }
 EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
 
index f290281..b2fd67f 100644 (file)
@@ -134,9 +134,13 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
                struct cxl_endpoint_decoder *cxled = p->targets[i];
                struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
                struct cxl_port *iter = cxled_to_port(cxled);
+               struct cxl_dev_state *cxlds = cxlmd->cxlds;
                struct cxl_ep *ep;
                int rc = 0;
 
+               if (cxlds->rcd)
+                       goto endpoint_reset;
+
                while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
                        iter = to_cxl_port(iter->dev.parent);
 
@@ -153,6 +157,7 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
                                return rc;
                }
 
+endpoint_reset:
                rc = cxled->cxld.reset(&cxled->cxld);
                if (rc)
                        return rc;
@@ -1199,6 +1204,7 @@ static void cxl_region_teardown_targets(struct cxl_region *cxlr)
 {
        struct cxl_region_params *p = &cxlr->params;
        struct cxl_endpoint_decoder *cxled;
+       struct cxl_dev_state *cxlds;
        struct cxl_memdev *cxlmd;
        struct cxl_port *iter;
        struct cxl_ep *ep;
@@ -1214,6 +1220,10 @@ static void cxl_region_teardown_targets(struct cxl_region *cxlr)
        for (i = 0; i < p->nr_targets; i++) {
                cxled = p->targets[i];
                cxlmd = cxled_to_memdev(cxled);
+               cxlds = cxlmd->cxlds;
+
+               if (cxlds->rcd)
+                       continue;
 
                iter = cxled_to_port(cxled);
                while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
@@ -1229,14 +1239,24 @@ static int cxl_region_setup_targets(struct cxl_region *cxlr)
 {
        struct cxl_region_params *p = &cxlr->params;
        struct cxl_endpoint_decoder *cxled;
+       struct cxl_dev_state *cxlds;
+       int i, rc, rch = 0, vh = 0;
        struct cxl_memdev *cxlmd;
        struct cxl_port *iter;
        struct cxl_ep *ep;
-       int i, rc;
 
        for (i = 0; i < p->nr_targets; i++) {
                cxled = p->targets[i];
                cxlmd = cxled_to_memdev(cxled);
+               cxlds = cxlmd->cxlds;
+
+               /* validate that all targets agree on topology */
+               if (!cxlds->rcd) {
+                       vh++;
+               } else {
+                       rch++;
+                       continue;
+               }
 
                iter = cxled_to_port(cxled);
                while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
@@ -1256,6 +1276,12 @@ static int cxl_region_setup_targets(struct cxl_region *cxlr)
                }
        }
 
+       if (rch && vh) {
+               dev_err(&cxlr->dev, "mismatched CXL topologies detected\n");
+               cxl_region_teardown_targets(cxlr);
+               return -ENXIO;
+       }
+
        return 0;
 }
 
@@ -1648,6 +1674,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
                if (rc)
                        goto err_decrement;
                p->state = CXL_CONFIG_ACTIVE;
+               set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
        }
 
        cxled->cxld.interleave_ways = p->interleave_ways;
@@ -1749,8 +1776,6 @@ static int attach_target(struct cxl_region *cxlr,
 
        down_read(&cxl_dpa_rwsem);
        rc = cxl_region_attach(cxlr, cxled, pos);
-       if (rc == 0)
-               set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags);
        up_read(&cxl_dpa_rwsem);
        up_write(&cxl_region_rwsem);
        return rc;
@@ -2251,7 +2276,7 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
                 * bridge for one device is the same for all.
                 */
                if (i == 0) {
-                       cxl_nvb = cxl_find_nvdimm_bridge(&cxlmd->dev);
+                       cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
                        if (!cxl_nvb) {
                                cxlr_pmem = ERR_PTR(-ENODEV);
                                goto out;
index f2b0962..044a92d 100644 (file)
@@ -658,7 +658,7 @@ struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port);
 struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
                                   resource_size_t component_reg_phys,
                                   struct cxl_dport *parent_dport);
-struct cxl_port *find_cxl_root(struct device *dev);
+struct cxl_port *find_cxl_root(struct cxl_port *port);
 int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
 void cxl_bus_rescan(void);
 void cxl_bus_drain(void);
@@ -695,13 +695,15 @@ int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
 
 /**
  * struct cxl_endpoint_dvsec_info - Cached DVSEC info
- * @mem_enabled: cached value of mem_enabled in the DVSEC, PCIE_DEVICE
+ * @mem_enabled: cached value of mem_enabled in the DVSEC at init time
  * @ranges: Number of active HDM ranges this device uses.
+ * @port: endpoint port associated with this info instance
  * @dvsec_range: cached attributes of the ranges in the DVSEC, PCIE_DEVICE
  */
 struct cxl_endpoint_dvsec_info {
        bool mem_enabled;
        int ranges;
+       struct cxl_port *port;
        struct range dvsec_range[2];
 };
 
@@ -758,7 +760,7 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
 bool is_cxl_nvdimm(struct device *dev);
 bool is_cxl_nvdimm_bridge(struct device *dev);
 int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd);
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *dev);
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd);
 
 #ifdef CONFIG_CXL_REGION
 bool is_cxl_pmem_region(struct device *dev);
index be6a2ef..0465ef9 100644 (file)
@@ -68,6 +68,20 @@ enum cxl_regloc_type {
        CXL_REGLOC_RBI_TYPES
 };
 
+struct cdat_header {
+       __le32 length;
+       u8 revision;
+       u8 checksum;
+       u8 reserved[6];
+       __le32 sequence;
+} __packed;
+
+struct cdat_entry_header {
+       u8 type;
+       u8 reserved;
+       __le16 length;
+} __packed;
+
 int devm_cxl_port_enumerate_dports(struct cxl_port *port);
 struct cxl_dev_state;
 int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
index 1049bb5..22a7ab2 100644 (file)
@@ -78,8 +78,8 @@ static int cxl_switch_port_probe(struct cxl_port *port)
 
 static int cxl_endpoint_port_probe(struct cxl_port *port)
 {
+       struct cxl_endpoint_dvsec_info info = { .port = port };
        struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport);
-       struct cxl_endpoint_dvsec_info info = { 0 };
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_hdm *cxlhdm;
        struct cxl_port *root;
@@ -119,7 +119,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
         * This can't fail in practice as CXL root exit unregisters all
         * descendant ports and that in turn synchronizes with cxl_port_probe()
         */
-       root = find_cxl_root(&cxlmd->dev);
+       root = find_cxl_root(port);
 
        /*
         * Now that all endpoint decoders are successfully enumerated, try to
index 90f28bd..4cf8da7 100644 (file)
@@ -75,6 +75,7 @@
 
 #define REG_TX_INTSTATE(idx)           (0x0030 + (idx) * 4)
 #define REG_RX_INTSTATE(idx)           (0x0040 + (idx) * 4)
+#define REG_GLOBAL_INTSTATE(idx)       (0x0050 + (idx) * 4)
 #define REG_CHAN_INTSTATUS(ch, idx)    (0x8010 + (ch) * 0x200 + (idx) * 4)
 #define REG_CHAN_INTMASK(ch, idx)      (0x8020 + (ch) * 0x200 + (idx) * 4)
 
@@ -511,7 +512,10 @@ static int admac_terminate_all(struct dma_chan *chan)
        admac_stop_chan(adchan);
        admac_reset_rings(adchan);
 
-       adchan->current_tx = NULL;
+       if (adchan->current_tx) {
+               list_add_tail(&adchan->current_tx->node, &adchan->to_free);
+               adchan->current_tx = NULL;
+       }
        /*
         * Descriptors can only be freed after the tasklet
         * has been killed (in admac_synchronize).
@@ -672,13 +676,14 @@ static void admac_handle_chan_int(struct admac_data *ad, int no)
 static irqreturn_t admac_interrupt(int irq, void *devid)
 {
        struct admac_data *ad = devid;
-       u32 rx_intstate, tx_intstate;
+       u32 rx_intstate, tx_intstate, global_intstate;
        int i;
 
        rx_intstate = readl_relaxed(ad->base + REG_RX_INTSTATE(ad->irq_index));
        tx_intstate = readl_relaxed(ad->base + REG_TX_INTSTATE(ad->irq_index));
+       global_intstate = readl_relaxed(ad->base + REG_GLOBAL_INTSTATE(ad->irq_index));
 
-       if (!tx_intstate && !rx_intstate)
+       if (!tx_intstate && !rx_intstate && !global_intstate)
                return IRQ_NONE;
 
        for (i = 0; i < ad->nchannels; i += 2) {
@@ -693,6 +698,12 @@ static irqreturn_t admac_interrupt(int irq, void *devid)
                rx_intstate >>= 1;
        }
 
+       if (global_intstate) {
+               dev_warn(ad->dev, "clearing unknown global interrupt flag: %x\n",
+                        global_intstate);
+               writel_relaxed(~(u32) 0, ad->base + REG_GLOBAL_INTSTATE(ad->irq_index));
+       }
+
        return IRQ_HANDLED;
 }
 
@@ -850,6 +861,9 @@ static int admac_probe(struct platform_device *pdev)
 
        dma->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
        dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+       dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+                       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+                       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
                        BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
                        BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
index c24bca2..826b982 100644 (file)
@@ -1342,7 +1342,7 @@ int dmaenginem_async_device_register(struct dma_device *device)
        if (ret)
                return ret;
 
-       return devm_add_action(device->dev, dmaenginem_async_device_unregister, device);
+       return devm_add_action_or_reset(device->dev, dmaenginem_async_device_unregister, device);
 }
 EXPORT_SYMBOL(dmaenginem_async_device_register);
 
index 462109c..93ee298 100644 (file)
@@ -277,7 +277,7 @@ failed:
 
 /**
  * xdma_xfer_start - Start DMA transfer
- * @xdma_chan: DMA channel pointer
+ * @xchan: DMA channel pointer
  */
 static int xdma_xfer_start(struct xdma_chan *xchan)
 {
index 29619f4..d9629ff 100644 (file)
@@ -167,7 +167,8 @@ int psci_set_osi_mode(bool enable)
 
        err = invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE, suspend_mode, 0, 0);
        if (err < 0)
-               pr_warn("failed to set %s mode: %d\n", enable ? "OSI" : "PC", err);
+               pr_info(FW_BUG "failed to set %s mode: %d\n",
+                               enable ? "OSI" : "PC", err);
        return psci_to_linux_errno(err);
 }
 
index 13be729..badbe05 100644 (file)
@@ -100,7 +100,7 @@ config GPIO_GENERIC
        tristate
 
 config GPIO_REGMAP
-       depends on REGMAP
+       select REGMAP
        tristate
 
 # put drivers in the right section, in alphabetical order
index 26b1f74..43b2dc8 100644 (file)
@@ -324,7 +324,7 @@ static struct irq_chip gpio_irqchip = {
        .irq_enable     = gpio_irq_enable,
        .irq_disable    = gpio_irq_disable,
        .irq_set_type   = gpio_irq_type,
-       .flags          = IRQCHIP_SET_TYPE_MASKED,
+       .flags          = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
 };
 
 static void gpio_irq_handler(struct irq_desc *desc)
@@ -641,9 +641,6 @@ static void davinci_gpio_save_context(struct davinci_gpio_controller *chips,
                context->set_falling = readl_relaxed(&g->set_falling);
        }
 
-       /* Clear Bank interrupt enable bit */
-       writel_relaxed(0, base + BINTEN);
-
        /* Clear all interrupt status registers */
        writel_relaxed(GENMASK(31, 0), &g->intstat);
 }
index 60b1857..aeeec21 100644 (file)
@@ -981,7 +981,12 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
  */
 bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
 {
-       if (adev->flags & AMD_IS_APU)
+       if ((adev->flags & AMD_IS_APU) &&
+           adev->gfx.imu.funcs) /* Not need to do mode2 reset for IMU enabled APUs */
+               return false;
+
+       if ((adev->flags & AMD_IS_APU) &&
+           amdgpu_acpi_is_s3_active(adev))
                return false;
 
        if (amdgpu_sriov_vf(adev))
index 1583157..efd025d 100644 (file)
@@ -177,6 +177,40 @@ void dm_helpers_dp_update_branch_info(
        const struct dc_link *link)
 {}
 
+static void dm_helpers_construct_old_payload(
+                       struct dc_link *link,
+                       int pbn_per_slot,
+                       struct drm_dp_mst_atomic_payload *new_payload,
+                       struct drm_dp_mst_atomic_payload *old_payload)
+{
+       struct link_mst_stream_allocation_table current_link_table =
+                                                                       link->mst_stream_alloc_table;
+       struct link_mst_stream_allocation *dc_alloc;
+       int i;
+
+       *old_payload = *new_payload;
+
+       /* Set correct time_slots/PBN of old payload.
+        * other fields (delete & dsc_enabled) in
+        * struct drm_dp_mst_atomic_payload are don't care fields
+        * while calling drm_dp_remove_payload()
+        */
+       for (i = 0; i < current_link_table.stream_count; i++) {
+               dc_alloc =
+                       &current_link_table.stream_allocations[i];
+
+               if (dc_alloc->vcp_id == new_payload->vcpi) {
+                       old_payload->time_slots = dc_alloc->slot_count;
+                       old_payload->pbn = dc_alloc->slot_count * pbn_per_slot;
+                       break;
+               }
+       }
+
+       /* make sure there is an old payload*/
+       ASSERT(i != current_link_table.stream_count);
+
+}
+
 /*
  * Writes payload allocation table in immediate downstream device.
  */
@@ -188,7 +222,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
 {
        struct amdgpu_dm_connector *aconnector;
        struct drm_dp_mst_topology_state *mst_state;
-       struct drm_dp_mst_atomic_payload *payload;
+       struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload;
        struct drm_dp_mst_topology_mgr *mst_mgr;
 
        aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
@@ -204,17 +238,26 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
        mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
 
        /* It's OK for this to fail */
-       payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
-       if (enable)
-               drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
-       else
-               drm_dp_remove_payload(mst_mgr, mst_state, payload, payload);
+       new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
+
+       if (enable) {
+               target_payload = new_payload;
+
+               drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
+       } else {
+               /* construct old payload by VCPI*/
+               dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div,
+                                               new_payload, &old_payload);
+               target_payload = &old_payload;
+
+               drm_dp_remove_payload(mst_mgr, mst_state, &old_payload, new_payload);
+       }
 
        /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
         * AUX message. The sequence is slot 1-63 allocated sequence for each
         * stream. AMD ASIC stream slot allocation should follow the same
         * sequence. copy DRM MST allocation to dc */
-       fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);
+       fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);
 
        return true;
 }
index e25e1b2..8dc442f 100644 (file)
@@ -212,6 +212,21 @@ bool needs_dsc_aux_workaround(struct dc_link *link)
        return false;
 }
 
+bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port)
+{
+       u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F
+
+       if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) {
+               if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+                               IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) {
+                       DRM_INFO("Synaptics Cascaded MST hub\n");
+                       return true;
+               }
+       }
+
+       return false;
+}
+
 static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
 {
        struct dc_sink *dc_sink = aconnector->dc_sink;
@@ -235,6 +250,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
            needs_dsc_aux_workaround(aconnector->dc_link))
                aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;
 
+       /* synaptics cascaded MST hub case */
+       if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
+               aconnector->dsc_aux = port->mgr->aux;
+
        if (!aconnector->dsc_aux)
                return false;
 
@@ -662,12 +681,25 @@ struct dsc_mst_fairness_params {
        struct amdgpu_dm_connector *aconnector;
 };
 
-static int kbps_to_peak_pbn(int kbps)
+static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
+{
+       u8 link_coding_cap;
+       uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
+
+       link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
+       if (link_coding_cap == DP_128b_132b_ENCODING)
+               fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
+
+       return fec_overhead_multiplier_x1000;
+}
+
+static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
 {
        u64 peak_kbps = kbps;
 
        peak_kbps *= 1006;
-       peak_kbps = div_u64(peak_kbps, 1000);
+       peak_kbps *= fec_overhead_multiplier_x1000;
+       peak_kbps = div_u64(peak_kbps, 1000 * 1000);
        return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
 }
 
@@ -761,11 +793,12 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
        int link_timeslots_used;
        int fair_pbn_alloc;
        int ret = 0;
+       uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
 
        for (i = 0; i < count; i++) {
                if (vars[i + k].dsc_enabled) {
                        initial_slack[i] =
-                       kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
+                       kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
                        bpp_increased[i] = false;
                        remaining_to_increase += 1;
                } else {
@@ -861,6 +894,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
        int next_index;
        int remaining_to_try = 0;
        int ret;
+       uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
 
        for (i = 0; i < count; i++) {
                if (vars[i + k].dsc_enabled
@@ -890,7 +924,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
                if (next_index == -1)
                        break;
 
-               vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
+               vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
                ret = drm_dp_atomic_find_time_slots(state,
                                                    params[next_index].port->mgr,
                                                    params[next_index].port,
@@ -903,7 +937,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
                        vars[next_index].dsc_enabled = false;
                        vars[next_index].bpp_x16 = 0;
                } else {
-                       vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
+                       vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
                        ret = drm_dp_atomic_find_time_slots(state,
                                                            params[next_index].port->mgr,
                                                            params[next_index].port,
@@ -932,6 +966,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
        int count = 0;
        int i, k, ret;
        bool debugfs_overwrite = false;
+       uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
 
        memset(params, 0, sizeof(params));
 
@@ -993,7 +1028,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
        /* Try no compression */
        for (i = 0; i < count; i++) {
                vars[i + k].aconnector = params[i].aconnector;
-               vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+               vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
                vars[i + k].dsc_enabled = false;
                vars[i + k].bpp_x16 = 0;
                ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1012,7 +1047,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
        /* Try max compression */
        for (i = 0; i < count; i++) {
                if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
-                       vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
+                       vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
                        vars[i + k].dsc_enabled = true;
                        vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
                        ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1020,7 +1055,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
                        if (ret < 0)
                                return ret;
                } else {
-                       vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
+                       vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
                        vars[i + k].dsc_enabled = false;
                        vars[i + k].bpp_x16 = 0;
                        ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
index 97fd70d..1e4ede1 100644 (file)
 #define SYNAPTICS_RC_OFFSET        0x4BC
 #define SYNAPTICS_RC_DATA          0x4C0
 
+#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C
+
+/**
+ * Panamera MST Hub detection
+ * Offset DPCD 050Eh == 0x5A indicates cascaded MST hub case
+ * Check from beginning of branch device vendor specific field (050Ch)
+ */
+#define IS_SYNAPTICS_PANAMERA(branchDevName) (((int)branchDevName[4] & 0xF0) == 0x50 ? 1 : 0)
+#define BRANCH_HW_REVISION_PANAMERA_A2 0x10
+#define SYNAPTICS_CASCADED_HUB_ID  0x5A
+#define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0)
+
+#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B     1031
+#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B  1000
+
 struct amdgpu_display_manager;
 struct amdgpu_dm_connector;
 
index f085cb9..85a090b 100644 (file)
 #define CTF_OFFSET_HOTSPOT             5
 #define CTF_OFFSET_MEM                 5
 
+static const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5};
+static const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
+
+#define DECODE_GEN_SPEED(gen_speed_idx)                (pmfw_decoded_link_speed[gen_speed_idx])
+#define DECODE_LANE_WIDTH(lane_width_idx)      (pmfw_decoded_link_width[lane_width_idx])
+
 struct smu_13_0_max_sustainable_clocks {
        uint32_t display_clock;
        uint32_t phy_clock;
index 27448ff..a5c97d6 100644 (file)
@@ -1144,8 +1144,8 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
                                        (pcie_table->pcie_lane[i] == 5) ? "x12" :
                                        (pcie_table->pcie_lane[i] == 6) ? "x16" : "",
                                        pcie_table->clk_freq[i],
-                                       ((gen_speed - 1) == pcie_table->pcie_gen[i]) &&
-                                       (lane_width == link_width[pcie_table->pcie_lane[i]]) ?
+                                       (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
+                                       (lane_width == DECODE_LANE_WIDTH(link_width[pcie_table->pcie_lane[i]])) ?
                                        "*" : "");
                break;
 
index 9e1967d..4399416 100644 (file)
@@ -575,6 +575,14 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
                                                     dpm_table);
                if (ret)
                        return ret;
+
+               if (skutable->DriverReportedClocks.GameClockAc &&
+                       (dpm_table->dpm_levels[dpm_table->count - 1].value >
+                       skutable->DriverReportedClocks.GameClockAc)) {
+                       dpm_table->dpm_levels[dpm_table->count - 1].value =
+                               skutable->DriverReportedClocks.GameClockAc;
+                       dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
+               }
        } else {
                dpm_table->count = 1;
                dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
@@ -828,6 +836,57 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
        return ret;
 }
 
+static int smu_v13_0_7_get_dpm_ultimate_freq(struct smu_context *smu,
+                                            enum smu_clk_type clk_type,
+                                            uint32_t *min,
+                                            uint32_t *max)
+{
+       struct smu_13_0_dpm_context *dpm_context =
+               smu->smu_dpm.dpm_context;
+       struct smu_13_0_dpm_table *dpm_table;
+
+       switch (clk_type) {
+       case SMU_MCLK:
+       case SMU_UCLK:
+               /* uclk dpm table */
+               dpm_table = &dpm_context->dpm_tables.uclk_table;
+               break;
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+               /* gfxclk dpm table */
+               dpm_table = &dpm_context->dpm_tables.gfx_table;
+               break;
+       case SMU_SOCCLK:
+               /* socclk dpm table */
+               dpm_table = &dpm_context->dpm_tables.soc_table;
+               break;
+       case SMU_FCLK:
+               /* fclk dpm table */
+               dpm_table = &dpm_context->dpm_tables.fclk_table;
+               break;
+       case SMU_VCLK:
+       case SMU_VCLK1:
+               /* vclk dpm table */
+               dpm_table = &dpm_context->dpm_tables.vclk_table;
+               break;
+       case SMU_DCLK:
+       case SMU_DCLK1:
+               /* dclk dpm table */
+               dpm_table = &dpm_context->dpm_tables.dclk_table;
+               break;
+       default:
+               dev_err(smu->adev->dev, "Unsupported clock type!\n");
+               return -EINVAL;
+       }
+
+       if (min)
+               *min = dpm_table->min;
+       if (max)
+               *max = dpm_table->max;
+
+       return 0;
+}
+
 static int smu_v13_0_7_read_sensor(struct smu_context *smu,
                                   enum amd_pp_sensors sensor,
                                   void *data,
@@ -1074,8 +1133,8 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
                                        (pcie_table->pcie_lane[i] == 5) ? "x12" :
                                        (pcie_table->pcie_lane[i] == 6) ? "x16" : "",
                                        pcie_table->clk_freq[i],
-                                       (gen_speed == pcie_table->pcie_gen[i]) &&
-                                       (lane_width == pcie_table->pcie_lane[i]) ?
+                                       (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
+                                       (lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
                                        "*" : "");
                break;
 
@@ -1329,9 +1388,17 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
                                &dpm_context->dpm_tables.fclk_table;
        struct smu_umd_pstate_table *pstate_table =
                                &smu->pstate_table;
+       struct smu_table_context *table_context = &smu->smu_table;
+       PPTable_t *pptable = table_context->driver_pptable;
+       DriverReportedClocks_t driver_clocks =
+               pptable->SkuTable.DriverReportedClocks;
 
        pstate_table->gfxclk_pstate.min = gfx_table->min;
-       pstate_table->gfxclk_pstate.peak = gfx_table->max;
+       if (driver_clocks.GameClockAc &&
+               (driver_clocks.GameClockAc < gfx_table->max))
+               pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
+       else
+               pstate_table->gfxclk_pstate.peak = gfx_table->max;
 
        pstate_table->uclk_pstate.min = mem_table->min;
        pstate_table->uclk_pstate.peak = mem_table->max;
@@ -1348,12 +1415,12 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
        pstate_table->fclk_pstate.min = fclk_table->min;
        pstate_table->fclk_pstate.peak = fclk_table->max;
 
-       /*
-        * For now, just use the mininum clock frequency.
-        * TODO: update them when the real pstate settings available
-        */
-       pstate_table->gfxclk_pstate.standard = gfx_table->min;
-       pstate_table->uclk_pstate.standard = mem_table->min;
+       if (driver_clocks.BaseClockAc &&
+               driver_clocks.BaseClockAc < gfx_table->max)
+               pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
+       else
+               pstate_table->gfxclk_pstate.standard = gfx_table->max;
+       pstate_table->uclk_pstate.standard = mem_table->max;
        pstate_table->socclk_pstate.standard = soc_table->min;
        pstate_table->vclk_pstate.standard = vclk_table->min;
        pstate_table->dclk_pstate.standard = dclk_table->min;
@@ -1676,7 +1743,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
        .dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
        .init_pptable_microcode = smu_v13_0_init_pptable_microcode,
        .populate_umd_state_clk = smu_v13_0_7_populate_umd_state_clk,
-       .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
+       .get_dpm_ultimate_freq = smu_v13_0_7_get_dpm_ultimate_freq,
        .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
        .read_sensor = smu_v13_0_7_read_sensor,
        .feature_is_enabled = smu_cmn_feature_is_enabled,
index 0643887..142668c 100644 (file)
@@ -99,7 +99,6 @@ static int armada_drm_bind(struct device *dev)
        if (ret) {
                dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n",
                        __func__, ret);
-               kfree(priv);
                return ret;
        }
 
index 3d1f50f..7098f12 100644 (file)
@@ -146,8 +146,8 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
                unsigned int order;
                u64 root_size;
 
-               root_size = rounddown_pow_of_two(size);
-               order = ilog2(root_size) - ilog2(chunk_size);
+               order = ilog2(size) - ilog2(chunk_size);
+               root_size = chunk_size << order;
 
                root = drm_block_alloc(mm, NULL, order, offset);
                if (!root)
index 44ca803..31a7f59 100644 (file)
@@ -22,7 +22,6 @@
 #include "etnaviv_gem.h"
 #include "etnaviv_mmu.h"
 #include "etnaviv_perfmon.h"
-#include "common.xml.h"
 
 /*
  * DRM operations:
@@ -476,47 +475,7 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
        ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
 };
 
-static void etnaviv_fop_show_fdinfo(struct seq_file *m, struct file *f)
-{
-       struct drm_file *file = f->private_data;
-       struct drm_device *dev = file->minor->dev;
-       struct etnaviv_drm_private *priv = dev->dev_private;
-       struct etnaviv_file_private *ctx = file->driver_priv;
-
-       /*
-        * For a description of the text output format used here, see
-        * Documentation/gpu/drm-usage-stats.rst.
-        */
-       seq_printf(m, "drm-driver:\t%s\n", dev->driver->name);
-       seq_printf(m, "drm-client-id:\t%u\n", ctx->id);
-
-       for (int i = 0; i < ETNA_MAX_PIPES; i++) {
-               struct etnaviv_gpu *gpu = priv->gpu[i];
-               char engine[10] = "UNK";
-               int cur = 0;
-
-               if (!gpu)
-                       continue;
-
-               if (gpu->identity.features & chipFeatures_PIPE_2D)
-                       cur = snprintf(engine, sizeof(engine), "2D");
-               if (gpu->identity.features & chipFeatures_PIPE_3D)
-                       cur = snprintf(engine + cur, sizeof(engine) - cur,
-                                      "%s3D", cur ? "/" : "");
-               if (gpu->identity.nn_core_count > 0)
-                       cur = snprintf(engine + cur, sizeof(engine) - cur,
-                                      "%sNN", cur ? "/" : "");
-
-               seq_printf(m, "drm-engine-%s:\t%llu ns\n", engine,
-                          ctx->sched_entity[i].elapsed_ns);
-       }
-}
-
-static const struct file_operations fops = {
-       .owner = THIS_MODULE,
-       DRM_GEM_FOPS,
-       .show_fdinfo = etnaviv_fop_show_fdinfo,
-};
+DEFINE_DRM_GEM_FOPS(fops);
 
 static const struct drm_driver etnaviv_drm_driver = {
        .driver_features    = DRIVER_GEM | DRIVER_RENDER,
index 7031db1..3524b58 100644 (file)
@@ -91,7 +91,15 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
 static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
 {
-       return dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
+       int ret;
+
+       ret = dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
+       if (!ret) {
+               /* Drop the reference acquired by drm_gem_mmap_obj(). */
+               drm_gem_object_put(&etnaviv_obj->base);
+       }
+
+       return ret;
 }
 
 static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
index 468a792..fc0eaf4 100644 (file)
@@ -300,9 +300,21 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+       i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
        u32 dss_ctl1;
 
-       dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
+       /* FIXME: Move all DSS handling to intel_vdsc.c */
+       if (DISPLAY_VER(dev_priv) >= 12) {
+               struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+
+               dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe);
+               dss_ctl2_reg = ICL_PIPE_DSS_CTL2(crtc->pipe);
+       } else {
+               dss_ctl1_reg = DSS_CTL1;
+               dss_ctl2_reg = DSS_CTL2;
+       }
+
+       dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg);
        dss_ctl1 |= SPLITTER_ENABLE;
        dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
        dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
@@ -323,16 +335,16 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
 
                dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
                dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
-               dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
+               dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg);
                dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
                dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
-               intel_de_write(dev_priv, DSS_CTL2, dss_ctl2);
+               intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2);
        } else {
                /* Interleave */
                dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
        }
 
-       intel_de_write(dev_priv, DSS_CTL1, dss_ctl1);
+       intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1);
 }
 
 /* aka DSI 8X clock */
index 8d97c29..bd598a7 100644 (file)
@@ -47,6 +47,11 @@ struct intel_color_funcs {
         */
        void (*color_commit_arm)(const struct intel_crtc_state *crtc_state);
        /*
+        * Perform any extra tasks needed after all the
+        * double buffered registers have been latched.
+        */
+       void (*color_post_update)(const struct intel_crtc_state *crtc_state);
+       /*
         * Load LUTs (and other single buffered color management
         * registers). Will (hopefully) be called during the vblank
         * following the latching of any double buffered registers
@@ -614,9 +619,33 @@ static void ilk_lut_12p4_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
 
 static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
 {
+       /*
+        * Despite Wa_1406463849, ICL no longer suffers from the SKL
+        * DC5/PSR CSC black screen issue (see skl_color_commit_noarm()).
+        * Possibly due to the extra sticky CSC arming
+        * (see icl_color_post_update()).
+        *
+        * On TGL+ all CSC arming issues have been properly fixed.
+        */
        icl_load_csc_matrix(crtc_state);
 }
 
+static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+{
+       /*
+        * Possibly related to display WA #1184, SKL CSC loses the latched
+        * CSC coeff/offset register values if the CSC registers are disarmed
+        * between DC5 exit and PSR exit. This will cause the plane(s) to
+        * output all black (until CSC_MODE is rearmed and properly latched).
+        * Once PSR exit (and proper register latching) has occurred the
+        * danger is over. Thus when PSR is enabled the CSC coeff/offset
+        * register programming will be peformed from skl_color_commit_arm()
+        * which is called after PSR exit.
+        */
+       if (!crtc_state->has_psr)
+               ilk_load_csc_matrix(crtc_state);
+}
+
 static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
 {
        ilk_load_csc_matrix(crtc_state);
@@ -659,6 +688,9 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
        enum pipe pipe = crtc->pipe;
        u32 val = 0;
 
+       if (crtc_state->has_psr)
+               ilk_load_csc_matrix(crtc_state);
+
        /*
         * We don't (yet) allow userspace to control the pipe background color,
         * so force it to black, but apply pipe gamma and CSC appropriately
@@ -677,6 +709,47 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
                          crtc_state->csc_mode);
 }
 
+static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+       struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
+
+       /*
+        * We don't (yet) allow userspace to control the pipe background color,
+        * so force it to black.
+        */
+       intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), 0);
+
+       intel_de_write(i915, GAMMA_MODE(crtc->pipe),
+                      crtc_state->gamma_mode);
+
+       intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
+                         crtc_state->csc_mode);
+}
+
+static void icl_color_post_update(const struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+       struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+       /*
+        * Despite Wa_1406463849, ICL CSC is no longer disarmed by
+        * coeff/offset register *writes*. Instead, once CSC_MODE
+        * is armed it stays armed, even after it has been latched.
+        * Afterwards the coeff/offset registers become effectively
+        * self-arming. That self-arming must be disabled before the
+        * next icl_color_commit_noarm() tries to write the next set
+        * of coeff/offset registers. Fortunately register *reads*
+        * do still disarm the CSC. Naturally this must not be done
+        * until the previously written CSC registers have actually
+        * been latched.
+        *
+        * TGL+ no longer need this workaround.
+        */
+       intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(crtc->pipe));
+}
+
 static struct drm_property_blob *
 create_linear_lut(struct drm_i915_private *i915, int lut_size)
 {
@@ -1373,6 +1446,14 @@ void intel_color_commit_arm(const struct intel_crtc_state *crtc_state)
        i915->display.funcs.color->color_commit_arm(crtc_state);
 }
 
+void intel_color_post_update(const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+       if (i915->display.funcs.color->color_post_update)
+               i915->display.funcs.color->color_post_update(crtc_state);
+}
+
 void intel_color_prepare_commit(struct intel_crtc_state *crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -3064,10 +3145,20 @@ static const struct intel_color_funcs i9xx_color_funcs = {
        .lut_equal = i9xx_lut_equal,
 };
 
+static const struct intel_color_funcs tgl_color_funcs = {
+       .color_check = icl_color_check,
+       .color_commit_noarm = icl_color_commit_noarm,
+       .color_commit_arm = icl_color_commit_arm,
+       .load_luts = icl_load_luts,
+       .read_luts = icl_read_luts,
+       .lut_equal = icl_lut_equal,
+};
+
 static const struct intel_color_funcs icl_color_funcs = {
        .color_check = icl_color_check,
        .color_commit_noarm = icl_color_commit_noarm,
-       .color_commit_arm = skl_color_commit_arm,
+       .color_commit_arm = icl_color_commit_arm,
+       .color_post_update = icl_color_post_update,
        .load_luts = icl_load_luts,
        .read_luts = icl_read_luts,
        .lut_equal = icl_lut_equal,
@@ -3075,7 +3166,7 @@ static const struct intel_color_funcs icl_color_funcs = {
 
 static const struct intel_color_funcs glk_color_funcs = {
        .color_check = glk_color_check,
-       .color_commit_noarm = ilk_color_commit_noarm,
+       .color_commit_noarm = skl_color_commit_noarm,
        .color_commit_arm = skl_color_commit_arm,
        .load_luts = glk_load_luts,
        .read_luts = glk_read_luts,
@@ -3084,7 +3175,7 @@ static const struct intel_color_funcs glk_color_funcs = {
 
 static const struct intel_color_funcs skl_color_funcs = {
        .color_check = ivb_color_check,
-       .color_commit_noarm = ilk_color_commit_noarm,
+       .color_commit_noarm = skl_color_commit_noarm,
        .color_commit_arm = skl_color_commit_arm,
        .load_luts = bdw_load_luts,
        .read_luts = bdw_read_luts,
@@ -3180,7 +3271,9 @@ void intel_color_init_hooks(struct drm_i915_private *i915)
                else
                        i915->display.funcs.color = &i9xx_color_funcs;
        } else {
-               if (DISPLAY_VER(i915) >= 11)
+               if (DISPLAY_VER(i915) >= 12)
+                       i915->display.funcs.color = &tgl_color_funcs;
+               else if (DISPLAY_VER(i915) == 11)
                        i915->display.funcs.color = &icl_color_funcs;
                else if (DISPLAY_VER(i915) == 10)
                        i915->display.funcs.color = &glk_color_funcs;
index d620b5b..8002492 100644 (file)
@@ -21,6 +21,7 @@ void intel_color_prepare_commit(struct intel_crtc_state *crtc_state);
 void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state);
 void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state);
 void intel_color_commit_arm(const struct intel_crtc_state *crtc_state);
+void intel_color_post_update(const struct intel_crtc_state *crtc_state);
 void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
 void intel_color_get_config(struct intel_crtc_state *crtc_state);
 bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
index 208b1b5..63b4b73 100644 (file)
@@ -1209,6 +1209,9 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
        if (needs_cursorclk_wa(old_crtc_state) &&
            !needs_cursorclk_wa(new_crtc_state))
                icl_wa_cursorclkgating(dev_priv, pipe, false);
+
+       if (intel_crtc_needs_color_update(new_crtc_state))
+               intel_color_post_update(new_crtc_state);
 }
 
 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
@@ -7091,6 +7094,8 @@ static void intel_update_crtc(struct intel_atomic_state *state,
 
        intel_fbc_update(state, crtc);
 
+       drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
+
        if (!modeset &&
            intel_crtc_needs_color_update(new_crtc_state))
                intel_color_commit_noarm(new_crtc_state);
@@ -7458,8 +7463,28 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
        drm_atomic_helper_wait_for_dependencies(&state->base);
        drm_dp_mst_atomic_wait_for_dependencies(&state->base);
 
-       if (state->modeset)
-               wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+       /*
+        * During full modesets we write a lot of registers, wait
+        * for PLLs, etc. Doing that while DC states are enabled
+        * is not a good idea.
+        *
+        * During fastsets and other updates we also need to
+        * disable DC states due to the following scenario:
+        * 1. DC5 exit and PSR exit happen
+        * 2. Some or all _noarm() registers are written
+        * 3. Due to some long delay PSR is re-entered
+        * 4. DC5 entry -> DMC saves the already written new
+        *    _noarm() registers and the old not yet written
+        *    _arm() registers
+        * 5. DC5 exit -> DMC restores a mixture of old and
+        *    new register values and arms the update
+        * 6. PSR exit -> hardware latches a mixture of old and
+        *    new register values -> corrupted frame, or worse
+        * 7. New _arm() registers are finally written
+        * 8. Hardware finally latches a complete set of new
+        *    register values, and subsequent frames will be OK again
+        */
+       wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
 
        intel_atomic_prepare_plane_clear_colors(state);
 
@@ -7608,8 +7633,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
                 * the culprit.
                 */
                intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
-               intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
        }
+       intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
        intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
 
        /*
index 2106b3d..7c9b328 100644 (file)
@@ -232,7 +232,7 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
                        return slots;
        }
 
-       intel_link_compute_m_n(crtc_state->pipe_bpp,
+       intel_link_compute_m_n(crtc_state->dsc.compressed_bpp,
                               crtc_state->lane_count,
                               adjusted_mode->crtc_clock,
                               crtc_state->port_clock,
index ad1a37b..2a9f40a 100644 (file)
@@ -301,6 +301,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
        vm->pte_encode = gen8_ggtt_pte_encode;
 
        dpt->obj = dpt_obj;
+       dpt->obj->is_dpt = true;
 
        return &dpt->vm;
 }
@@ -309,5 +310,6 @@ void intel_dpt_destroy(struct i915_address_space *vm)
 {
        struct i915_dpt *dpt = i915_vm_to_dpt(vm);
 
+       dpt->obj->is_dpt = false;
        i915_vm_put(&dpt->vm);
 }
index f453287..be510b9 100644 (file)
@@ -418,9 +418,9 @@ static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
        val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
        if (val == 0xffffffff) {
                drm_dbg_kms(&i915->drm,
-                           "Port %s: PHY in TCCOLD, assume safe mode\n",
+                           "Port %s: PHY in TCCOLD, assume not owned\n",
                            dig_port->tc_port_name);
-               return true;
+               return false;
        }
 
        return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
index 8949fb0..3198b64 100644 (file)
@@ -127,7 +127,8 @@ i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
 
        memcpy(map, data, size);
 
-       i915_gem_object_unpin_map(obj);
+       i915_gem_object_flush_map(obj);
+       __i915_gem_object_release_map(obj);
 
        return obj;
 }
index f9a8acb..885ccde 100644 (file)
@@ -303,7 +303,7 @@ i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
 static inline bool
 i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
 {
-       return READ_ONCE(obj->frontbuffer);
+       return READ_ONCE(obj->frontbuffer) || obj->is_dpt;
 }
 
 static inline unsigned int
index 19c9bdd..5dcbbef 100644 (file)
@@ -491,6 +491,9 @@ struct drm_i915_gem_object {
         */
        unsigned int cache_dirty:1;
 
+       /* @is_dpt: Object houses a display page table (DPT) */
+       unsigned int is_dpt:1;
+
        /**
         * @read_domains: Read memory domains.
         *
index 7420276..4758f21 100644 (file)
@@ -1067,11 +1067,12 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
                        .interruptible = true,
                        .no_wait_gpu = true, /* should be idle already */
                };
+               int err;
 
                GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));
 
-               ret = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
-               if (ret) {
+               err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
+               if (err) {
                        dma_resv_unlock(bo->base.resv);
                        return VM_FAULT_SIGBUS;
                }
index 1bbe670..7503264 100644 (file)
@@ -2018,6 +2018,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
         * inspecting the queue to see if we need to resumbit.
         */
        if (*prev != *execlists->active) { /* elide lite-restores */
+               struct intel_context *prev_ce = NULL, *active_ce = NULL;
+
                /*
                 * Note the inherent discrepancy between the HW runtime,
                 * recorded as part of the context switch, and the CPU
@@ -2029,9 +2031,15 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
                 * and correct overselves later when updating from HW.
                 */
                if (*prev)
-                       lrc_runtime_stop((*prev)->context);
+                       prev_ce = (*prev)->context;
                if (*execlists->active)
-                       lrc_runtime_start((*execlists->active)->context);
+                       active_ce = (*execlists->active)->context;
+               if (prev_ce != active_ce) {
+                       if (prev_ce)
+                               lrc_runtime_stop(prev_ce);
+                       if (active_ce)
+                               lrc_runtime_start(active_ce);
+               }
                new_timeslice(execlists);
        }
 
index f5d7b51..2c92fa9 100644 (file)
@@ -2075,16 +2075,6 @@ void intel_rps_sanitize(struct intel_rps *rps)
                rps_disable_interrupts(rps);
 }
 
-u32 intel_rps_read_rpstat_fw(struct intel_rps *rps)
-{
-       struct drm_i915_private *i915 = rps_to_i915(rps);
-       i915_reg_t rpstat;
-
-       rpstat = (GRAPHICS_VER(i915) >= 12) ? GEN12_RPSTAT1 : GEN6_RPSTAT1;
-
-       return intel_uncore_read_fw(rps_to_gt(rps)->uncore, rpstat);
-}
-
 u32 intel_rps_read_rpstat(struct intel_rps *rps)
 {
        struct drm_i915_private *i915 = rps_to_i915(rps);
@@ -2095,7 +2085,7 @@ u32 intel_rps_read_rpstat(struct intel_rps *rps)
        return intel_uncore_read(rps_to_gt(rps)->uncore, rpstat);
 }
 
-u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
+static u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
 {
        struct drm_i915_private *i915 = rps_to_i915(rps);
        u32 cagf;
@@ -2118,10 +2108,11 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
        return cagf;
 }
 
-static u32 read_cagf(struct intel_rps *rps)
+static u32 __read_cagf(struct intel_rps *rps, bool take_fw)
 {
        struct drm_i915_private *i915 = rps_to_i915(rps);
        struct intel_uncore *uncore = rps_to_uncore(rps);
+       i915_reg_t r = INVALID_MMIO_REG;
        u32 freq;
 
        /*
@@ -2129,22 +2120,30 @@ static u32 read_cagf(struct intel_rps *rps)
         * registers will return 0 freq when GT is in RC6
         */
        if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
-               freq = intel_uncore_read(uncore, MTL_MIRROR_TARGET_WP1);
+               r = MTL_MIRROR_TARGET_WP1;
        } else if (GRAPHICS_VER(i915) >= 12) {
-               freq = intel_uncore_read(uncore, GEN12_RPSTAT1);
+               r = GEN12_RPSTAT1;
        } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
                vlv_punit_get(i915);
                freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
                vlv_punit_put(i915);
        } else if (GRAPHICS_VER(i915) >= 6) {
-               freq = intel_uncore_read(uncore, GEN6_RPSTAT1);
+               r = GEN6_RPSTAT1;
        } else {
-               freq = intel_uncore_read(uncore, MEMSTAT_ILK);
+               r = MEMSTAT_ILK;
        }
 
+       if (i915_mmio_reg_valid(r))
+               freq = take_fw ? intel_uncore_read(uncore, r) : intel_uncore_read_fw(uncore, r);
+
        return intel_rps_get_cagf(rps, freq);
 }
 
+static u32 read_cagf(struct intel_rps *rps)
+{
+       return __read_cagf(rps, true);
+}
+
 u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
 {
        struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
@@ -2157,7 +2156,12 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
        return freq;
 }
 
-u32 intel_rps_read_punit_req(struct intel_rps *rps)
+u32 intel_rps_read_actual_frequency_fw(struct intel_rps *rps)
+{
+       return intel_gpu_freq(rps, __read_cagf(rps, false));
+}
+
+static u32 intel_rps_read_punit_req(struct intel_rps *rps)
 {
        struct intel_uncore *uncore = rps_to_uncore(rps);
        struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
index c622962..a3fa987 100644 (file)
@@ -37,8 +37,8 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
 
 int intel_gpu_freq(struct intel_rps *rps, int val);
 int intel_freq_opcode(struct intel_rps *rps, int val);
-u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1);
 u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
+u32 intel_rps_read_actual_frequency_fw(struct intel_rps *rps);
 u32 intel_rps_get_requested_frequency(struct intel_rps *rps);
 u32 intel_rps_get_min_frequency(struct intel_rps *rps);
 u32 intel_rps_get_min_raw_freq(struct intel_rps *rps);
@@ -49,10 +49,8 @@ int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val);
 u32 intel_rps_get_rp0_frequency(struct intel_rps *rps);
 u32 intel_rps_get_rp1_frequency(struct intel_rps *rps);
 u32 intel_rps_get_rpn_frequency(struct intel_rps *rps);
-u32 intel_rps_read_punit_req(struct intel_rps *rps);
 u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps);
 u32 intel_rps_read_rpstat(struct intel_rps *rps);
-u32 intel_rps_read_rpstat_fw(struct intel_rps *rps);
 void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps);
 void intel_rps_raise_unslice(struct intel_rps *rps);
 void intel_rps_lower_unslice(struct intel_rps *rps);
index 410905d..0c103ca 100644 (file)
@@ -235,6 +235,13 @@ static void delayed_huc_load_fini(struct intel_huc *huc)
        i915_sw_fence_fini(&huc->delayed_load.fence);
 }
 
+int intel_huc_sanitize(struct intel_huc *huc)
+{
+       delayed_huc_load_complete(huc);
+       intel_uc_fw_sanitize(&huc->fw);
+       return 0;
+}
+
 static bool vcs_supported(struct intel_gt *gt)
 {
        intel_engine_mask_t mask = gt->info.engine_mask;
index 52db036..db555b3 100644 (file)
@@ -41,6 +41,7 @@ struct intel_huc {
        } delayed_load;
 };
 
+int intel_huc_sanitize(struct intel_huc *huc);
 void intel_huc_init_early(struct intel_huc *huc);
 int intel_huc_init(struct intel_huc *huc);
 void intel_huc_fini(struct intel_huc *huc);
@@ -54,12 +55,6 @@ bool intel_huc_is_authenticated(struct intel_huc *huc);
 void intel_huc_register_gsc_notifier(struct intel_huc *huc, struct bus_type *bus);
 void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, struct bus_type *bus);
 
-static inline int intel_huc_sanitize(struct intel_huc *huc)
-{
-       intel_uc_fw_sanitize(&huc->fw);
-       return 0;
-}
-
 static inline bool intel_huc_is_supported(struct intel_huc *huc)
 {
        return intel_uc_fw_is_supported(&huc->fw);
index 824a34e..0040749 100644 (file)
@@ -1592,9 +1592,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
        /*
         * Wa_16011777198:dg2: Unset the override of GUCRC mode to enable rc6.
         */
-       if (intel_uc_uses_guc_rc(&gt->uc) &&
-           (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
-            IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)))
+       if (stream->override_gucrc)
                drm_WARN_ON(&gt->i915->drm,
                            intel_guc_slpc_unset_gucrc_mode(&gt->uc.guc.slpc));
 
@@ -3305,8 +3303,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
                if (ret) {
                        drm_dbg(&stream->perf->i915->drm,
                                "Unable to override gucrc mode\n");
-                       goto err_config;
+                       goto err_gucrc;
                }
+
+               stream->override_gucrc = true;
        }
 
        ret = alloc_oa_buffer(stream);
@@ -3345,11 +3345,15 @@ err_enable:
        free_oa_buffer(stream);
 
 err_oa_buf_alloc:
-       free_oa_configs(stream);
+       if (stream->override_gucrc)
+               intel_guc_slpc_unset_gucrc_mode(&gt->uc.guc.slpc);
 
+err_gucrc:
        intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
        intel_engine_pm_put(stream->engine);
 
+       free_oa_configs(stream);
+
 err_config:
        free_noa_wait(stream);
 
@@ -4634,13 +4638,13 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
                err = oa_config->id;
                goto sysfs_err;
        }
-
-       mutex_unlock(&perf->metrics_lock);
+       id = oa_config->id;
 
        drm_dbg(&perf->i915->drm,
                "Added config %s id=%i\n", oa_config->uuid, oa_config->id);
+       mutex_unlock(&perf->metrics_lock);
 
-       return oa_config->id;
+       return id;
 
 sysfs_err:
        mutex_unlock(&perf->metrics_lock);
index ca150b7..4d5d8c3 100644 (file)
@@ -316,6 +316,12 @@ struct i915_perf_stream {
         * buffer should be checked for available data.
         */
        u64 poll_oa_period;
+
+       /**
+        * @override_gucrc: GuC RC has been overridden for the perf stream,
+        * and we need to restore the default configuration on release.
+        */
+       bool override_gucrc;
 };
 
 /**
index 52531ab..6d422b0 100644 (file)
@@ -393,14 +393,12 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
                 * case we assume the system is running at the intended
                 * frequency. Fortunately, the read should rarely fail!
                 */
-               val = intel_rps_read_rpstat_fw(rps);
-               if (val)
-                       val = intel_rps_get_cagf(rps, val);
-               else
-                       val = rps->cur_freq;
+               val = intel_rps_read_actual_frequency_fw(rps);
+               if (!val)
+                       val = intel_gpu_freq(rps, rps->cur_freq);
 
                add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
-                               intel_gpu_freq(rps, val), period_ns / 1000);
+                               val, period_ns / 1000);
        }
 
        if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) {
index ed9d374..5bb777f 100644 (file)
@@ -363,6 +363,35 @@ nv50_outp_atomic_check_view(struct drm_encoder *encoder,
        return 0;
 }
 
+static void
+nv50_outp_atomic_fix_depth(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state)
+{
+       struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct drm_display_mode *mode = &asyh->state.adjusted_mode;
+       unsigned int max_rate, mode_rate;
+
+       switch (nv_encoder->dcb->type) {
+       case DCB_OUTPUT_DP:
+               max_rate = nv_encoder->dp.link_nr * nv_encoder->dp.link_bw;
+
+               /* we don't support more than 10 anyway */
+               asyh->or.bpc = min_t(u8, asyh->or.bpc, 10);
+
+               /* reduce the bpc until it works out */
+               while (asyh->or.bpc > 6) {
+                       mode_rate = DIV_ROUND_UP(mode->clock * asyh->or.bpc * 3, 8);
+                       if (mode_rate <= max_rate)
+                               break;
+
+                       asyh->or.bpc -= 2;
+               }
+               break;
+       default:
+               break;
+       }
+}
+
 static int
 nv50_outp_atomic_check(struct drm_encoder *encoder,
                       struct drm_crtc_state *crtc_state,
@@ -381,6 +410,9 @@ nv50_outp_atomic_check(struct drm_encoder *encoder,
        if (crtc_state->mode_changed || crtc_state->connectors_changed)
                asyh->or.bpc = connector->display_info.bpc;
 
+       /* We might have to reduce the bpc */
+       nv50_outp_atomic_fix_depth(encoder, crtc_state);
+
        return 0;
 }
 
index 40409a2..91b5ecc 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/apple-gmux.h>
 #include <linux/backlight.h>
 #include <linux/idr.h>
+#include <drm/drm_probe_helper.h>
 
 #include "nouveau_drv.h"
 #include "nouveau_reg.h"
@@ -299,8 +300,12 @@ nv50_backlight_init(struct nouveau_backlight *bl,
        struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
        struct nvif_object *device = &drm->client.device.object;
 
+       /*
+        * Note when this runs the connectors have not been probed yet,
+        * so nv_conn->base.status is not set yet.
+        */
        if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)) ||
-           nv_conn->base.status != connector_status_connected)
+           drm_helper_probe_detect(&nv_conn->base, NULL, false) != connector_status_connected)
                return -ENODEV;
 
        if (nv_conn->type == DCB_CONNECTOR_eDP) {
index e00876f..d49b487 100644 (file)
@@ -263,8 +263,6 @@ nouveau_dp_irq(struct work_struct *work)
 }
 
 /* TODO:
- * - Use the minimum possible BPC here, once we add support for the max bpc
- *   property.
  * - Validate against the DP caps advertised by the GPU (we don't check these
  *   yet)
  */
@@ -276,7 +274,11 @@ nv50_dp_mode_valid(struct drm_connector *connector,
 {
        const unsigned int min_clock = 25000;
        unsigned int max_rate, mode_rate, ds_max_dotclock, clock = mode->clock;
-       const u8 bpp = connector->display_info.bpc * 3;
+       /* Check with the minimum bpc always, so we can advertise better modes.
+        * In particular not doing this causes modes to be dropped on HDR
+        * displays as we might check with a bpc of 16 even.
+        */
+       const u8 bpp = 6 * 3;
 
        if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
                return MODE_NO_INTERLACE;
index 76678dd..c4c6f67 100644 (file)
@@ -31,6 +31,7 @@ gf108_fb = {
        .init = gf100_fb_init,
        .init_page = gf100_fb_init_page,
        .intr = gf100_fb_intr,
+       .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
        .ram_new = gf108_ram_new,
        .default_bigpage = 17,
 };
index f73442c..433fa96 100644 (file)
@@ -77,6 +77,7 @@ gk104_fb = {
        .init = gf100_fb_init,
        .init_page = gf100_fb_init_page,
        .intr = gf100_fb_intr,
+       .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
        .ram_new = gk104_ram_new,
        .default_bigpage = 17,
        .clkgate_pack = gk104_fb_clkgate_pack,
index 45d6cdf..4dc283d 100644 (file)
@@ -59,6 +59,7 @@ gk110_fb = {
        .init = gf100_fb_init,
        .init_page = gf100_fb_init_page,
        .intr = gf100_fb_intr,
+       .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
        .ram_new = gk104_ram_new,
        .default_bigpage = 17,
        .clkgate_pack = gk110_fb_clkgate_pack,
index de52462..90bfff6 100644 (file)
@@ -31,6 +31,7 @@ gm107_fb = {
        .init = gf100_fb_init,
        .init_page = gf100_fb_init_page,
        .intr = gf100_fb_intr,
+       .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
        .ram_new = gm107_ram_new,
        .default_bigpage = 17,
 };
index 666a5e5..e961fa2 100644 (file)
@@ -504,6 +504,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                if (IS_ERR(pages[i])) {
                        mutex_unlock(&bo->base.pages_lock);
                        ret = PTR_ERR(pages[i]);
+                       pages[i] = NULL;
                        goto err_pages;
                }
        }
index 15d04a0..e0a8890 100644 (file)
@@ -507,12 +507,19 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 {
        struct drm_sched_entity *entity = sched_job->entity;
        bool first;
+       ktime_t submit_ts;
 
        trace_drm_sched_job(sched_job, entity);
        atomic_inc(entity->rq->sched->score);
        WRITE_ONCE(entity->last_user, current->group_leader);
+
+       /*
+        * After the sched_job is pushed into the entity queue, it may be
+        * completed and freed up at any time. We can no longer access it.
+        * Make sure to set the submit_ts first, to avoid a race.
+        */
+       sched_job->submit_ts = submit_ts = ktime_get();
        first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
-       sched_job->submit_ts = ktime_get();
 
        /* first job wakes up scheduler */
        if (first) {
@@ -529,7 +536,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
                spin_unlock(&entity->rq_lock);
 
                if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
-                       drm_sched_rq_update_fifo(entity, sched_job->submit_ts);
+                       drm_sched_rq_update_fifo(entity, submit_ts);
 
                drm_sched_wakeup(entity->rq->sched);
        }
index 4e6ad6e..0e43784 100644 (file)
@@ -906,12 +906,6 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 
        spin_unlock(&sched->job_list_lock);
 
-       if (job) {
-               job->entity->elapsed_ns += ktime_to_ns(
-                       ktime_sub(job->s_fence->finished.timestamp,
-                                 job->s_fence->scheduled.timestamp));
-       }
-
        return job;
 }
 
index f8ee714..09ee6f6 100644 (file)
@@ -89,7 +89,8 @@ static int check_block(struct kunit *test, struct drm_buddy *mm,
                err = -EINVAL;
        }
 
-       if (!is_power_of_2(block_size)) {
+       /* We can't use is_power_of_2() for a u64 on 32-bit systems. */
+       if (block_size & (block_size - 1)) {
                kunit_err(test, "block size not power of two\n");
                err = -EINVAL;
        }
index 82f64fb..4ce012f 100644 (file)
@@ -1122,7 +1122,7 @@ config HID_TOPRE
        tristate "Topre REALFORCE keyboards"
        depends on HID
        help
-         Say Y for N-key rollover support on Topre REALFORCE R2 108 key keyboards.
+         Say Y for N-key rollover support on Topre REALFORCE R2 108/87 key keyboards.
 
 config HID_THINGM
        tristate "ThingM blink(1) USB RGB LED"
index 63545cd..c2e9b6d 100644 (file)
 #define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN   0x261A
 #define I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN  0x2A1C
 #define I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN     0x279F
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_13T_AW100        0x29F5
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1     0x2BED
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2     0x2BEE
 
 #define USB_VENDOR_ID_ELECOM           0x056e
 #define USB_DEVICE_ID_ELECOM_BM084     0x0061
 
 #define USB_VENDOR_ID_TOPRE                    0x0853
 #define USB_DEVICE_ID_TOPRE_REALFORCE_R2_108                   0x0148
+#define USB_DEVICE_ID_TOPRE_REALFORCE_R2_87                    0x0146
 
 #define USB_VENDOR_ID_TOPSEED          0x0766
 #define USB_DEVICE_ID_TOPSEED_CYBERLINK        0x0204
index 7fc9679..5c65a58 100644 (file)
@@ -398,6 +398,12 @@ static const struct hid_device_id hid_battery_quirks[] = {
          HID_BATTERY_QUIRK_IGNORE },
        { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN),
          HID_BATTERY_QUIRK_IGNORE },
+       { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_13T_AW100),
+         HID_BATTERY_QUIRK_IGNORE },
+       { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1),
+         HID_BATTERY_QUIRK_IGNORE },
+       { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2),
+         HID_BATTERY_QUIRK_IGNORE },
        {}
 };
 
index 3e3f89e..d853987 100644 (file)
@@ -940,7 +940,7 @@ hid_sensor_register_platform_device(struct platform_device *pdev,
                                    struct hid_sensor_hub_device *hsdev,
                                    const struct hid_sensor_custom_match *match)
 {
-       char real_usage[HID_SENSOR_USAGE_LENGTH];
+       char real_usage[HID_SENSOR_USAGE_LENGTH] = { 0 };
        struct platform_device *custom_pdev;
        const char *dev_name;
        char *c;
index 88a91cd..d1d5ca3 100644 (file)
@@ -36,6 +36,8 @@ static __u8 *topre_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 static const struct hid_device_id topre_id_table[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_TOPRE,
                         USB_DEVICE_ID_TOPRE_REALFORCE_R2_108) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_TOPRE,
+                        USB_DEVICE_ID_TOPRE_REALFORCE_R2_87) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, topre_id_table);
index 81385ab..7fc738a 100644 (file)
@@ -241,8 +241,8 @@ static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv)
        struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
        struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv);
 
-       return guid_equal(&driver->id[0].guid,
-                         &device->fw_client->props.protocol_name);
+       return(device->fw_client ? guid_equal(&driver->id[0].guid,
+              &device->fw_client->props.protocol_name) : 0);
 }
 
 /**
index 9dc27e5..da51b50 100644 (file)
@@ -409,6 +409,10 @@ void vmbus_disconnect(void)
  */
 struct vmbus_channel *relid2channel(u32 relid)
 {
+       if (vmbus_connection.channels == NULL) {
+               pr_warn_once("relid2channel: relid=%d: No channels mapped!\n", relid);
+               return NULL;
+       }
        if (WARN_ON(relid >= MAX_CHANNEL_RELIDS))
                return NULL;
        return READ_ONCE(vmbus_connection.channels[relid]);
index 1ea8f17..4c15fae 100644 (file)
@@ -472,7 +472,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
                if (etm4x_sspcicrn_present(drvdata, i))
                        etm4x_relaxed_write32(csa, config->ss_pe_cmp[i], TRCSSPCICRn(i));
        }
-       for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+       for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
                etm4x_relaxed_write64(csa, config->addr_val[i], TRCACVRn(i));
                etm4x_relaxed_write64(csa, config->addr_acc[i], TRCACATRn(i));
        }
@@ -1070,25 +1070,21 @@ static bool etm4_init_iomem_access(struct etmv4_drvdata *drvdata,
                                   struct csdev_access *csa)
 {
        u32 devarch = readl_relaxed(drvdata->base + TRCDEVARCH);
-       u32 idr1 = readl_relaxed(drvdata->base + TRCIDR1);
 
        /*
         * All ETMs must implement TRCDEVARCH to indicate that
-        * the component is an ETMv4. To support any broken
-        * implementations we fall back to TRCIDR1 check, which
-        * is not really reliable.
+        * the component is an ETMv4. Even though TRCIDR1 also
+        * contains the information, it is part of the "Trace"
+        * register and must be accessed with the OSLK cleared,
+        * with MMIO. But we cannot touch the OSLK until we are
+        * sure this is an ETM. So rely only on the TRCDEVARCH.
         */
-       if ((devarch & ETM_DEVARCH_ID_MASK) == ETM_DEVARCH_ETMv4x_ARCH) {
-               drvdata->arch = etm_devarch_to_arch(devarch);
-       } else {
-               pr_warn("CPU%d: ETM4x incompatible TRCDEVARCH: %x, falling back to TRCIDR1\n",
-                       smp_processor_id(), devarch);
-
-               if (ETM_TRCIDR1_ARCH_MAJOR(idr1) != ETM_TRCIDR1_ARCH_ETMv4)
-                       return false;
-               drvdata->arch = etm_trcidr_to_arch(idr1);
+       if ((devarch & ETM_DEVARCH_ID_MASK) != ETM_DEVARCH_ETMv4x_ARCH) {
+               pr_warn_once("TRCDEVARCH doesn't match ETMv4 architecture\n");
+               return false;
        }
 
+       drvdata->arch = etm_devarch_to_arch(devarch);
        *csa = CSDEV_ACCESS_IOMEM(drvdata->base);
        return true;
 }
index 434f4e9..27c8a99 100644 (file)
  * TRCDEVARCH  - CoreSight architected register
  *                - Bits[15:12] - Major version
  *                - Bits[19:16] - Minor version
- * TRCIDR1     - ETM architected register
- *                - Bits[11:8] - Major version
- *                - Bits[7:4]  - Minor version
- * We must rely on TRCDEVARCH for the version information,
- * however we don't want to break the support for potential
- * old implementations which might not implement it. Thus
- * we fall back to TRCIDR1 if TRCDEVARCH is not implemented
- * for memory mapped components.
+ *
+ * We must rely only on TRCDEVARCH for the version information. Even though,
+ * TRCIDR1 also provides the architecture version, it is a "Trace" register
+ * and as such must be accessed only with Trace power domain ON. This may
+ * not be available at probe time.
+ *
  * Now to make certain decisions easier based on the version
  * we use an internal representation of the version in the
  * driver, as follows :
@@ -786,12 +784,6 @@ static inline u8 etm_devarch_to_arch(u32 devarch)
                                ETM_DEVARCH_REVISION(devarch));
 }
 
-static inline u8 etm_trcidr_to_arch(u32 trcidr1)
-{
-       return ETM_ARCH_VERSION(ETM_TRCIDR1_ARCH_MAJOR(trcidr1),
-                               ETM_TRCIDR1_ARCH_MINOR(trcidr1));
-}
-
 enum etm_impdef_type {
        ETM4_IMPDEF_HISI_CORE_COMMIT,
        ETM4_IMPDEF_FEATURE_MAX,
index 09af759..b21ffd6 100644 (file)
@@ -48,9 +48,9 @@
  * SR_HOLD_TIME_XK_TICKS field will indicate the number of ticks of the
  * baud clock required to program 'Hold Time' at X KHz.
  */
-#define SR_HOLD_TIME_100K_TICKS        133
-#define SR_HOLD_TIME_400K_TICKS        20
-#define SR_HOLD_TIME_1000K_TICKS       11
+#define SR_HOLD_TIME_100K_TICKS                150
+#define SR_HOLD_TIME_400K_TICKS                20
+#define SR_HOLD_TIME_1000K_TICKS       12
 
 #define SMB_CORE_COMPLETION_REG_OFF3   (SMBUS_MAST_CORE_ADDR_BASE + 0x23)
 
  * the baud clock required to program 'fair idle delay' at X KHz. Fair idle
  * delay establishes the MCTP T(IDLE_DELAY) period.
  */
-#define FAIR_BUS_IDLE_MIN_100K_TICKS           969
-#define FAIR_BUS_IDLE_MIN_400K_TICKS           157
-#define FAIR_BUS_IDLE_MIN_1000K_TICKS          157
+#define FAIR_BUS_IDLE_MIN_100K_TICKS           992
+#define FAIR_BUS_IDLE_MIN_400K_TICKS           500
+#define FAIR_BUS_IDLE_MIN_1000K_TICKS          500
 
 /*
  * FAIR_IDLE_DELAY_XK_TICKS field will indicate the number of ticks of the
  * baud clock required to satisfy the fairness protocol at X KHz.
  */
-#define FAIR_IDLE_DELAY_100K_TICKS     1000
-#define FAIR_IDLE_DELAY_400K_TICKS     500
-#define FAIR_IDLE_DELAY_1000K_TICKS    500
+#define FAIR_IDLE_DELAY_100K_TICKS     963
+#define FAIR_IDLE_DELAY_400K_TICKS     156
+#define FAIR_IDLE_DELAY_1000K_TICKS    156
 
 #define SMB_IDLE_SCALING_100K          \
        ((FAIR_IDLE_DELAY_100K_TICKS << 16) | FAIR_BUS_IDLE_MIN_100K_TICKS)
  */
 #define BUS_CLK_100K_LOW_PERIOD_TICKS          156
 #define BUS_CLK_400K_LOW_PERIOD_TICKS          41
-#define BUS_CLK_1000K_LOW_PERIOD_TICKS 15
+#define BUS_CLK_1000K_LOW_PERIOD_TICKS         15
 
 /*
  * BUS_CLK_XK_HIGH_PERIOD_TICKS field defines the number of I2C Baud Clock
  */
 #define CLK_SYNC_100K                  4
 #define CLK_SYNC_400K                  4
-#define CLK_SYNC_1000K         4
+#define CLK_SYNC_1000K                 4
 
 #define SMB_CORE_DATA_TIMING_REG_OFF   (SMBUS_MAST_CORE_ADDR_BASE + 0x40)
 
  * determines the SCLK hold time following SDAT driven low during the first
  * START bit in a transfer.
  */
-#define FIRST_START_HOLD_100K_TICKS    22
-#define FIRST_START_HOLD_400K_TICKS    16
-#define FIRST_START_HOLD_1000K_TICKS   6
+#define FIRST_START_HOLD_100K_TICKS    23
+#define FIRST_START_HOLD_400K_TICKS    8
+#define FIRST_START_HOLD_1000K_TICKS   12
 
 /*
  * STOP_SETUP_XK_TICKS will indicate the number of ticks of the baud clock
  * required to program 'STOP_SETUP' timer at X KHz. This timer determines the
  * SDAT setup time from the rising edge of SCLK for a STOP condition.
  */
-#define STOP_SETUP_100K_TICKS          157
+#define STOP_SETUP_100K_TICKS          150
 #define STOP_SETUP_400K_TICKS          20
-#define STOP_SETUP_1000K_TICKS 12
+#define STOP_SETUP_1000K_TICKS         12
 
 /*
  * RESTART_SETUP_XK_TICKS will indicate the number of ticks of the baud clock
  * required to program 'RESTART_SETUP' timer at X KHz. This timer determines the
  * SDAT setup time from the rising edge of SCLK for a repeated START condition.
  */
-#define RESTART_SETUP_100K_TICKS       157
+#define RESTART_SETUP_100K_TICKS       156
 #define RESTART_SETUP_400K_TICKS       20
 #define RESTART_SETUP_1000K_TICKS      12
 
  * required to program 'DATA_HOLD' timer at X KHz. This timer determines the
  * SDAT hold time following SCLK driven low.
  */
-#define DATA_HOLD_100K_TICKS           2
+#define DATA_HOLD_100K_TICKS           12
 #define DATA_HOLD_400K_TICKS           2
 #define DATA_HOLD_1000K_TICKS          2
 
  * Bus Idle Minimum time = BUS_IDLE_MIN[7:0] x Baud_Clock_Period x
  * (BUS_IDLE_MIN_XK_TICKS[7] ? 4,1)
  */
-#define BUS_IDLE_MIN_100K_TICKS                167UL
-#define BUS_IDLE_MIN_400K_TICKS                139UL
-#define BUS_IDLE_MIN_1000K_TICKS               133UL
+#define BUS_IDLE_MIN_100K_TICKS                36UL
+#define BUS_IDLE_MIN_400K_TICKS                10UL
+#define BUS_IDLE_MIN_1000K_TICKS       4UL
 
 /*
  * CTRL_CUM_TIME_OUT_XK_TICKS defines SMBus Controller Cumulative Time-Out.
  * SMBus Controller Cumulative Time-Out duration =
  * CTRL_CUM_TIME_OUT_XK_TICKS[7:0] x Baud_Clock_Period x 2048
  */
-#define CTRL_CUM_TIME_OUT_100K_TICKS           159
-#define CTRL_CUM_TIME_OUT_400K_TICKS           159
-#define CTRL_CUM_TIME_OUT_1000K_TICKS          159
+#define CTRL_CUM_TIME_OUT_100K_TICKS           76
+#define CTRL_CUM_TIME_OUT_400K_TICKS           76
+#define CTRL_CUM_TIME_OUT_1000K_TICKS          76
 
 /*
  * TARGET_CUM_TIME_OUT_XK_TICKS defines SMBus Target Cumulative Time-Out duration.
  * SMBus Target Cumulative Time-Out duration = TARGET_CUM_TIME_OUT_XK_TICKS[7:0] x
  * Baud_Clock_Period x 4096
  */
-#define TARGET_CUM_TIME_OUT_100K_TICKS 199
-#define TARGET_CUM_TIME_OUT_400K_TICKS 199
-#define TARGET_CUM_TIME_OUT_1000K_TICKS        199
+#define TARGET_CUM_TIME_OUT_100K_TICKS 95
+#define TARGET_CUM_TIME_OUT_400K_TICKS 95
+#define TARGET_CUM_TIME_OUT_1000K_TICKS        95
 
 /*
  * CLOCK_HIGH_TIME_OUT_XK defines Clock High time out period.
  * Clock High time out period = CLOCK_HIGH_TIME_OUT_XK[7:0] x Baud_Clock_Period x 8
  */
-#define CLOCK_HIGH_TIME_OUT_100K_TICKS 204
-#define CLOCK_HIGH_TIME_OUT_400K_TICKS 204
-#define CLOCK_HIGH_TIME_OUT_1000K_TICKS        204
+#define CLOCK_HIGH_TIME_OUT_100K_TICKS 97
+#define CLOCK_HIGH_TIME_OUT_400K_TICKS 97
+#define CLOCK_HIGH_TIME_OUT_1000K_TICKS        97
 
 #define TO_SCALING_100K                \
        ((BUS_IDLE_MIN_100K_TICKS << 24) | (CTRL_CUM_TIME_OUT_100K_TICKS << 16) | \
index a0af027..2e57585 100644 (file)
@@ -342,18 +342,18 @@ static int ocores_poll_wait(struct ocores_i2c *i2c)
  * ocores_isr(), we just add our polling code around it.
  *
  * It can run in atomic context
+ *
+ * Return: 0 on success, -ETIMEDOUT on timeout
  */
-static void ocores_process_polling(struct ocores_i2c *i2c)
+static int ocores_process_polling(struct ocores_i2c *i2c)
 {
-       while (1) {
-               irqreturn_t ret;
-               int err;
+       irqreturn_t ret;
+       int err = 0;
 
+       while (1) {
                err = ocores_poll_wait(i2c);
-               if (err) {
-                       i2c->state = STATE_ERROR;
+               if (err)
                        break; /* timeout */
-               }
 
                ret = ocores_isr(-1, i2c);
                if (ret == IRQ_NONE)
@@ -364,13 +364,15 @@ static void ocores_process_polling(struct ocores_i2c *i2c)
                                        break;
                }
        }
+
+       return err;
 }
 
 static int ocores_xfer_core(struct ocores_i2c *i2c,
                            struct i2c_msg *msgs, int num,
                            bool polling)
 {
-       int ret;
+       int ret = 0;
        u8 ctrl;
 
        ctrl = oc_getreg(i2c, OCI2C_CONTROL);
@@ -388,15 +390,16 @@ static int ocores_xfer_core(struct ocores_i2c *i2c,
        oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_START);
 
        if (polling) {
-               ocores_process_polling(i2c);
+               ret = ocores_process_polling(i2c);
        } else {
-               ret = wait_event_timeout(i2c->wait,
-                                        (i2c->state == STATE_ERROR) ||
-                                        (i2c->state == STATE_DONE), HZ);
-               if (ret == 0) {
-                       ocores_process_timeout(i2c);
-                       return -ETIMEDOUT;
-               }
+               if (wait_event_timeout(i2c->wait,
+                                      (i2c->state == STATE_ERROR) ||
+                                      (i2c->state == STATE_DONE), HZ) == 0)
+                       ret = -ETIMEDOUT;
+       }
+       if (ret) {
+               ocores_process_timeout(i2c);
+               return ret;
        }
 
        return (i2c->state == STATE_DONE) ? num : -EIO;
index bce6b79..545436b 100644 (file)
@@ -178,6 +178,11 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
                        return NOTIFY_OK;
                }
 
+               /*
+                * Clear the flag before adding the device so that fw_devlink
+                * doesn't skip adding consumers to this device.
+                */
+               rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
                client = of_i2c_register_device(adap, rd->dn);
                if (IS_ERR(client)) {
                        dev_err(&adap->dev, "failed to create client for '%pOF'\n",
index f866859..1c3a723 100644 (file)
@@ -864,7 +864,7 @@ static irqreturn_t kx022a_trigger_handler(int irq, void *p)
        if (ret < 0)
                goto err_read;
 
-       iio_push_to_buffers_with_timestamp(idev, data->buffer, pf->timestamp);
+       iio_push_to_buffers_with_timestamp(idev, data->buffer, data->timestamp);
 err_read:
        iio_trigger_notify_done(idev->trig);
 
index fee8d12..86effe8 100644 (file)
@@ -253,7 +253,7 @@ static const struct ad_sigma_delta_info ad7791_sigma_delta_info = {
        .has_registers = true,
        .addr_shift = 4,
        .read_mask = BIT(3),
-       .irq_flags = IRQF_TRIGGER_LOW,
+       .irq_flags = IRQF_TRIGGER_FALLING,
 };
 
 static int ad7791_read_raw(struct iio_dev *indio_dev,
index 17370c5..ec198c6 100644 (file)
@@ -28,7 +28,6 @@ struct ltc2497_driverdata {
        struct ltc2497core_driverdata common_ddata;
        struct i2c_client *client;
        u32 recv_size;
-       u32 sub_lsb;
        /*
         * DMA (thus cache coherency maintenance) may require the
         * transfer buffers to live in their own cache lines.
@@ -65,10 +64,10 @@ static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata,
                 * equivalent to a sign extension.
                 */
                if (st->recv_size == 3) {
-                       *val = (get_unaligned_be24(st->data.d8) >> st->sub_lsb)
+                       *val = (get_unaligned_be24(st->data.d8) >> 6)
                                - BIT(ddata->chip_info->resolution + 1);
                } else {
-                       *val = (be32_to_cpu(st->data.d32) >> st->sub_lsb)
+                       *val = (be32_to_cpu(st->data.d32) >> 6)
                                - BIT(ddata->chip_info->resolution + 1);
                }
 
@@ -122,7 +121,6 @@ static int ltc2497_probe(struct i2c_client *client)
        st->common_ddata.chip_info = chip_info;
 
        resolution = chip_info->resolution;
-       st->sub_lsb = 31 - (resolution + 1);
        st->recv_size = BITS_TO_BYTES(resolution) + 1;
 
        return ltc2497core_probe(dev, indio_dev);
index b74b689..f6895bc 100644 (file)
@@ -414,13 +414,17 @@ static int max11410_sample(struct max11410_state *st, int *sample_raw,
                if (!ret)
                        return -ETIMEDOUT;
        } else {
+               int ret2;
+
                /* Wait for status register Conversion Ready flag */
-               ret = read_poll_timeout(max11410_read_reg, ret,
-                                       ret || (val & MAX11410_STATUS_CONV_READY_BIT),
+               ret = read_poll_timeout(max11410_read_reg, ret2,
+                                       ret2 || (val & MAX11410_STATUS_CONV_READY_BIT),
                                        5000, MAX11410_CONVERSION_TIMEOUT_MS * 1000,
                                        true, st, MAX11410_REG_STATUS, &val);
                if (ret)
                        return ret;
+               if (ret2)
+                       return ret2;
        }
 
        /* Read ADC Data */
@@ -851,17 +855,21 @@ static int max11410_init_vref(struct device *dev,
 
 static int max11410_calibrate(struct max11410_state *st, u32 cal_type)
 {
-       int ret, val;
+       int ret, ret2, val;
 
        ret = max11410_write_reg(st, MAX11410_REG_CAL_START, cal_type);
        if (ret)
                return ret;
 
        /* Wait for status register Calibration Ready flag */
-       return read_poll_timeout(max11410_read_reg, ret,
-                                ret || (val & MAX11410_STATUS_CAL_READY_BIT),
-                                50000, MAX11410_CALIB_TIMEOUT_MS * 1000, true,
-                                st, MAX11410_REG_STATUS, &val);
+       ret = read_poll_timeout(max11410_read_reg, ret2,
+                               ret2 || (val & MAX11410_STATUS_CAL_READY_BIT),
+                               50000, MAX11410_CALIB_TIMEOUT_MS * 1000, true,
+                               st, MAX11410_REG_STATUS, &val);
+       if (ret)
+               return ret;
+
+       return ret2;
 }
 
 static int max11410_self_calibrate(struct max11410_state *st)
index fd00034..849a697 100644 (file)
@@ -639,7 +639,7 @@ out:
 
 static int palmas_gpadc_remove(struct platform_device *pdev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(&pdev->dev);
+       struct iio_dev *indio_dev = dev_get_drvdata(&pdev->dev);
        struct palmas_gpadc *adc = iio_priv(indio_dev);
 
        if (adc->wakeup1_enable || adc->wakeup2_enable)
index e90c299..c2d5e06 100644 (file)
@@ -628,12 +628,20 @@ static int adc5_get_fw_channel_data(struct adc5_chip *adc,
                                    struct fwnode_handle *fwnode,
                                    const struct adc5_data *data)
 {
-       const char *name = fwnode_get_name(fwnode), *channel_name;
+       const char *channel_name;
+       char *name;
        u32 chan, value, varr[2];
        u32 sid = 0;
        int ret;
        struct device *dev = adc->dev;
 
+       name = devm_kasprintf(dev, GFP_KERNEL, "%pfwP", fwnode);
+       if (!name)
+               return -ENOMEM;
+
+       /* Cut the address part */
+       name[strchrnul(name, '@') - name] = '\0';
+
        ret = fwnode_property_read_u32(fwnode, "reg", &chan);
        if (ret) {
                dev_err(dev, "invalid channel number %s\n", name);
index 2cc9a9b..263fc3a 100644 (file)
@@ -634,6 +634,7 @@ static int ti_ads7950_probe(struct spi_device *spi)
        st->chip.label = dev_name(&st->spi->dev);
        st->chip.parent = &st->spi->dev;
        st->chip.owner = THIS_MODULE;
+       st->chip.can_sleep = true;
        st->chip.base = -1;
        st->chip.ngpio = TI_ADS7950_NUM_GPIOS;
        st->chip.get_direction = ti_ads7950_get_direction;
index 791dd99..18a64f7 100644 (file)
@@ -66,8 +66,8 @@ static int cio_dac_write_raw(struct iio_dev *indio_dev,
        if (mask != IIO_CHAN_INFO_RAW)
                return -EINVAL;
 
-       /* DAC can only accept up to a 16-bit value */
-       if ((unsigned int)val > 65535)
+       /* DAC can only accept up to a 12-bit value */
+       if ((unsigned int)val > 4095)
                return -EINVAL;
 
        priv->chan_out_states[chan->channel] = val;
index f1d7d4b..c2f9762 100644 (file)
@@ -47,6 +47,7 @@ config ADIS16480
        depends on SPI
        select IIO_ADIS_LIB
        select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+       select CRC32
        help
          Say yes here to build support for Analog Devices ADIS16375, ADIS16480,
          ADIS16485, ADIS16488 inertial sensors.
index 80c78bd..a7a080b 100644 (file)
@@ -203,24 +203,27 @@ static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
                                break;
                        }
 
+                       if (filp->f_flags & O_NONBLOCK) {
+                               if (!written)
+                                       ret = -EAGAIN;
+                               break;
+                       }
+
                        wait_woken(&wait, TASK_INTERRUPTIBLE,
                                        MAX_SCHEDULE_TIMEOUT);
                        continue;
                }
 
                ret = rb->access->write(rb, n - written, buf + written);
-               if (ret == 0 && (filp->f_flags & O_NONBLOCK))
-                       ret = -EAGAIN;
+               if (ret < 0)
+                       break;
 
-               if (ret > 0) {
-                       written += ret;
-                       if (written != n && !(filp->f_flags & O_NONBLOCK))
-                               continue;
-               }
-       } while (ret == 0);
+               written += ret;
+
+       } while (written != n);
        remove_wait_queue(&rb->pollq, &wait);
 
-       return ret < 0 ? ret : n;
+       return ret < 0 ? ret : written;
 }
 
 /**
index b1674a5..d4a34a3 100644 (file)
@@ -429,6 +429,14 @@ static const struct iio_info cm32181_info = {
        .attrs                  = &cm32181_attribute_group,
 };
 
+static void cm32181_unregister_dummy_client(void *data)
+{
+       struct i2c_client *client = data;
+
+       /* Unregister the dummy client */
+       i2c_unregister_device(client);
+}
+
 static int cm32181_probe(struct i2c_client *client)
 {
        struct device *dev = &client->dev;
@@ -460,6 +468,10 @@ static int cm32181_probe(struct i2c_client *client)
                client = i2c_acpi_new_device(dev, 1, &board_info);
                if (IS_ERR(client))
                        return PTR_ERR(client);
+
+               ret = devm_add_action_or_reset(dev, cm32181_unregister_dummy_client, client);
+               if (ret)
+                       return ret;
        }
 
        cm32181 = iio_priv(indio_dev);
index 6bdfce9..5c44a36 100644 (file)
@@ -208,7 +208,6 @@ static int vcnl4000_init(struct vcnl4000_data *data)
 
        data->rev = ret & 0xf;
        data->al_scale = 250000;
-       mutex_init(&data->vcnl4000_lock);
 
        return data->chip_spec->set_power_state(data, true);
 };
@@ -1367,6 +1366,8 @@ static int vcnl4000_probe(struct i2c_client *client)
        data->id = id->driver_data;
        data->chip_spec = &vcnl4000_chip_spec_cfg[data->id];
 
+       mutex_init(&data->vcnl4000_lock);
+
        ret = data->chip_spec->init(data);
        if (ret < 0)
                return ret;
index 3081559..6b9563d 100644 (file)
@@ -624,22 +624,11 @@ static inline unsigned short cma_family(struct rdma_id_private *id_priv)
        return id_priv->id.route.addr.src_addr.ss_family;
 }
 
-static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
+static int cma_set_default_qkey(struct rdma_id_private *id_priv)
 {
        struct ib_sa_mcmember_rec rec;
        int ret = 0;
 
-       if (id_priv->qkey) {
-               if (qkey && id_priv->qkey != qkey)
-                       return -EINVAL;
-               return 0;
-       }
-
-       if (qkey) {
-               id_priv->qkey = qkey;
-               return 0;
-       }
-
        switch (id_priv->id.ps) {
        case RDMA_PS_UDP:
        case RDMA_PS_IB:
@@ -659,6 +648,16 @@ static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
        return ret;
 }
 
+static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
+{
+       if (!qkey ||
+           (id_priv->qkey && (id_priv->qkey != qkey)))
+               return -EINVAL;
+
+       id_priv->qkey = qkey;
+       return 0;
+}
+
 static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
 {
        dev_addr->dev_type = ARPHRD_INFINIBAND;
@@ -1229,7 +1228,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
        *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
 
        if (id_priv->id.qp_type == IB_QPT_UD) {
-               ret = cma_set_qkey(id_priv, 0);
+               ret = cma_set_default_qkey(id_priv);
                if (ret)
                        return ret;
 
@@ -4569,7 +4568,10 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
        memset(&rep, 0, sizeof rep);
        rep.status = status;
        if (status == IB_SIDR_SUCCESS) {
-               ret = cma_set_qkey(id_priv, qkey);
+               if (qkey)
+                       ret = cma_set_qkey(id_priv, qkey);
+               else
+                       ret = cma_set_default_qkey(id_priv);
                if (ret)
                        return ret;
                rep.qp_num = id_priv->qp_num;
@@ -4774,9 +4776,7 @@ static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
        enum ib_gid_type gid_type;
        struct net_device *ndev;
 
-       if (!status)
-               status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
-       else
+       if (status)
                pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
                                     status);
 
@@ -4804,7 +4804,7 @@ static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
        }
 
        event->param.ud.qp_num = 0xFFFFFF;
-       event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
+       event->param.ud.qkey = id_priv->qkey;
 
 out:
        if (ndev)
@@ -4823,8 +4823,11 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
            READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
                goto out;
 
-       cma_make_mc_event(status, id_priv, multicast, &event, mc);
-       ret = cma_cm_event_handler(id_priv, &event);
+       ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
+       if (!ret) {
+               cma_make_mc_event(status, id_priv, multicast, &event, mc);
+               ret = cma_cm_event_handler(id_priv, &event);
+       }
        rdma_destroy_ah_attr(&event.param.ud.ah_attr);
        WARN_ON(ret);
 
@@ -4877,9 +4880,11 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
        if (ret)
                return ret;
 
-       ret = cma_set_qkey(id_priv, 0);
-       if (ret)
-               return ret;
+       if (!id_priv->qkey) {
+               ret = cma_set_default_qkey(id_priv);
+               if (ret)
+                       return ret;
+       }
 
        cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
        rec.qkey = cpu_to_be32(id_priv->qkey);
@@ -4956,9 +4961,6 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
        cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
 
        ib.rec.pkey = cpu_to_be16(0xffff);
-       if (id_priv->id.ps == RDMA_PS_UDP)
-               ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
-
        if (dev_addr->bound_dev_if)
                ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
        if (!ndev)
@@ -4984,6 +4986,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
        if (err || !ib.rec.mtu)
                return err ?: -EINVAL;
 
+       if (!id_priv->qkey)
+               cma_set_default_qkey(id_priv);
+
        rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
                    &ib.rec.port_gid);
        INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
@@ -5009,6 +5014,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
                            READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
                return -EINVAL;
 
+       if (id_priv->id.qp_type != IB_QPT_UD)
+               return -EINVAL;
+
        mc = kzalloc(sizeof(*mc), GFP_KERNEL);
        if (!mc)
                return -ENOMEM;
index 11b1c16..b99b3cc 100644 (file)
@@ -532,6 +532,8 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
        else
                ret = device->ops.create_ah(ah, &init_attr, NULL);
        if (ret) {
+               if (ah->sgid_attr)
+                       rdma_put_gid_attr(ah->sgid_attr);
                kfree(ah);
                return ERR_PTR(ret);
        }
index cabd867..7bc3542 100644 (file)
@@ -65,7 +65,7 @@ static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = {
        [ERDMA_OP_LOCAL_INV] = IB_WC_LOCAL_INV,
        [ERDMA_OP_READ_WITH_INV] = IB_WC_RDMA_READ,
        [ERDMA_OP_ATOMIC_CAS] = IB_WC_COMP_SWAP,
-       [ERDMA_OP_ATOMIC_FAD] = IB_WC_FETCH_ADD,
+       [ERDMA_OP_ATOMIC_FAA] = IB_WC_FETCH_ADD,
 };
 
 static const struct {
index 4c38d99..37ad1bb 100644 (file)
@@ -441,7 +441,7 @@ struct erdma_reg_mr_sqe {
 };
 
 /* EQ related. */
-#define ERDMA_DEFAULT_EQ_DEPTH 256
+#define ERDMA_DEFAULT_EQ_DEPTH 4096
 
 /* ceqe */
 #define ERDMA_CEQE_HDR_DB_MASK BIT_ULL(63)
@@ -491,7 +491,7 @@ enum erdma_opcode {
        ERDMA_OP_LOCAL_INV = 15,
        ERDMA_OP_READ_WITH_INV = 16,
        ERDMA_OP_ATOMIC_CAS = 17,
-       ERDMA_OP_ATOMIC_FAD = 18,
+       ERDMA_OP_ATOMIC_FAA = 18,
        ERDMA_NUM_OPCODES = 19,
        ERDMA_OP_INVALID = ERDMA_NUM_OPCODES + 1
 };
index 5dc31e5..4a29a53 100644 (file)
@@ -56,7 +56,7 @@ done:
 static int erdma_enum_and_get_netdev(struct erdma_dev *dev)
 {
        struct net_device *netdev;
-       int ret = -ENODEV;
+       int ret = -EPROBE_DEFER;
 
        /* Already binded to a net_device, so we skip. */
        if (dev->netdev)
index d088d6b..44923c5 100644 (file)
@@ -405,7 +405,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
                        FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
                                   mr->mem.mtt_nents);
 
-               if (mr->mem.mtt_nents < ERDMA_MAX_INLINE_MTT_ENTRIES) {
+               if (mr->mem.mtt_nents <= ERDMA_MAX_INLINE_MTT_ENTRIES) {
                        attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 0);
                        /* Copy SGLs to SQE content to accelerate */
                        memcpy(get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
@@ -439,7 +439,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
                                cpu_to_le64(atomic_wr(send_wr)->compare_add);
                } else {
                        wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK,
-                                             ERDMA_OP_ATOMIC_FAD);
+                                             ERDMA_OP_ATOMIC_FAA);
                        atomic_sqe->fetchadd_swap_data =
                                cpu_to_le64(atomic_wr(send_wr)->compare_add);
                }
index e0a993b..131cf5f 100644 (file)
@@ -11,7 +11,7 @@
 
 /* RDMA Capability. */
 #define ERDMA_MAX_PD (128 * 1024)
-#define ERDMA_MAX_SEND_WR 4096
+#define ERDMA_MAX_SEND_WR 8192
 #define ERDMA_MAX_ORD 128
 #define ERDMA_MAX_IRD 128
 #define ERDMA_MAX_SGE_RD 1
index 195aa9e..8817864 100644 (file)
@@ -1458,13 +1458,15 @@ static int irdma_send_fin(struct irdma_cm_node *cm_node)
  * irdma_find_listener - find a cm node listening on this addr-port pair
  * @cm_core: cm's core
  * @dst_addr: listener ip addr
+ * @ipv4: flag indicating IPv4 when true
  * @dst_port: listener tcp port num
  * @vlan_id: virtual LAN ID
  * @listener_state: state to match with listen node's
  */
 static struct irdma_cm_listener *
-irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
-                   u16 vlan_id, enum irdma_cm_listener_state listener_state)
+irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, bool ipv4,
+                   u16 dst_port, u16 vlan_id,
+                   enum irdma_cm_listener_state listener_state)
 {
        struct irdma_cm_listener *listen_node;
        static const u32 ip_zero[4] = { 0, 0, 0, 0 };
@@ -1477,7 +1479,7 @@ irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
        list_for_each_entry (listen_node, &cm_core->listen_list, list) {
                memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
                listen_port = listen_node->loc_port;
-               if (listen_port != dst_port ||
+               if (listen_node->ipv4 != ipv4 || listen_port != dst_port ||
                    !(listener_state & listen_node->listener_state))
                        continue;
                /* compare node pair, return node handle if a match */
@@ -2902,9 +2904,10 @@ irdma_make_listen_node(struct irdma_cm_core *cm_core,
        unsigned long flags;
 
        /* cannot have multiple matching listeners */
-       listener = irdma_find_listener(cm_core, cm_info->loc_addr,
-                                      cm_info->loc_port, cm_info->vlan_id,
-                                      IRDMA_CM_LISTENER_EITHER_STATE);
+       listener =
+               irdma_find_listener(cm_core, cm_info->loc_addr, cm_info->ipv4,
+                                   cm_info->loc_port, cm_info->vlan_id,
+                                   IRDMA_CM_LISTENER_EITHER_STATE);
        if (listener &&
            listener->listener_state == IRDMA_CM_LISTENER_ACTIVE_STATE) {
                refcount_dec(&listener->refcnt);
@@ -3153,6 +3156,7 @@ void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
 
                listener = irdma_find_listener(cm_core,
                                               cm_info.loc_addr,
+                                              cm_info.ipv4,
                                               cm_info.loc_port,
                                               cm_info.vlan_id,
                                               IRDMA_CM_LISTENER_ACTIVE_STATE);
index 19c2849..7feadb3 100644 (file)
@@ -41,7 +41,7 @@
 #define TCP_OPTIONS_PADDING    3
 
 #define IRDMA_DEFAULT_RETRYS   64
-#define IRDMA_DEFAULT_RETRANS  8
+#define IRDMA_DEFAULT_RETRANS  32
 #define IRDMA_DEFAULT_TTL              0x40
 #define IRDMA_DEFAULT_RTT_VAR          6
 #define IRDMA_DEFAULT_SS_THRESH                0x3fffffff
index 2e1e2ba..43dfa47 100644 (file)
@@ -41,6 +41,7 @@ static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
        IRDMA_HMC_IW_XFFL,
        IRDMA_HMC_IW_Q1,
        IRDMA_HMC_IW_Q1FL,
+       IRDMA_HMC_IW_PBLE,
        IRDMA_HMC_IW_TIMER,
        IRDMA_HMC_IW_FSIMC,
        IRDMA_HMC_IW_FSIAV,
@@ -827,6 +828,8 @@ static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
        info.entry_type = rf->sd_type;
 
        for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
+               if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
+                       continue;
                if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
                        info.rsrc_type = iw_hmc_obj_types[i];
                        info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
index 445e69e..7887230 100644 (file)
@@ -2595,7 +2595,10 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
                        /* remove the SQ WR by moving SQ tail*/
                        IRDMA_RING_SET_TAIL(*sq_ring,
                                sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
-
+                       if (cmpl->cpi.op_type == IRDMAQP_OP_NOP) {
+                               kfree(cmpl);
+                               continue;
+                       }
                        ibdev_dbg(iwqp->iwscq->ibcq.device,
                                  "DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n",
                                  __func__, cmpl->cpi.wr_id, qp->qp_id);
index 5b988db..5d45de2 100644 (file)
@@ -442,6 +442,10 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
                *active_width = IB_WIDTH_2X;
                *active_speed = IB_SPEED_NDR;
                break;
+       case MLX5E_PROT_MASK(MLX5E_400GAUI_8):
+               *active_width = IB_WIDTH_8X;
+               *active_speed = IB_SPEED_HDR;
+               break;
        case MLX5E_PROT_MASK(MLX5E_400GAUI_4_400GBASE_CR4_KR4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_NDR;
index f642ec8..29131f1 100644 (file)
@@ -781,9 +781,6 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
        input_report_key(dev, BTN_C, data[8]);
        input_report_key(dev, BTN_Z, data[9]);
 
-       /* Profile button has a value of 0-3, so it is reported as an axis */
-       if (xpad->mapping & MAP_PROFILE_BUTTON)
-               input_report_abs(dev, ABS_PROFILE, data[34]);
 
        input_sync(dev);
 }
@@ -1061,6 +1058,10 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
                                        (__u16) le16_to_cpup((__le16 *)(data + 8)));
                }
 
+               /* Profile button has a value of 0-3, so it is reported as an axis */
+               if (xpad->mapping & MAP_PROFILE_BUTTON)
+                       input_report_abs(dev, ABS_PROFILE, data[34]);
+
                /* paddle handling */
                /* based on SDL's SDL_hidapi_xboxone.c */
                if (xpad->mapping & MAP_PADDLES) {
index 989228b..e2c11d9 100644 (file)
@@ -852,8 +852,8 @@ static void alps_process_packet_v6(struct psmouse *psmouse)
                        x = y = z = 0;
 
                /* Divide 4 since trackpoint's speed is too fast */
-               input_report_rel(dev2, REL_X, (char)x / 4);
-               input_report_rel(dev2, REL_Y, -((char)y / 4));
+               input_report_rel(dev2, REL_X, (s8)x / 4);
+               input_report_rel(dev2, REL_Y, -((s8)y / 4));
 
                psmouse_report_standard_buttons(dev2, packet[3]);
 
@@ -1104,8 +1104,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
            ((packet[3] & 0x20) << 1);
        z = (packet[5] & 0x3f) | ((packet[3] & 0x80) >> 1);
 
-       input_report_rel(dev2, REL_X, (char)x);
-       input_report_rel(dev2, REL_Y, -((char)y));
+       input_report_rel(dev2, REL_X, (s8)x);
+       input_report_rel(dev2, REL_Y, -((s8)y));
        input_report_abs(dev2, ABS_PRESSURE, z);
 
        psmouse_report_standard_buttons(dev2, packet[1]);
@@ -2294,20 +2294,20 @@ static int alps_get_v3_v7_resolution(struct psmouse *psmouse, int reg_pitch)
        if (reg < 0)
                return reg;
 
-       x_pitch = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
+       x_pitch = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */
        x_pitch = 50 + 2 * x_pitch; /* In 0.1 mm units */
 
-       y_pitch = (char)reg >> 4; /* sign extend upper 4 bits */
+       y_pitch = (s8)reg >> 4; /* sign extend upper 4 bits */
        y_pitch = 36 + 2 * y_pitch; /* In 0.1 mm units */
 
        reg = alps_command_mode_read_reg(psmouse, reg_pitch + 1);
        if (reg < 0)
                return reg;
 
-       x_electrode = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
+       x_electrode = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */
        x_electrode = 17 + x_electrode;
 
-       y_electrode = (char)reg >> 4; /* sign extend upper 4 bits */
+       y_electrode = (s8)reg >> 4; /* sign extend upper 4 bits */
        y_electrode = 13 + y_electrode;
 
        x_phys = x_pitch * (x_electrode - 1); /* In 0.1 mm units */
index 6fd5fff..c74b990 100644 (file)
@@ -202,8 +202,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
        state->pressed = packet[0] >> 7;
        finger1 = ((packet[0] >> 4) & 0x7) - 1;
        if (finger1 < FOC_MAX_FINGERS) {
-               state->fingers[finger1].x += (char)packet[1];
-               state->fingers[finger1].y += (char)packet[2];
+               state->fingers[finger1].x += (s8)packet[1];
+               state->fingers[finger1].y += (s8)packet[2];
        } else {
                psmouse_err(psmouse, "First finger in rel packet invalid: %d\n",
                            finger1);
@@ -218,8 +218,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
         */
        finger2 = ((packet[3] >> 4) & 0x7) - 1;
        if (finger2 < FOC_MAX_FINGERS) {
-               state->fingers[finger2].x += (char)packet[4];
-               state->fingers[finger2].y += (char)packet[5];
+               state->fingers[finger2].x += (s8)packet[4];
+               state->fingers[finger2].y += (s8)packet[5];
        }
 }
 
index efc6173..028e45b 100644 (file)
@@ -611,6 +611,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
                .driver_data = (void *)(SERIO_QUIRK_NOMUX)
        },
        {
+               /* Fujitsu Lifebook A574/H */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "FMVA0501PZ"),
+               },
+               .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+       },
+       {
                /* Gigabyte M912 */
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
@@ -1117,6 +1125,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
                                        SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
        },
        {
+               /*
+                * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
+                * the keyboard very laggy for ~5 seconds after boot and
+                * sometimes also after resume.
+                * However both are required for the keyboard to not fail
+                * completely sometimes after boot or resume.
+                */
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_NAME, "N150CU"),
+               },
+               .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+                                       SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+       },
+       {
                .matches = {
                        DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
                },
@@ -1124,6 +1146,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
                                        SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
        },
        {
+               /*
+                * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
+                * the keyboard very laggy for ~5 seconds after boot and
+                * sometimes also after resume.
+                * However both are required for the keyboard to not fail
+                * completely sometimes after boot or resume.
+                */
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_NAME, "NHxxRZQ"),
+               },
+               .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+                                       SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+       },
+       {
                .matches = {
                        DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
                },
index b348172..d77f116 100644 (file)
@@ -124,10 +124,18 @@ static const unsigned long goodix_irq_flags[] = {
 static const struct dmi_system_id nine_bytes_report[] = {
 #if defined(CONFIG_DMI) && defined(CONFIG_X86)
        {
-               .ident = "Lenovo YogaBook",
-               /* YB1-X91L/F and YB1-X90L/F */
+               /* Lenovo Yoga Book X90F / X90L */
                .matches = {
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9")
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+               }
+       },
+       {
+               /* Lenovo Yoga Book X91F / X91L */
+               .matches = {
+                       /* Non exact match to match F + L versions */
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
                }
        },
 #endif
index 483aaae..1abd187 100644 (file)
@@ -1415,23 +1415,26 @@ static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
        return &data->iommu;
 }
 
-static void exynos_iommu_release_device(struct device *dev)
+static void exynos_iommu_set_platform_dma(struct device *dev)
 {
        struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
-       struct sysmmu_drvdata *data;
 
        if (owner->domain) {
                struct iommu_group *group = iommu_group_get(dev);
 
                if (group) {
-#ifndef CONFIG_ARM
-                       WARN_ON(owner->domain !=
-                               iommu_group_default_domain(group));
-#endif
                        exynos_iommu_detach_device(owner->domain, dev);
                        iommu_group_put(group);
                }
        }
+}
+
+static void exynos_iommu_release_device(struct device *dev)
+{
+       struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
+       struct sysmmu_drvdata *data;
+
+       exynos_iommu_set_platform_dma(dev);
 
        list_for_each_entry(data, &owner->controllers, owner_node)
                device_link_del(data->link);
@@ -1479,7 +1482,7 @@ static const struct iommu_ops exynos_iommu_ops = {
        .domain_alloc = exynos_iommu_domain_alloc,
        .device_group = generic_device_group,
 #ifdef CONFIG_ARM
-       .set_platform_dma_ops = exynos_iommu_release_device,
+       .set_platform_dma_ops = exynos_iommu_set_platform_dma,
 #endif
        .probe_device = exynos_iommu_probe_device,
        .release_device = exynos_iommu_release_device,
index 6acfe87..23828d1 100644 (file)
@@ -1071,7 +1071,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
        }
 
        err = -EINVAL;
-       if (cap_sagaw(iommu->cap) == 0) {
+       if (!cap_sagaw(iommu->cap) &&
+           (!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) {
                pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
                        iommu->name);
                drhd->ignored = 1;
index d6df3b8..694ab9b 100644 (file)
@@ -641,6 +641,8 @@ struct iommu_pmu {
        DECLARE_BITMAP(used_mask, IOMMU_PMU_IDX_MAX);
        struct perf_event       *event_list[IOMMU_PMU_IDX_MAX];
        unsigned char           irq_name[16];
+       struct hlist_node       cpuhp_node;
+       int                     cpu;
 };
 
 #define IOMMU_IRQ_ID_OFFSET_PRQ                (DMAR_UNITS_SUPPORTED)
index 6d01fa0..df9e261 100644 (file)
@@ -311,14 +311,12 @@ static int set_ioapic_sid(struct irte *irte, int apic)
        if (!irte)
                return -1;
 
-       down_read(&dmar_global_lock);
        for (i = 0; i < MAX_IO_APICS; i++) {
                if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
                        sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
                        break;
                }
        }
-       up_read(&dmar_global_lock);
 
        if (sid == 0) {
                pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
@@ -338,14 +336,12 @@ static int set_hpet_sid(struct irte *irte, u8 id)
        if (!irte)
                return -1;
 
-       down_read(&dmar_global_lock);
        for (i = 0; i < MAX_HPET_TBS; i++) {
                if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
                        sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
                        break;
                }
        }
-       up_read(&dmar_global_lock);
 
        if (sid == 0) {
                pr_warn("Failed to set source-id of HPET block (%d)\n", id);
@@ -1339,9 +1335,7 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
        if (!data)
                goto out_free_parent;
 
-       down_read(&dmar_global_lock);
        index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs);
-       up_read(&dmar_global_lock);
        if (index < 0) {
                pr_warn("Failed to allocate IRTE\n");
                kfree(data);
index e17d974..cf43e79 100644 (file)
@@ -773,19 +773,34 @@ static void iommu_pmu_unset_interrupt(struct intel_iommu *iommu)
        iommu->perf_irq = 0;
 }
 
-static int iommu_pmu_cpu_online(unsigned int cpu)
+static int iommu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
 {
+       struct iommu_pmu *iommu_pmu = hlist_entry_safe(node, typeof(*iommu_pmu), cpuhp_node);
+
        if (cpumask_empty(&iommu_pmu_cpu_mask))
                cpumask_set_cpu(cpu, &iommu_pmu_cpu_mask);
 
+       if (cpumask_test_cpu(cpu, &iommu_pmu_cpu_mask))
+               iommu_pmu->cpu = cpu;
+
        return 0;
 }
 
-static int iommu_pmu_cpu_offline(unsigned int cpu)
+static int iommu_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
 {
-       struct dmar_drhd_unit *drhd;
-       struct intel_iommu *iommu;
-       int target;
+       struct iommu_pmu *iommu_pmu = hlist_entry_safe(node, typeof(*iommu_pmu), cpuhp_node);
+       int target = cpumask_first(&iommu_pmu_cpu_mask);
+
+       /*
+        * The iommu_pmu_cpu_mask has been updated when offline the CPU
+        * for the first iommu_pmu. Migrate the other iommu_pmu to the
+        * new target.
+        */
+       if (target < nr_cpu_ids && target != iommu_pmu->cpu) {
+               perf_pmu_migrate_context(&iommu_pmu->pmu, cpu, target);
+               iommu_pmu->cpu = target;
+               return 0;
+       }
 
        if (!cpumask_test_and_clear_cpu(cpu, &iommu_pmu_cpu_mask))
                return 0;
@@ -795,45 +810,50 @@ static int iommu_pmu_cpu_offline(unsigned int cpu)
        if (target < nr_cpu_ids)
                cpumask_set_cpu(target, &iommu_pmu_cpu_mask);
        else
-               target = -1;
+               return 0;
 
-       rcu_read_lock();
-
-       for_each_iommu(iommu, drhd) {
-               if (!iommu->pmu)
-                       continue;
-               perf_pmu_migrate_context(&iommu->pmu->pmu, cpu, target);
-       }
-       rcu_read_unlock();
+       perf_pmu_migrate_context(&iommu_pmu->pmu, cpu, target);
+       iommu_pmu->cpu = target;
 
        return 0;
 }
 
 static int nr_iommu_pmu;
+static enum cpuhp_state iommu_cpuhp_slot;
 
 static int iommu_pmu_cpuhp_setup(struct iommu_pmu *iommu_pmu)
 {
        int ret;
 
-       if (nr_iommu_pmu++)
-               return 0;
+       if (!nr_iommu_pmu) {
+               ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+                                             "driver/iommu/intel/perfmon:online",
+                                             iommu_pmu_cpu_online,
+                                             iommu_pmu_cpu_offline);
+               if (ret < 0)
+                       return ret;
+               iommu_cpuhp_slot = ret;
+       }
 
-       ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_IOMMU_PERF_ONLINE,
-                               "driver/iommu/intel/perfmon:online",
-                               iommu_pmu_cpu_online,
-                               iommu_pmu_cpu_offline);
-       if (ret)
-               nr_iommu_pmu = 0;
+       ret = cpuhp_state_add_instance(iommu_cpuhp_slot, &iommu_pmu->cpuhp_node);
+       if (ret) {
+               if (!nr_iommu_pmu)
+                       cpuhp_remove_multi_state(iommu_cpuhp_slot);
+               return ret;
+       }
+       nr_iommu_pmu++;
 
-       return ret;
+       return 0;
 }
 
 static void iommu_pmu_cpuhp_free(struct iommu_pmu *iommu_pmu)
 {
+       cpuhp_state_remove_instance(iommu_cpuhp_slot, &iommu_pmu->cpuhp_node);
+
        if (--nr_iommu_pmu)
                return;
 
-       cpuhp_remove_state(CPUHP_AP_PERF_X86_IOMMU_PERF_ONLINE);
+       cpuhp_remove_multi_state(iommu_cpuhp_slot);
 }
 
 void iommu_pmu_register(struct intel_iommu *iommu)
index f8d92c9..3c47846 100644 (file)
@@ -294,9 +294,9 @@ static void batch_clear_carry(struct pfn_batch *batch, unsigned int keep_pfns)
                        batch->npfns[batch->end - 1] < keep_pfns);
 
        batch->total_pfns = keep_pfns;
-       batch->npfns[0] = keep_pfns;
        batch->pfns[0] = batch->pfns[batch->end - 1] +
                         (batch->npfns[batch->end - 1] - keep_pfns);
+       batch->npfns[0] = keep_pfns;
        batch->end = 0;
 }
 
@@ -1142,6 +1142,7 @@ struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length,
                                    bool writable)
 {
        struct iopt_pages *pages;
+       unsigned long end;
 
        /*
         * The iommu API uses size_t as the length, and protect the DIV_ROUND_UP
@@ -1150,6 +1151,9 @@ struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length,
        if (length > SIZE_MAX - PAGE_SIZE || length == 0)
                return ERR_PTR(-EINVAL);
 
+       if (check_add_overflow((unsigned long)uptr, length, &end))
+               return ERR_PTR(-EOVERFLOW);
+
        pages = kzalloc(sizeof(*pages), GFP_KERNEL_ACCOUNT);
        if (!pages)
                return ERR_PTR(-ENOMEM);
@@ -1203,13 +1207,21 @@ iopt_area_unpin_domain(struct pfn_batch *batch, struct iopt_area *area,
                        unsigned long start =
                                max(start_index, *unmapped_end_index);
 
+                       if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+                           batch->total_pfns)
+                               WARN_ON(*unmapped_end_index -
+                                               batch->total_pfns !=
+                                       start_index);
                        batch_from_domain(batch, domain, area, start,
                                          last_index);
-                       batch_last_index = start + batch->total_pfns - 1;
+                       batch_last_index = start_index + batch->total_pfns - 1;
                } else {
                        batch_last_index = last_index;
                }
 
+               if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+                       WARN_ON(batch_last_index > real_last_index);
+
                /*
                 * unmaps must always 'cut' at a place where the pfns are not
                 * contiguous to pair with the maps that always install
index 2d0f934..dfde008 100644 (file)
@@ -1467,7 +1467,8 @@ static void setup_split_accounting(struct clone_info *ci, unsigned int len)
 }
 
 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
-                               struct dm_target *ti, unsigned int num_bios)
+                               struct dm_target *ti, unsigned int num_bios,
+                               unsigned *len)
 {
        struct bio *bio;
        int try;
@@ -1478,7 +1479,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
                if (try)
                        mutex_lock(&ci->io->md->table_devices_lock);
                for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
-                       bio = alloc_tio(ci, ti, bio_nr, NULL,
+                       bio = alloc_tio(ci, ti, bio_nr, len,
                                        try ? GFP_NOIO : GFP_NOWAIT);
                        if (!bio)
                                break;
@@ -1513,8 +1514,10 @@ static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
                ret = 1;
                break;
        default:
+               if (len)
+                       setup_split_accounting(ci, *len);
                /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
-               alloc_multiple_bios(&blist, ci, ti, num_bios);
+               alloc_multiple_bios(&blist, ci, ti, num_bios, len);
                while ((clone = bio_list_pop(&blist))) {
                        dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
                        __map_bio(clone);
index 39e49e5..13321db 100644 (file)
@@ -6260,7 +6260,6 @@ static void __md_stop(struct mddev *mddev)
        module_put(pers->owner);
        clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 
-       percpu_ref_exit(&mddev->writes_pending);
        percpu_ref_exit(&mddev->active_io);
        bioset_exit(&mddev->bio_set);
        bioset_exit(&mddev->sync_set);
@@ -6273,6 +6272,7 @@ void md_stop(struct mddev *mddev)
         */
        __md_stop_writes(mddev);
        __md_stop(mddev);
+       percpu_ref_exit(&mddev->writes_pending);
 }
 
 EXPORT_SYMBOL_GPL(md_stop);
@@ -7843,6 +7843,7 @@ static void md_free_disk(struct gendisk *disk)
 {
        struct mddev *mddev = disk->private_data;
 
+       percpu_ref_exit(&mddev->writes_pending);
        mddev_free(mddev);
 }
 
index 49d6c8b..48ae2e0 100644 (file)
@@ -1098,7 +1098,7 @@ static int imx290_runtime_suspend(struct device *dev)
 }
 
 static const struct dev_pm_ops imx290_pm_ops = {
-       SET_RUNTIME_PM_OPS(imx290_runtime_suspend, imx290_runtime_resume, NULL)
+       RUNTIME_PM_OPS(imx290_runtime_suspend, imx290_runtime_resume, NULL)
 };
 
 /* ----------------------------------------------------------------------------
@@ -1362,8 +1362,8 @@ static struct i2c_driver imx290_i2c_driver = {
        .remove = imx290_remove,
        .driver = {
                .name  = "imx290",
-               .pm = &imx290_pm_ops,
-               .of_match_table = of_match_ptr(imx290_of_match),
+               .pm = pm_ptr(&imx290_pm_ops),
+               .of_match_table = imx290_of_match,
        },
 };
 
index 61ff20a..cfb11c5 100644 (file)
@@ -38,8 +38,8 @@ static void venus_reset_cpu(struct venus_core *core)
        writel(fw_size, wrapper_base + WRAPPER_FW_END_ADDR);
        writel(0, wrapper_base + WRAPPER_CPA_START_ADDR);
        writel(fw_size, wrapper_base + WRAPPER_CPA_END_ADDR);
-       writel(0, wrapper_base + WRAPPER_NONPIX_START_ADDR);
-       writel(0, wrapper_base + WRAPPER_NONPIX_END_ADDR);
+       writel(fw_size, wrapper_base + WRAPPER_NONPIX_START_ADDR);
+       writel(fw_size, wrapper_base + WRAPPER_NONPIX_END_ADDR);
 
        if (IS_V6(core)) {
                /* Bring XTSS out of reset */
index bf76678..bbfaf65 100644 (file)
@@ -410,6 +410,7 @@ static struct memstick_dev *memstick_alloc_card(struct memstick_host *host)
        return card;
 err_out:
        host->card = old_card;
+       kfree_const(card->dev.kobj.name);
        kfree(card);
        return NULL;
 }
@@ -468,8 +469,10 @@ static void memstick_check(struct work_struct *work)
                                put_device(&card->dev);
                                host->card = NULL;
                        }
-               } else
+               } else {
+                       kfree_const(card->dev.kobj.name);
                        kfree(card);
+               }
        }
 
 out_power_off:
index 8995309..672d37e 100644 (file)
@@ -351,8 +351,6 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
                 */
                case MMC_TIMING_SD_HS:
                case MMC_TIMING_MMC_HS:
-               case MMC_TIMING_UHS_SDR12:
-               case MMC_TIMING_UHS_SDR25:
                        val &= ~SDHCI_CTRL_HISPD;
                }
        }
index 1e94e7d..a0a1194 100644 (file)
@@ -153,7 +153,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
                                mtdblk->cache_state = STATE_EMPTY;
                                ret = mtd_read(mtd, sect_start, sect_size,
                                               &retlen, mtdblk->cache_data);
-                               if (ret)
+                               if (ret && !mtd_is_bitflip(ret))
                                        return ret;
                                if (retlen != sect_size)
                                        return -EIO;
@@ -188,8 +188,12 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
        pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
                        mtd->name, pos, len);
 
-       if (!sect_size)
-               return mtd_read(mtd, pos, len, &retlen, buf);
+       if (!sect_size) {
+               ret = mtd_read(mtd, pos, len, &retlen, buf);
+               if (ret && !mtd_is_bitflip(ret))
+                       return ret;
+               return 0;
+       }
 
        while (len > 0) {
                unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -209,7 +213,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
                        memcpy (buf, mtdblk->cache_data + offset, size);
                } else {
                        ret = mtd_read(mtd, pos, size, &retlen, buf);
-                       if (ret)
+                       if (ret && !mtd_is_bitflip(ret))
                                return ret;
                        if (retlen != size)
                                return -EIO;
index 8afdca7..6b487ff 100644 (file)
@@ -429,6 +429,7 @@ static int mxic_ecc_data_xfer_wait_for_completion(struct mxic_ecc_engine *mxic)
                mxic_ecc_enable_int(mxic);
                ret = wait_for_completion_timeout(&mxic->complete,
                                                  msecs_to_jiffies(1000));
+               ret = ret ? 0 : -ETIMEDOUT;
                mxic_ecc_disable_int(mxic);
        } else {
                ret = readl_poll_timeout(mxic->regs + INTRPT_STS, val,
index 5ee0123..074e142 100644 (file)
@@ -176,6 +176,7 @@ struct meson_nfc {
 
        dma_addr_t daddr;
        dma_addr_t iaddr;
+       u32 info_bytes;
 
        unsigned long assigned_cs;
 };
@@ -279,7 +280,7 @@ static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
 
        if (raw) {
                len = mtd->writesize + mtd->oobsize;
-               cmd = (len & GENMASK(5, 0)) | scrambler | DMA_DIR(dir);
+               cmd = (len & GENMASK(13, 0)) | scrambler | DMA_DIR(dir);
                writel(cmd, nfc->reg_base + NFC_REG_CMD);
                return;
        }
@@ -503,6 +504,7 @@ static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, void *databuf,
                                         nfc->daddr, datalen, dir);
                        return ret;
                }
+               nfc->info_bytes = infolen;
                cmd = GENCMDIADDRL(NFC_CMD_AIL, nfc->iaddr);
                writel(cmd, nfc->reg_base + NFC_REG_CMD);
 
@@ -520,8 +522,10 @@ static void meson_nfc_dma_buffer_release(struct nand_chip *nand,
        struct meson_nfc *nfc = nand_get_controller_data(nand);
 
        dma_unmap_single(nfc->dev, nfc->daddr, datalen, dir);
-       if (infolen)
+       if (infolen) {
                dma_unmap_single(nfc->dev, nfc->iaddr, infolen, dir);
+               nfc->info_bytes = 0;
+       }
 }
 
 static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
@@ -540,7 +544,7 @@ static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
        if (ret)
                goto out;
 
-       cmd = NFC_CMD_N2M | (len & GENMASK(5, 0));
+       cmd = NFC_CMD_N2M | (len & GENMASK(13, 0));
        writel(cmd, nfc->reg_base + NFC_REG_CMD);
 
        meson_nfc_drain_cmd(nfc);
@@ -564,7 +568,7 @@ static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len)
        if (ret)
                return ret;
 
-       cmd = NFC_CMD_M2N | (len & GENMASK(5, 0));
+       cmd = NFC_CMD_M2N | (len & GENMASK(13, 0));
        writel(cmd, nfc->reg_base + NFC_REG_CMD);
 
        meson_nfc_drain_cmd(nfc);
@@ -710,6 +714,8 @@ static void meson_nfc_check_ecc_pages_valid(struct meson_nfc *nfc,
                usleep_range(10, 15);
                /* info is updated by nfc dma engine*/
                smp_rmb();
+               dma_sync_single_for_cpu(nfc->dev, nfc->iaddr, nfc->info_bytes,
+                                       DMA_FROM_DEVICE);
                ret = *info & ECC_COMPLETE;
        } while (!ret);
 }
@@ -991,7 +997,7 @@ static const struct mtd_ooblayout_ops meson_ooblayout_ops = {
 
 static int meson_nfc_clk_init(struct meson_nfc *nfc)
 {
-       struct clk_parent_data nfc_divider_parent_data[1];
+       struct clk_parent_data nfc_divider_parent_data[1] = {0};
        struct clk_init_data init = {0};
        int ret;
 
index c21abf7..179b284 100644 (file)
@@ -2160,8 +2160,23 @@ static int ns_exec_op(struct nand_chip *chip, const struct nand_operation *op,
        const struct nand_op_instr *instr = NULL;
        struct nandsim *ns = nand_get_controller_data(chip);
 
-       if (check_only)
+       if (check_only) {
+               /* The current implementation of nandsim needs to know the
+                * ongoing operation when performing the address cycles. This
+                * means it cannot tell the difference between a regular read
+                * and a continuous read. Hence, this hack to manually refuse
+                * supporting sequential cached operations.
+                */
+               for (op_id = 0; op_id < op->ninstrs; op_id++) {
+                       instr = &op->instrs[op_id];
+                       if (instr->type == NAND_OP_CMD_INSTR &&
+                           (instr->ctx.cmd.opcode == NAND_CMD_READCACHEEND ||
+                            instr->ctx.cmd.opcode == NAND_CMD_READCACHESEQ))
+                               return -EOPNOTSUPP;
+               }
+
                return 0;
+       }
 
        ns->lines.ce = 1;
 
index 5d62704..9e74bcd 100644 (file)
@@ -1531,6 +1531,9 @@ static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
        if (IS_ERR(sdrt))
                return PTR_ERR(sdrt);
 
+       if (conf->timings.mode > 3)
+               return -EOPNOTSUPP;
+
        if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
                return 0;
 
index 0a78045..522d375 100644 (file)
@@ -3343,7 +3343,19 @@ static struct spi_mem_driver spi_nor_driver = {
        .remove = spi_nor_remove,
        .shutdown = spi_nor_shutdown,
 };
-module_spi_mem_driver(spi_nor_driver);
+
+static int __init spi_nor_module_init(void)
+{
+       return spi_mem_driver_register(&spi_nor_driver);
+}
+module_init(spi_nor_module_init);
+
+static void __exit spi_nor_module_exit(void)
+{
+       spi_mem_driver_unregister(&spi_nor_driver);
+       spi_nor_debugfs_shutdown();
+}
+module_exit(spi_nor_module_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
index 2542322..e0cc42a 100644 (file)
@@ -711,8 +711,10 @@ static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
 
 #ifdef CONFIG_DEBUG_FS
 void spi_nor_debugfs_register(struct spi_nor *nor);
+void spi_nor_debugfs_shutdown(void);
 #else
 static inline void spi_nor_debugfs_register(struct spi_nor *nor) {}
+static inline void spi_nor_debugfs_shutdown(void) {}
 #endif
 
 #endif /* __LINUX_MTD_SPI_NOR_INTERNAL_H */
index 845b78c..fc7ad20 100644 (file)
@@ -226,13 +226,13 @@ static void spi_nor_debugfs_unregister(void *data)
        nor->debugfs_root = NULL;
 }
 
+static struct dentry *rootdir;
+
 void spi_nor_debugfs_register(struct spi_nor *nor)
 {
-       struct dentry *rootdir, *d;
+       struct dentry *d;
        int ret;
 
-       /* Create rootdir once. Will never be deleted again. */
-       rootdir = debugfs_lookup(SPI_NOR_DEBUGFS_ROOT, NULL);
        if (!rootdir)
                rootdir = debugfs_create_dir(SPI_NOR_DEBUGFS_ROOT, NULL);
 
@@ -247,3 +247,8 @@ void spi_nor_debugfs_register(struct spi_nor *nor)
        debugfs_create_file("capabilities", 0444, d, nor,
                            &spi_nor_capabilities_fops);
 }
+
+void spi_nor_debugfs_shutdown(void)
+{
+       debugfs_remove(rootdir);
+}
index 0904eb4..ad025b2 100644 (file)
@@ -666,12 +666,6 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
        ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
        ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
 
-       if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) >
-           ubi->vid_hdr_alsize)) {
-               ubi_err(ubi, "VID header offset %d too large.", ubi->vid_hdr_offset);
-               return -EINVAL;
-       }
-
        dbg_gen("min_io_size      %d", ubi->min_io_size);
        dbg_gen("max_write_size   %d", ubi->max_write_size);
        dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
@@ -689,6 +683,21 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
                                                ubi->vid_hdr_aloffset;
        }
 
+       /*
+        * Memory allocation for VID header is ubi->vid_hdr_alsize
+        * which is described in comments in io.c.
+        * Make sure VID header shift + UBI_VID_HDR_SIZE does not exceed
+        * ubi->vid_hdr_alsize, so that all vid header operations
+        * won't access memory out of bounds.
+        */
+       if ((ubi->vid_hdr_shift + UBI_VID_HDR_SIZE) > ubi->vid_hdr_alsize) {
+               ubi_err(ubi, "Invalid VID header offset %d, VID header shift(%d)"
+                       " + VID header size(%zu) > VID header aligned size(%d).",
+                       ubi->vid_hdr_offset, ubi->vid_hdr_shift,
+                       UBI_VID_HDR_SIZE, ubi->vid_hdr_alsize);
+               return -EINVAL;
+       }
+
        /* Similar for the data offset */
        ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
        ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
index 40f39e5..26a214f 100644 (file)
@@ -575,7 +575,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
  * @vol_id: the volume ID that last used this PEB
  * @lnum: the last used logical eraseblock number for the PEB
  * @torture: if the physical eraseblock has to be tortured
- * @nested: denotes whether the work_sem is already held in read mode
+ * @nested: denotes whether the work_sem is already held
  *
  * This function returns zero in case of success and a %-ENOMEM in case of
  * failure.
@@ -1131,7 +1131,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
                int err1;
 
                /* Re-schedule the LEB for erasure */
-               err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
+               err1 = schedule_erase(ubi, e, vol_id, lnum, 0, true);
                if (err1) {
                        spin_lock(&ubi->wl_lock);
                        wl_entry_destroy(ubi, e);
index 236e521..8cc9a74 100644 (file)
@@ -3269,7 +3269,8 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
 
        combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
        if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP ||
-           combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
+           (combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION &&
+            combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT))
                goto out;
 
        saddr = &combined->ip6.saddr;
@@ -3291,7 +3292,7 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
        else if (curr_active_slave &&
                 time_after(slave_last_rx(bond, curr_active_slave),
                            curr_active_slave->last_link_up))
-               bond_validate_na(bond, slave, saddr, daddr);
+               bond_validate_na(bond, slave, daddr, saddr);
        else if (curr_arp_slave &&
                 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
                bond_validate_na(bond, slave, saddr, daddr);
index 70887e0..d9434ed 100644 (file)
@@ -216,6 +216,18 @@ static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg,
        return 0;
 }
 
+static int b53_mmap_phy_read16(struct b53_device *dev, int addr, int reg,
+                              u16 *value)
+{
+       return -EIO;
+}
+
+static int b53_mmap_phy_write16(struct b53_device *dev, int addr, int reg,
+                               u16 value)
+{
+       return -EIO;
+}
+
 static const struct b53_io_ops b53_mmap_ops = {
        .read8 = b53_mmap_read8,
        .read16 = b53_mmap_read16,
@@ -227,6 +239,8 @@ static const struct b53_io_ops b53_mmap_ops = {
        .write32 = b53_mmap_write32,
        .write48 = b53_mmap_write48,
        .write64 = b53_mmap_write64,
+       .phy_read16 = b53_mmap_phy_read16,
+       .phy_write16 = b53_mmap_phy_write16,
 };
 
 static int b53_mmap_probe_of(struct platform_device *pdev,
index 003b0ac..3fffd5d 100644 (file)
@@ -958,15 +958,14 @@ int ksz8_fdb_dump(struct ksz_device *dev, int port,
        u16 entries = 0;
        u8 timestamp = 0;
        u8 fid;
-       u8 member;
-       struct alu_struct alu;
+       u8 src_port;
+       u8 mac[ETH_ALEN];
 
        do {
-               alu.is_static = false;
-               ret = ksz8_r_dyn_mac_table(dev, i, alu.mac, &fid, &member,
+               ret = ksz8_r_dyn_mac_table(dev, i, mac, &fid, &src_port,
                                           &timestamp, &entries);
-               if (!ret && (member & BIT(port))) {
-                       ret = cb(alu.mac, alu.fid, alu.is_static, data);
+               if (!ret && port == src_port) {
+                       ret = cb(mac, fid, false, data);
                        if (ret)
                                break;
                }
index 2f4623f..3698112 100644 (file)
@@ -82,22 +82,16 @@ static const struct regmap_bus regmap_smi[] = {
        {
                .read = ksz8863_mdio_read,
                .write = ksz8863_mdio_write,
-               .max_raw_read = 1,
-               .max_raw_write = 1,
        },
        {
                .read = ksz8863_mdio_read,
                .write = ksz8863_mdio_write,
                .val_format_endian_default = REGMAP_ENDIAN_BIG,
-               .max_raw_read = 2,
-               .max_raw_write = 2,
        },
        {
                .read = ksz8863_mdio_read,
                .write = ksz8863_mdio_write,
                .val_format_endian_default = REGMAP_ENDIAN_BIG,
-               .max_raw_read = 4,
-               .max_raw_write = 4,
        }
 };
 
@@ -108,7 +102,6 @@ static const struct regmap_config ksz8863_regmap_config[] = {
                .pad_bits = 24,
                .val_bits = 8,
                .cache_type = REGCACHE_NONE,
-               .use_single_read = 1,
                .lock = ksz_regmap_lock,
                .unlock = ksz_regmap_unlock,
        },
@@ -118,7 +111,6 @@ static const struct regmap_config ksz8863_regmap_config[] = {
                .pad_bits = 24,
                .val_bits = 16,
                .cache_type = REGCACHE_NONE,
-               .use_single_read = 1,
                .lock = ksz_regmap_lock,
                .unlock = ksz_regmap_unlock,
        },
@@ -128,7 +120,6 @@ static const struct regmap_config ksz8863_regmap_config[] = {
                .pad_bits = 24,
                .val_bits = 32,
                .cache_type = REGCACHE_NONE,
-               .use_single_read = 1,
                .lock = ksz_regmap_lock,
                .unlock = ksz_regmap_unlock,
        }
index 7fc2155..74c56d0 100644 (file)
@@ -404,13 +404,13 @@ static const u32 ksz8863_masks[] = {
        [VLAN_TABLE_VALID]              = BIT(19),
        [STATIC_MAC_TABLE_VALID]        = BIT(19),
        [STATIC_MAC_TABLE_USE_FID]      = BIT(21),
-       [STATIC_MAC_TABLE_FID]          = GENMASK(29, 26),
+       [STATIC_MAC_TABLE_FID]          = GENMASK(25, 22),
        [STATIC_MAC_TABLE_OVERRIDE]     = BIT(20),
        [STATIC_MAC_TABLE_FWD_PORTS]    = GENMASK(18, 16),
-       [DYNAMIC_MAC_TABLE_ENTRIES_H]   = GENMASK(5, 0),
-       [DYNAMIC_MAC_TABLE_MAC_EMPTY]   = BIT(7),
+       [DYNAMIC_MAC_TABLE_ENTRIES_H]   = GENMASK(1, 0),
+       [DYNAMIC_MAC_TABLE_MAC_EMPTY]   = BIT(2),
        [DYNAMIC_MAC_TABLE_NOT_READY]   = BIT(7),
-       [DYNAMIC_MAC_TABLE_ENTRIES]     = GENMASK(31, 28),
+       [DYNAMIC_MAC_TABLE_ENTRIES]     = GENMASK(31, 24),
        [DYNAMIC_MAC_TABLE_FID]         = GENMASK(19, 16),
        [DYNAMIC_MAC_TABLE_SRC_PORT]    = GENMASK(21, 20),
        [DYNAMIC_MAC_TABLE_TIMESTAMP]   = GENMASK(23, 22),
@@ -420,10 +420,10 @@ static u8 ksz8863_shifts[] = {
        [VLAN_TABLE_MEMBERSHIP_S]       = 16,
        [STATIC_MAC_FWD_PORTS]          = 16,
        [STATIC_MAC_FID]                = 22,
-       [DYNAMIC_MAC_ENTRIES_H]         = 3,
+       [DYNAMIC_MAC_ENTRIES_H]         = 8,
        [DYNAMIC_MAC_ENTRIES]           = 24,
        [DYNAMIC_MAC_FID]               = 16,
-       [DYNAMIC_MAC_TIMESTAMP]         = 24,
+       [DYNAMIC_MAC_TIMESTAMP]         = 22,
        [DYNAMIC_MAC_SRC_PORT]          = 20,
 };
 
index 30383c4..7108f74 100644 (file)
@@ -3354,9 +3354,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
         * If this is the upstream port for this switch, enable
         * forwarding of unknown unicasts and multicasts.
         */
-       reg = MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP |
-               MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
+       reg = MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
                MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
+       /* Forward any IPv4 IGMP or IPv6 MLD frames received
+        * by a USER port to the CPU port to allow snooping.
+        */
+       if (dsa_is_user_port(ds, port))
+               reg |= MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP;
+
        err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
        if (err)
                return err;
@@ -5596,7 +5601,7 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
         * .port_set_upstream_port method.
         */
        .set_egress_port = mv88e6393x_set_egress_port,
-       .watchdog_ops = &mv88e6390_watchdog_ops,
+       .watchdog_ops = &mv88e6393x_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu,
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
index ed3b2f8..a7af3ce 100644 (file)
@@ -943,6 +943,26 @@ const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {
        .irq_free = mv88e6390_watchdog_free,
 };
 
+static int mv88e6393x_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
+{
+       mv88e6390_watchdog_action(chip, irq);
+
+       /* Fix for clearing the force WD event bit.
+        * Unreleased erratum on mv88e6393x.
+        */
+       mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
+                          MV88E6390_G2_WDOG_CTL_UPDATE |
+                          MV88E6390_G2_WDOG_CTL_PTR_EVENT);
+
+       return IRQ_HANDLED;
+}
+
+const struct mv88e6xxx_irq_ops mv88e6393x_watchdog_ops = {
+       .irq_action = mv88e6393x_watchdog_action,
+       .irq_setup = mv88e6390_watchdog_setup,
+       .irq_free = mv88e6390_watchdog_free,
+};
+
 static irqreturn_t mv88e6xxx_g2_watchdog_thread_fn(int irq, void *dev_id)
 {
        struct mv88e6xxx_chip *chip = dev_id;
index e973114..7e09196 100644 (file)
@@ -369,6 +369,7 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
 extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
 extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;
 extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops;
+extern const struct mv88e6xxx_irq_ops mv88e6393x_watchdog_ops;
 
 extern const struct mv88e6xxx_avb_ops mv88e6165_avb_ops;
 extern const struct mv88e6xxx_avb_ops mv88e6352_avb_ops;
index 3e54fac..5a8fe70 100644 (file)
@@ -21,6 +21,7 @@
 
 #include <linux/module.h>
 #include <linux/of_device.h>
+#include <linux/overflow.h>
 #include <linux/regmap.h>
 
 #include "realtek.h"
@@ -152,7 +153,9 @@ static int realtek_mdio_probe(struct mdio_device *mdiodev)
        if (!var)
                return -EINVAL;
 
-       priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+       priv = devm_kzalloc(&mdiodev->dev,
+                           size_add(sizeof(*priv), var->chip_data_sz),
+                           GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
 
index 16c4906..12083b9 100644 (file)
@@ -672,6 +672,18 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        return 0;
 }
 
+static struct sk_buff *
+bnx2x_build_skb(const struct bnx2x_fastpath *fp, void *data)
+{
+       struct sk_buff *skb;
+
+       if (fp->rx_frag_size)
+               skb = build_skb(data, fp->rx_frag_size);
+       else
+               skb = slab_build_skb(data);
+       return skb;
+}
+
 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
 {
        if (fp->rx_frag_size)
@@ -779,7 +791,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
                         fp->rx_buf_size, DMA_FROM_DEVICE);
        if (likely(new_data))
-               skb = build_skb(data, fp->rx_frag_size);
+               skb = bnx2x_build_skb(fp, data);
 
        if (likely(skb)) {
 #ifdef BNX2X_STOP_ON_ERROR
@@ -1046,7 +1058,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                                                 dma_unmap_addr(rx_buf, mapping),
                                                 fp->rx_buf_size,
                                                 DMA_FROM_DEVICE);
-                               skb = build_skb(data, fp->rx_frag_size);
+                               skb = bnx2x_build_skb(fp, data);
                                if (unlikely(!skb)) {
                                        bnx2x_frag_free(fp, data);
                                        bnx2x_fp_qstats(bp, fp)->
index e2e2c98..c23e3b3 100644 (file)
@@ -175,12 +175,12 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
        { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
        { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
-       { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
-       { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
-       { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
-       { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
        { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
 #ifdef CONFIG_BNXT_SRIOV
index c0628ac..5928430 100644 (file)
@@ -1226,6 +1226,7 @@ struct bnxt_link_info {
 #define BNXT_LINK_SPEED_40GB   PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
 #define BNXT_LINK_SPEED_50GB   PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
 #define BNXT_LINK_SPEED_100GB  PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
+#define BNXT_LINK_SPEED_200GB  PORT_PHY_QCFG_RESP_LINK_SPEED_200GB
        u16                     support_speeds;
        u16                     support_pam4_speeds;
        u16                     auto_link_speeds;       /* fw adv setting */
index ec57312..6bd18eb 100644 (file)
@@ -1714,6 +1714,8 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
                return SPEED_50000;
        case BNXT_LINK_SPEED_100GB:
                return SPEED_100000;
+       case BNXT_LINK_SPEED_200GB:
+               return SPEED_200000;
        default:
                return SPEED_UNKNOWN;
        }
@@ -3738,6 +3740,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
                bnxt_ulp_stop(bp);
                rc = bnxt_close_nic(bp, true, false);
                if (rc) {
+                       etest->flags |= ETH_TEST_FL_FAILED;
                        bnxt_ulp_start(bp, rc);
                        return;
                }
index 66e3056..e43d99e 100644 (file)
@@ -1064,6 +1064,10 @@ static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
        }
 #endif
        addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+#ifdef CONFIG_MACB_USE_HWSTAMP
+       if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
+               addr &= ~GEM_BIT(DMA_RXVALID);
+#endif
        return addr;
 }
 
index da9d4b3..838750a 100644 (file)
@@ -989,6 +989,20 @@ static int enetc_get_mm(struct net_device *ndev, struct ethtool_mm_state *state)
        return 0;
 }
 
+/* FIXME: Workaround for the link partner's verification failing if ENETC
+ * previously received too much express traffic. The documentation doesn't
+ * suggest this is needed.
+ */
+static void enetc_restart_emac_rx(struct enetc_si *si)
+{
+       u32 val = enetc_port_rd(&si->hw, ENETC_PM0_CMD_CFG);
+
+       enetc_port_wr(&si->hw, ENETC_PM0_CMD_CFG, val & ~ENETC_PM0_RX_EN);
+
+       if (val & ENETC_PM0_RX_EN)
+               enetc_port_wr(&si->hw, ENETC_PM0_CMD_CFG, val);
+}
+
 static int enetc_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
                        struct netlink_ext_ack *extack)
 {
@@ -1040,6 +1054,8 @@ static int enetc_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
 
        enetc_port_wr(hw, ENETC_MMCSR, val);
 
+       enetc_restart_emac_rx(priv->si);
+
        mutex_unlock(&priv->mm_lock);
 
        return 0;
index 5ba1e0d..9939cca 100644 (file)
@@ -507,6 +507,11 @@ struct bufdesc_ex {
 /* i.MX6Q adds pm_qos support */
 #define FEC_QUIRK_HAS_PMQOS                    BIT(23)
 
+/* Not all FEC hardware block MDIOs support accesses in C45 mode.
+ * Older blocks in the ColdFire parts do not support it.
+ */
+#define FEC_QUIRK_HAS_MDIO_C45         BIT(24)
+
 struct bufdesc_prop {
        int qid;
        /* Address of Rx and Tx buffers */
index f3b16a6..160c1b3 100644 (file)
@@ -100,18 +100,19 @@ struct fec_devinfo {
 
 static const struct fec_devinfo fec_imx25_info = {
        .quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
-                 FEC_QUIRK_HAS_FRREG,
+                 FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45,
 };
 
 static const struct fec_devinfo fec_imx27_info = {
-       .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
+       .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG |
+                 FEC_QUIRK_HAS_MDIO_C45,
 };
 
 static const struct fec_devinfo fec_imx28_info = {
        .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
                  FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
                  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
-                 FEC_QUIRK_NO_HARD_RESET,
+                 FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45,
 };
 
 static const struct fec_devinfo fec_imx6q_info = {
@@ -119,11 +120,12 @@ static const struct fec_devinfo fec_imx6q_info = {
                  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
                  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
                  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
-                 FEC_QUIRK_HAS_PMQOS,
+                 FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45,
 };
 
 static const struct fec_devinfo fec_mvf600_info = {
-       .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
+       .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC |
+                 FEC_QUIRK_HAS_MDIO_C45,
 };
 
 static const struct fec_devinfo fec_imx6x_info = {
@@ -132,7 +134,8 @@ static const struct fec_devinfo fec_imx6x_info = {
                  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
                  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
                  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
-                 FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES,
+                 FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
+                 FEC_QUIRK_HAS_MDIO_C45,
 };
 
 static const struct fec_devinfo fec_imx6ul_info = {
@@ -140,7 +143,8 @@ static const struct fec_devinfo fec_imx6ul_info = {
                  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
                  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
                  FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
-                 FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII,
+                 FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII |
+                 FEC_QUIRK_HAS_MDIO_C45,
 };
 
 static const struct fec_devinfo fec_imx8mq_info = {
@@ -150,7 +154,8 @@ static const struct fec_devinfo fec_imx8mq_info = {
                  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
                  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
                  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
-                 FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2,
+                 FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 |
+                 FEC_QUIRK_HAS_MDIO_C45,
 };
 
 static const struct fec_devinfo fec_imx8qm_info = {
@@ -160,14 +165,15 @@ static const struct fec_devinfo fec_imx8qm_info = {
                  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
                  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
                  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
-                 FEC_QUIRK_DELAYED_CLKS_SUPPORT,
+                 FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
 };
 
 static const struct fec_devinfo fec_s32v234_info = {
        .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
                  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
                  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
-                 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE,
+                 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+                 FEC_QUIRK_HAS_MDIO_C45,
 };
 
 static struct platform_device_id fec_devtype[] = {
@@ -2434,8 +2440,10 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        fep->mii_bus->name = "fec_enet_mii_bus";
        fep->mii_bus->read = fec_enet_mdio_read_c22;
        fep->mii_bus->write = fec_enet_mdio_write_c22;
-       fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
-       fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
+       if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) {
+               fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
+               fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
+       }
        snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
                pdev->name, fep->dev_id + 1);
        fep->mii_bus->priv = fep;
index 64eb044..005cb9d 100644 (file)
@@ -47,6 +47,8 @@
 
 #define GVE_RX_BUFFER_SIZE_DQO 2048
 
+#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
+
 /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
 struct gve_rx_desc_queue {
        struct gve_rx_desc *desc_ring; /* the descriptor ring */
index 4888bf0..5e11b82 100644 (file)
@@ -284,8 +284,8 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
        int bytes;
        int hlen;
 
-       hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) +
-                                tcp_hdrlen(skb) : skb_headlen(skb);
+       hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) + tcp_hdrlen(skb) :
+                                min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
 
        pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
                                                   hlen);
@@ -454,13 +454,11 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
        pkt_desc = &tx->desc[idx];
 
        l4_hdr_offset = skb_checksum_start_offset(skb);
-       /* If the skb is gso, then we want the tcp header in the first segment
-        * otherwise we want the linear portion of the skb (which will contain
-        * the checksum because skb->csum_start and skb->csum_offset are given
-        * relative to skb->head) in the first segment.
+       /* If the skb is gso, then we want the tcp header alone in the first segment
+        * otherwise we want the minimum required by the gVNIC spec.
         */
        hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
-                       skb_headlen(skb);
+                       min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
 
        info->skb =  skb;
        /* We don't want to split the header, so if necessary, pad to the end
index 5b3519c..97fe178 100644 (file)
@@ -44,7 +44,7 @@ static int i40e_diag_reg_pattern_test(struct i40e_hw *hw,
        return 0;
 }
 
-struct i40e_diag_reg_test_info i40e_reg_list[] = {
+const struct i40e_diag_reg_test_info i40e_reg_list[] = {
        /* offset               mask         elements   stride */
        {I40E_QTX_CTL(0),       0x0000FFBF, 1,
                I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
@@ -78,27 +78,28 @@ int i40e_diag_reg_test(struct i40e_hw *hw)
 {
        int ret_code = 0;
        u32 reg, mask;
+       u32 elements;
        u32 i, j;
 
        for (i = 0; i40e_reg_list[i].offset != 0 &&
                                             !ret_code; i++) {
 
+               elements = i40e_reg_list[i].elements;
                /* set actual reg range for dynamically allocated resources */
                if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
                    hw->func_caps.num_tx_qp != 0)
-                       i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
+                       elements = hw->func_caps.num_tx_qp;
                if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
                     i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
                     i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
                     i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
                     i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
                    hw->func_caps.num_msix_vectors != 0)
-                       i40e_reg_list[i].elements =
-                               hw->func_caps.num_msix_vectors - 1;
+                       elements = hw->func_caps.num_msix_vectors - 1;
 
                /* test register access */
                mask = i40e_reg_list[i].mask;
-               for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
+               for (j = 0; j < elements && !ret_code; j++) {
                        reg = i40e_reg_list[i].offset +
                              (j * i40e_reg_list[i].stride);
                        ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
index e641035..c3ce5f3 100644 (file)
@@ -20,7 +20,7 @@ struct i40e_diag_reg_test_info {
        u32 stride;     /* bytes between each element */
 };
 
-extern struct i40e_diag_reg_test_info i40e_reg_list[];
+extern const struct i40e_diag_reg_test_info i40e_reg_list[];
 
 int i40e_diag_reg_test(struct i40e_hw *hw);
 int i40e_diag_eeprom_test(struct i40e_hw *hw);
index 232bc61..746ff76 100644 (file)
@@ -59,8 +59,6 @@ enum iavf_vsi_state_t {
 struct iavf_vsi {
        struct iavf_adapter *back;
        struct net_device *netdev;
-       unsigned long active_cvlans[BITS_TO_LONGS(VLAN_N_VID)];
-       unsigned long active_svlans[BITS_TO_LONGS(VLAN_N_VID)];
        u16 seid;
        u16 id;
        DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
@@ -158,15 +156,20 @@ struct iavf_vlan {
        u16 tpid;
 };
 
+enum iavf_vlan_state_t {
+       IAVF_VLAN_INVALID,
+       IAVF_VLAN_ADD,          /* filter needs to be added */
+       IAVF_VLAN_IS_NEW,       /* filter is new, wait for PF answer */
+       IAVF_VLAN_ACTIVE,       /* filter is accepted by PF */
+       IAVF_VLAN_DISABLE,      /* filter needs to be deleted by PF, then marked INACTIVE */
+       IAVF_VLAN_INACTIVE,     /* filter is inactive, we are in IFF_DOWN */
+       IAVF_VLAN_REMOVE,       /* filter needs to be removed from list */
+};
+
 struct iavf_vlan_filter {
        struct list_head list;
        struct iavf_vlan vlan;
-       struct {
-               u8 is_new_vlan:1;       /* filter is new, wait for PF answer */
-               u8 remove:1;            /* filter needs to be removed */
-               u8 add:1;               /* filter needs to be added */
-               u8 padding:5;
-       };
+       enum iavf_vlan_state_t state;
 };
 
 #define IAVF_MAX_TRAFFIC_CLASS 4
@@ -258,6 +261,7 @@ struct iavf_adapter {
        wait_queue_head_t vc_waitqueue;
        struct iavf_q_vector *q_vectors;
        struct list_head vlan_filter_list;
+       int num_vlan_filters;
        struct list_head mac_filter_list;
        struct mutex crit_lock;
        struct mutex client_lock;
index 095201e..2de4baf 100644 (file)
@@ -791,7 +791,8 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
                f->vlan = vlan;
 
                list_add_tail(&f->list, &adapter->vlan_filter_list);
-               f->add = true;
+               f->state = IAVF_VLAN_ADD;
+               adapter->num_vlan_filters++;
                adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
        }
 
@@ -813,7 +814,7 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
 
        f = iavf_find_vlan(adapter, vlan);
        if (f) {
-               f->remove = true;
+               f->state = IAVF_VLAN_REMOVE;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
        }
 
@@ -828,14 +829,18 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
  **/
 static void iavf_restore_filters(struct iavf_adapter *adapter)
 {
-       u16 vid;
+       struct iavf_vlan_filter *f;
 
        /* re-add all VLAN filters */
-       for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
-               iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));
+       spin_lock_bh(&adapter->mac_vlan_list_lock);
 
-       for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
-               iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
+       list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+               if (f->state == IAVF_VLAN_INACTIVE)
+                       f->state = IAVF_VLAN_ADD;
+       }
+
+       spin_unlock_bh(&adapter->mac_vlan_list_lock);
+       adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
 }
 
 /**
@@ -844,8 +849,7 @@ static void iavf_restore_filters(struct iavf_adapter *adapter)
  */
 u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
 {
-       return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
-               bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
+       return adapter->num_vlan_filters;
 }
 
 /**
@@ -928,11 +932,6 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
                return 0;
 
        iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
-       if (proto == cpu_to_be16(ETH_P_8021Q))
-               clear_bit(vid, adapter->vsi.active_cvlans);
-       else
-               clear_bit(vid, adapter->vsi.active_svlans);
-
        return 0;
 }
 
@@ -1293,16 +1292,11 @@ static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
                }
        }
 
-       /* remove all VLAN filters */
+       /* disable all VLAN filters */
        list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
-                                list) {
-               if (vlf->add) {
-                       list_del(&vlf->list);
-                       kfree(vlf);
-               } else {
-                       vlf->remove = true;
-               }
-       }
+                                list)
+               vlf->state = IAVF_VLAN_DISABLE;
+
        spin_unlock_bh(&adapter->mac_vlan_list_lock);
 }
 
@@ -2914,6 +2908,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
                list_del(&fv->list);
                kfree(fv);
        }
+       adapter->num_vlan_filters = 0;
 
        spin_unlock_bh(&adapter->mac_vlan_list_lock);
 
@@ -3131,9 +3126,6 @@ continue_reset:
        adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
        iavf_misc_irq_enable(adapter);
 
-       bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
-       bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
-
        mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
 
        /* We were running when the reset started, so we need to restore some
index 4e17d00..9afbbda 100644 (file)
@@ -642,16 +642,10 @@ static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
 
        spin_lock_bh(&adapter->mac_vlan_list_lock);
        list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
-               if (f->is_new_vlan) {
-                       if (f->vlan.tpid == ETH_P_8021Q)
-                               clear_bit(f->vlan.vid,
-                                         adapter->vsi.active_cvlans);
-                       else
-                               clear_bit(f->vlan.vid,
-                                         adapter->vsi.active_svlans);
-
+               if (f->state == IAVF_VLAN_IS_NEW) {
                        list_del(&f->list);
                        kfree(f);
+                       adapter->num_vlan_filters--;
                }
        }
        spin_unlock_bh(&adapter->mac_vlan_list_lock);
@@ -679,7 +673,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
        spin_lock_bh(&adapter->mac_vlan_list_lock);
 
        list_for_each_entry(f, &adapter->vlan_filter_list, list) {
-               if (f->add)
+               if (f->state == IAVF_VLAN_ADD)
                        count++;
        }
        if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
@@ -710,11 +704,10 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
                vvfl->vsi_id = adapter->vsi_res->vsi_id;
                vvfl->num_elements = count;
                list_for_each_entry(f, &adapter->vlan_filter_list, list) {
-                       if (f->add) {
+                       if (f->state == IAVF_VLAN_ADD) {
                                vvfl->vlan_id[i] = f->vlan.vid;
                                i++;
-                               f->add = false;
-                               f->is_new_vlan = true;
+                               f->state = IAVF_VLAN_IS_NEW;
                                if (i == count)
                                        break;
                        }
@@ -760,7 +753,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
                vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
                vvfl_v2->num_elements = count;
                list_for_each_entry(f, &adapter->vlan_filter_list, list) {
-                       if (f->add) {
+                       if (f->state == IAVF_VLAN_ADD) {
                                struct virtchnl_vlan_supported_caps *filtering_support =
                                        &adapter->vlan_v2_caps.filtering.filtering_support;
                                struct virtchnl_vlan *vlan;
@@ -778,8 +771,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
                                vlan->tpid = f->vlan.tpid;
 
                                i++;
-                               f->add = false;
-                               f->is_new_vlan = true;
+                               f->state = IAVF_VLAN_IS_NEW;
                        }
                }
 
@@ -822,10 +814,16 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
                 * filters marked for removal to enable bailing out before
                 * sending a virtchnl message
                 */
-               if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) {
+               if (f->state == IAVF_VLAN_REMOVE &&
+                   !VLAN_FILTERING_ALLOWED(adapter)) {
                        list_del(&f->list);
                        kfree(f);
-               } else if (f->remove) {
+                       adapter->num_vlan_filters--;
+               } else if (f->state == IAVF_VLAN_DISABLE &&
+                   !VLAN_FILTERING_ALLOWED(adapter)) {
+                       f->state = IAVF_VLAN_INACTIVE;
+               } else if (f->state == IAVF_VLAN_REMOVE ||
+                          f->state == IAVF_VLAN_DISABLE) {
                        count++;
                }
        }
@@ -857,11 +855,18 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
                vvfl->vsi_id = adapter->vsi_res->vsi_id;
                vvfl->num_elements = count;
                list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
-                       if (f->remove) {
+                       if (f->state == IAVF_VLAN_DISABLE) {
                                vvfl->vlan_id[i] = f->vlan.vid;
+                               f->state = IAVF_VLAN_INACTIVE;
                                i++;
+                               if (i == count)
+                                       break;
+                       } else if (f->state == IAVF_VLAN_REMOVE) {
+                               vvfl->vlan_id[i] = f->vlan.vid;
                                list_del(&f->list);
                                kfree(f);
+                               adapter->num_vlan_filters--;
+                               i++;
                                if (i == count)
                                        break;
                        }
@@ -901,7 +906,8 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
                vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
                vvfl_v2->num_elements = count;
                list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
-                       if (f->remove) {
+                       if (f->state == IAVF_VLAN_DISABLE ||
+                           f->state == IAVF_VLAN_REMOVE) {
                                struct virtchnl_vlan_supported_caps *filtering_support =
                                        &adapter->vlan_v2_caps.filtering.filtering_support;
                                struct virtchnl_vlan *vlan;
@@ -915,8 +921,13 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
                                vlan->tci = f->vlan.vid;
                                vlan->tpid = f->vlan.tpid;
 
-                               list_del(&f->list);
-                               kfree(f);
+                               if (f->state == IAVF_VLAN_DISABLE) {
+                                       f->state = IAVF_VLAN_INACTIVE;
+                               } else {
+                                       list_del(&f->list);
+                                       kfree(f);
+                                       adapter->num_vlan_filters--;
+                               }
                                i++;
                                if (i == count)
                                        break;
@@ -2192,7 +2203,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                                list_for_each_entry(vlf,
                                                    &adapter->vlan_filter_list,
                                                    list)
-                                       vlf->add = true;
+                                       vlf->state = IAVF_VLAN_ADD;
 
                                adapter->aq_required |=
                                        IAVF_FLAG_AQ_ADD_VLAN_FILTER;
@@ -2260,7 +2271,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                                list_for_each_entry(vlf,
                                                    &adapter->vlan_filter_list,
                                                    list)
-                                       vlf->add = true;
+                                       vlf->state = IAVF_VLAN_ADD;
 
                                aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
                        }
@@ -2444,15 +2455,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
 
                spin_lock_bh(&adapter->mac_vlan_list_lock);
                list_for_each_entry(f, &adapter->vlan_filter_list, list) {
-                       if (f->is_new_vlan) {
-                               f->is_new_vlan = false;
-                               if (f->vlan.tpid == ETH_P_8021Q)
-                                       set_bit(f->vlan.vid,
-                                               adapter->vsi.active_cvlans);
-                               else
-                                       set_bit(f->vlan.vid,
-                                               adapter->vsi.active_svlans);
-                       }
+                       if (f->state == IAVF_VLAN_IS_NEW)
+                               f->state = IAVF_VLAN_ACTIVE;
                }
                spin_unlock_bh(&adapter->mac_vlan_list_lock);
                }
index 4eca8d1..b7682de 100644 (file)
@@ -2788,7 +2788,7 @@ static int
 ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
                           u16 vsi_handle, unsigned long *tc_bitmap)
 {
-       struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
+       struct ice_sched_agg_vsi_info *agg_vsi_info, *iter, *old_agg_vsi_info = NULL;
        struct ice_sched_agg_info *agg_info, *old_agg_info;
        struct ice_hw *hw = pi->hw;
        int status = 0;
@@ -2806,11 +2806,13 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
        if (old_agg_info && old_agg_info != agg_info) {
                struct ice_sched_agg_vsi_info *vtmp;
 
-               list_for_each_entry_safe(old_agg_vsi_info, vtmp,
+               list_for_each_entry_safe(iter, vtmp,
                                         &old_agg_info->agg_vsi_list,
                                         list_entry)
-                       if (old_agg_vsi_info->vsi_handle == vsi_handle)
+                       if (iter->vsi_handle == vsi_handle) {
+                               old_agg_vsi_info = iter;
                                break;
+                       }
        }
 
        /* check if entry already exist */
index 61f844d..46b3685 100644 (file)
@@ -1780,18 +1780,36 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
 int
 ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
 {
-       struct ice_vsi_ctx *ctx;
+       struct ice_vsi_ctx *ctx, *cached_ctx;
+       int status;
+
+       cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+       if (!cached_ctx)
+               return -ENOENT;
 
-       ctx = ice_get_vsi_ctx(hw, vsi_handle);
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
-               return -EIO;
+               return -ENOMEM;
+
+       ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss;
+       ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc;
+       ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags;
+
+       ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
 
        if (enable)
                ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
        else
                ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
 
-       return ice_update_vsi(hw, vsi_handle, ctx, NULL);
+       status = ice_update_vsi(hw, vsi_handle, ctx, NULL);
+       if (!status) {
+               cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags;
+               cached_ctx->info.valid_sections |= ctx->info.valid_sections;
+       }
+
+       kfree(ctx);
+       return status;
 }
 
 /**
index b61dd9f..4fcf2d0 100644 (file)
@@ -938,6 +938,7 @@ ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
  * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
  * @rx_ring: Rx descriptor ring to transact packets on
  * @size: size of buffer to add to skb
+ * @ntc: index of next to clean element
  *
  * This function will pull an Rx buffer from the ring and synchronize it
  * for use by the CPU.
@@ -1026,7 +1027,6 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
 /**
  * ice_construct_skb - Allocate skb and populate it
  * @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buf: Rx buffer to pull data from
  * @xdp: xdp_buff pointing to the data
  *
  * This function allocates an skb. It then populates it with the page
index 7bc5aa3..c8322fb 100644 (file)
@@ -438,6 +438,7 @@ busy:
  * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
  * @xdp_ring: XDP ring
  * @xdp_res: Result of the receive batch
+ * @first_idx: index to write from caller
  *
  * This function bumps XDP Tx tail and/or flush redirect map, and
  * should be called when a batch of packets has been processed in the
index e6ef6b3..daa6a1e 100644 (file)
@@ -542,6 +542,87 @@ static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
 }
 
 /**
+ * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
+ * @fdir: pointer to the VF FDIR structure
+ */
+static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
+{
+       enum ice_fltr_ptype flow;
+
+       for (flow = ICE_FLTR_PTYPE_NONF_NONE;
+            flow < ICE_FLTR_PTYPE_MAX; flow++) {
+               fdir->fdir_fltr_cnt[flow][0] = 0;
+               fdir->fdir_fltr_cnt[flow][1] = 0;
+       }
+}
+
+/**
+ * ice_vc_fdir_has_prof_conflict
+ * @vf: pointer to the VF structure
+ * @conf: FDIR configuration for each filter
+ *
+ * Check if @conf has conflicting profile with existing profiles
+ *
+ * Return: true on success, and false on error.
+ */
+static bool
+ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
+                             struct virtchnl_fdir_fltr_conf *conf)
+{
+       struct ice_fdir_fltr *desc;
+
+       list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
+               struct virtchnl_fdir_fltr_conf *existing_conf;
+               enum ice_fltr_ptype flow_type_a, flow_type_b;
+               struct ice_fdir_fltr *a, *b;
+
+               existing_conf = to_fltr_conf_from_desc(desc);
+               a = &existing_conf->input;
+               b = &conf->input;
+               flow_type_a = a->flow_type;
+               flow_type_b = b->flow_type;
+
+               /* No need to compare two rules with different tunnel types or
+                * with the same protocol type.
+                */
+               if (existing_conf->ttype != conf->ttype ||
+                   flow_type_a == flow_type_b)
+                       continue;
+
+               switch (flow_type_a) {
+               case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+               case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+               case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+                       if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
+                               return true;
+                       break;
+               case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+                       if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
+                           flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+                           flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
+                               return true;
+                       break;
+               case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+               case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+               case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+                       if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
+                               return true;
+                       break;
+               case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+                       if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
+                           flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
+                           flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
+                               return true;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return false;
+}
+
+/**
  * ice_vc_fdir_write_flow_prof
  * @vf: pointer to the VF structure
  * @flow: filter flow type
@@ -677,6 +758,13 @@ ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
        enum ice_fltr_ptype flow;
        int ret;
 
+       ret = ice_vc_fdir_has_prof_conflict(vf, conf);
+       if (ret) {
+               dev_dbg(dev, "Found flow profile conflict for VF %d\n",
+                       vf->vf_id);
+               return ret;
+       }
+
        flow = input->flow_type;
        ret = ice_vc_fdir_alloc_prof(vf, flow);
        if (ret) {
@@ -1798,7 +1886,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
                v_ret = VIRTCHNL_STATUS_SUCCESS;
                stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
                dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
-               goto err_free_conf;
+               goto err_rem_entry;
        }
 
        ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
@@ -1807,15 +1895,16 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
                stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
                dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
                        vf->vf_id, ret);
-               goto err_rem_entry;
+               goto err_clr_irq;
        }
 
 exit:
        kfree(stat);
        return ret;
 
-err_rem_entry:
+err_clr_irq:
        ice_vc_fdir_clear_irq_ctx(vf);
+err_rem_entry:
        ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
 err_free_conf:
        devm_kfree(dev, conf);
@@ -1924,6 +2013,7 @@ void ice_vf_fdir_init(struct ice_vf *vf)
        spin_lock_init(&fdir->ctx_lock);
        fdir->ctx_irq.flags = 0;
        fdir->ctx_done.flags = 0;
+       ice_vc_fdir_reset_cnt_all(fdir);
 }
 
 /**
index 0e39d19..2cad76d 100644 (file)
@@ -3549,6 +3549,8 @@ static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
 
        netdev_tx_reset_queue(nq);
 
+       txq->buf               = NULL;
+       txq->tso_hdrs          = NULL;
        txq->descs             = NULL;
        txq->last_desc         = 0;
        txq->next_desc_to_proc = 0;
index 41d935d..40aeaa7 100644 (file)
@@ -62,35 +62,38 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
        MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
                       MVPP22_CLS_HEK_IP4_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
-                      MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
        MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
                       MVPP22_CLS_HEK_IP4_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
-                      MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
        MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
                       MVPP22_CLS_HEK_IP4_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
-                      MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
        /* TCP over IPv4 flows, fragmented, with vlan tag */
        MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
                       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
-                      MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+                          MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK),
 
        MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
                       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
-                      MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+                          MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK),
 
        MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
                       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
-                      MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
+                      MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE |
+                          MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK),
 
        /* UDP over IPv4 flows, Not fragmented, no vlan tag */
@@ -132,35 +135,38 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
        MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
                       MVPP22_CLS_HEK_IP4_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
-                      MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
        MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
                       MVPP22_CLS_HEK_IP4_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
-                      MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
        MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
                       MVPP22_CLS_HEK_IP4_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
-                      MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
 
        /* UDP over IPv4 flows, fragmented, with vlan tag */
        MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
                       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
-                      MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+                          MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK),
 
        MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
                       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
-                      MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+                          MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK),
 
        MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
                       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
-                      MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
+                      MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE |
+                          MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK),
 
        /* TCP over IPv6 flows, not fragmented, no vlan tag */
index 75ba57b..9af22f4 100644 (file)
@@ -1539,8 +1539,8 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
        if (!priv->prs_double_vlans)
                return -ENOMEM;
 
-       /* Double VLAN: 0x8100, 0x88A8 */
-       err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
+       /* Double VLAN: 0x88A8, 0x8100 */
+       err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q,
                                        MVPP2_PRS_PORT_MASK);
        if (err)
                return err;
@@ -1607,59 +1607,45 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
 static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
 {
        struct mvpp2_prs_entry pe;
-       int tid;
-
-       /* IPv4 over PPPoE with options */
-       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
-                                       MVPP2_PE_LAST_FREE_TID);
-       if (tid < 0)
-               return tid;
-
-       memset(&pe, 0, sizeof(pe));
-       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
-       pe.index = tid;
-
-       mvpp2_prs_match_etype(&pe, 0, PPP_IP);
-
-       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
-       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
-                                MVPP2_PRS_RI_L3_PROTO_MASK);
-       /* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
-       mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
-                                sizeof(struct iphdr) - 4,
-                                MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
-       /* Set L3 offset */
-       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
-                                 MVPP2_ETH_TYPE_LEN,
-                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
-
-       /* Update shadow table and hw entry */
-       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
-       mvpp2_prs_hw_write(priv, &pe);
+       int tid, ihl;
 
-       /* IPv4 over PPPoE without options */
-       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
-                                       MVPP2_PE_LAST_FREE_TID);
-       if (tid < 0)
-               return tid;
+       /* IPv4 over PPPoE with header length >= 5 */
+       for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
+               tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                               MVPP2_PE_LAST_FREE_TID);
+               if (tid < 0)
+                       return tid;
 
-       pe.index = tid;
+               memset(&pe, 0, sizeof(pe));
+               mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+               pe.index = tid;
 
-       mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
-                                    MVPP2_PRS_IPV4_HEAD |
-                                    MVPP2_PRS_IPV4_IHL_MIN,
-                                    MVPP2_PRS_IPV4_HEAD_MASK |
-                                    MVPP2_PRS_IPV4_IHL_MASK);
+               mvpp2_prs_match_etype(&pe, 0, PPP_IP);
+               mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+                                            MVPP2_PRS_IPV4_HEAD | ihl,
+                                            MVPP2_PRS_IPV4_HEAD_MASK |
+                                            MVPP2_PRS_IPV4_IHL_MASK);
 
-       /* Clear ri before updating */
-       pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
-       pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
-       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
-                                MVPP2_PRS_RI_L3_PROTO_MASK);
+               mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+               mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+                                        MVPP2_PRS_RI_L3_PROTO_MASK);
+               /* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
+               mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+                                        sizeof(struct iphdr) - 4,
+                                        MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+               /* Set L3 offset */
+               mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+                                         MVPP2_ETH_TYPE_LEN,
+                                         MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+               /* Set L4 offset */
+               mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+                                         MVPP2_ETH_TYPE_LEN + (ihl * 4),
+                                         MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
 
-       /* Update shadow table and hw entry */
-       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
-       mvpp2_prs_hw_write(priv, &pe);
+               /* Update shadow table and hw entry */
+               mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+               mvpp2_prs_hw_write(priv, &pe);
+       }
 
        /* IPv6 over PPPoE */
        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
index 3cb4362..e14050e 100644 (file)
@@ -753,6 +753,7 @@ static void mtk_mac_link_up(struct phylink_config *config,
                 MAC_MCR_FORCE_RX_FC);
 
        /* Configure speed */
+       mac->speed = speed;
        switch (speed) {
        case SPEED_2500:
        case SPEED_1000:
@@ -763,8 +764,6 @@ static void mtk_mac_link_up(struct phylink_config *config,
                break;
        }
 
-       mtk_set_queue_speed(mac->hw, mac->id, speed);
-
        /* Configure duplex */
        if (duplex == DUPLEX_FULL)
                mcr |= MAC_MCR_FORCE_DPX;
@@ -2059,9 +2058,6 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                        skb_checksum_none_assert(skb);
                skb->protocol = eth_type_trans(skb, netdev);
 
-               if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
-                       mtk_ppe_check_skb(eth->ppe[0], skb, hash);
-
                if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
                        if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
                                if (trxd.rxd3 & RX_DMA_VTAG_V2) {
@@ -2089,6 +2085,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                        __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci);
                }
 
+               if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+                       mtk_ppe_check_skb(eth->ppe[0], skb, hash);
+
                skb_record_rx_queue(skb, 0);
                napi_gro_receive(napi, skb);
 
@@ -3237,6 +3236,9 @@ found:
        if (dp->index >= MTK_QDMA_NUM_QUEUES)
                return NOTIFY_DONE;
 
+       if (mac->speed > 0 && mac->speed <= s.base.speed)
+               s.base.speed = 0;
+
        mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
 
        return NOTIFY_DONE;
index 6883eb3..fd07d6e 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/platform_device.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
+#include <net/dst_metadata.h>
 #include <net/dsa.h>
 #include "mtk_eth_soc.h"
 #include "mtk_ppe.h"
@@ -458,6 +459,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
                hwe->ib1 &= ~MTK_FOE_IB1_STATE;
                hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
                dma_wmb();
+               mtk_ppe_cache_clear(ppe);
        }
        entry->hash = 0xffff;
 
@@ -699,7 +701,9 @@ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
                    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
                        goto out;
 
-               tag += 4;
+               if (!skb_metadata_dst(skb))
+                       tag += 4;
+
                if (get_unaligned_be16(tag) != ETH_P_8021Q)
                        break;
 
index 81afd5e..161751b 100644 (file)
@@ -576,6 +576,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
                if (IS_ERR(block_cb))
                        return PTR_ERR(block_cb);
 
+               flow_block_cb_incref(block_cb);
                flow_block_cb_add(block_cb, f);
                list_add_tail(&block_cb->driver_list, &block_cb_list);
                return 0;
@@ -584,7 +585,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
                if (!block_cb)
                        return -ENOENT;
 
-               if (flow_block_cb_decref(block_cb)) {
+               if (!flow_block_cb_decref(block_cb)) {
                        flow_block_cb_remove(block_cb, f);
                        list_del(&block_cb->driver_list);
                }
index 4b5e459..332472f 100644 (file)
@@ -681,14 +681,32 @@ int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
        return 0;
 }
 
-int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
+int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
+                       enum xdp_rss_hash_type *rss_type)
 {
        struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
+       struct mlx4_cqe *cqe = _ctx->cqe;
+       enum xdp_rss_hash_type xht = 0;
+       __be16 status;
 
        if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH)))
                return -ENODATA;
 
-       *hash = be32_to_cpu(_ctx->cqe->immed_rss_invalid);
+       *hash = be32_to_cpu(cqe->immed_rss_invalid);
+       status = cqe->status;
+       if (status & cpu_to_be16(MLX4_CQE_STATUS_TCP))
+               xht = XDP_RSS_L4_TCP;
+       if (status & cpu_to_be16(MLX4_CQE_STATUS_UDP))
+               xht = XDP_RSS_L4_UDP;
+       if (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV4F))
+               xht |= XDP_RSS_L3_IPV4;
+       if (status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) {
+               xht |= XDP_RSS_L3_IPV6;
+               if (cqe->ipv6_ext_mask)
+                       xht |= XDP_RSS_L3_DYNHDR;
+       }
+       *rss_type = xht;
+
        return 0;
 }
 
index 544e09b..4ac4d88 100644 (file)
@@ -798,7 +798,8 @@ int mlx4_en_netdev_event(struct notifier_block *this,
 
 struct xdp_md;
 int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp);
-int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash);
+int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
+                       enum xdp_rss_hash_type *rss_type);
 
 /*
  * Functions for time stamping
index c5dae48..d9d3b9e 100644 (file)
@@ -34,6 +34,7 @@
 #include <net/xdp_sock_drv.h>
 #include "en/xdp.h"
 #include "en/params.h"
+#include <linux/bitfield.h>
 
 int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
 {
@@ -169,14 +170,72 @@ static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
        return 0;
 }
 
-static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
+/* Mapping HW RSS Type bits CQE_RSS_HTYPE_IP + CQE_RSS_HTYPE_L4 into 4-bits*/
+#define RSS_TYPE_MAX_TABLE     16 /* 4-bits max 16 entries */
+#define RSS_L4         GENMASK(1, 0)
+#define RSS_L3         GENMASK(3, 2) /* Same as CQE_RSS_HTYPE_IP */
+
+/* Valid combinations of CQE_RSS_HTYPE_IP + CQE_RSS_HTYPE_L4 sorted numerical */
+enum mlx5_rss_hash_type {
+       RSS_TYPE_NO_HASH        = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IP_NONE) |
+                                  FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
+       RSS_TYPE_L3_IPV4        = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
+                                  FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
+       RSS_TYPE_L4_IPV4_TCP    = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
+                                  FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_TCP)),
+       RSS_TYPE_L4_IPV4_UDP    = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
+                                  FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_UDP)),
+       RSS_TYPE_L4_IPV4_IPSEC  = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
+                                  FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_IPSEC)),
+       RSS_TYPE_L3_IPV6        = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
+                                  FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
+       RSS_TYPE_L4_IPV6_TCP    = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
+                                  FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_TCP)),
+       RSS_TYPE_L4_IPV6_UDP    = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
+                                  FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_UDP)),
+       RSS_TYPE_L4_IPV6_IPSEC  = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
+                                  FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_IPSEC)),
+};
+
+/* Invalid combinations will simply return zero, allows no boundary checks */
+static const enum xdp_rss_hash_type mlx5_xdp_rss_type[RSS_TYPE_MAX_TABLE] = {
+       [RSS_TYPE_NO_HASH]       = XDP_RSS_TYPE_NONE,
+       [1]                      = XDP_RSS_TYPE_NONE, /* Implicit zero */
+       [2]                      = XDP_RSS_TYPE_NONE, /* Implicit zero */
+       [3]                      = XDP_RSS_TYPE_NONE, /* Implicit zero */
+       [RSS_TYPE_L3_IPV4]       = XDP_RSS_TYPE_L3_IPV4,
+       [RSS_TYPE_L4_IPV4_TCP]   = XDP_RSS_TYPE_L4_IPV4_TCP,
+       [RSS_TYPE_L4_IPV4_UDP]   = XDP_RSS_TYPE_L4_IPV4_UDP,
+       [RSS_TYPE_L4_IPV4_IPSEC] = XDP_RSS_TYPE_L4_IPV4_IPSEC,
+       [RSS_TYPE_L3_IPV6]       = XDP_RSS_TYPE_L3_IPV6,
+       [RSS_TYPE_L4_IPV6_TCP]   = XDP_RSS_TYPE_L4_IPV6_TCP,
+       [RSS_TYPE_L4_IPV6_UDP]   = XDP_RSS_TYPE_L4_IPV6_UDP,
+       [RSS_TYPE_L4_IPV6_IPSEC] = XDP_RSS_TYPE_L4_IPV6_IPSEC,
+       [12]                     = XDP_RSS_TYPE_NONE, /* Implicit zero */
+       [13]                     = XDP_RSS_TYPE_NONE, /* Implicit zero */
+       [14]                     = XDP_RSS_TYPE_NONE, /* Implicit zero */
+       [15]                     = XDP_RSS_TYPE_NONE, /* Implicit zero */
+};
+
+static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
+                            enum xdp_rss_hash_type *rss_type)
 {
        const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
+       const struct mlx5_cqe64 *cqe = _ctx->cqe;
+       u32 hash_type, l4_type, ip_type, lookup;
 
        if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)))
                return -ENODATA;
 
-       *hash = be32_to_cpu(_ctx->cqe->rss_hash_result);
+       *hash = be32_to_cpu(cqe->rss_hash_result);
+
+       hash_type = cqe->rss_hash_type;
+       BUILD_BUG_ON(CQE_RSS_HTYPE_IP != RSS_L3); /* same mask */
+       ip_type = hash_type & CQE_RSS_HTYPE_IP;
+       l4_type = FIELD_GET(CQE_RSS_HTYPE_L4, hash_type);
+       lookup = ip_type | l4_type;
+       *rss_type = mlx5_xdp_rss_type[lookup];
+
        return 0;
 }
 
index 87f76ba..eb827b8 100644 (file)
@@ -628,7 +628,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
        int i, err, ring;
 
        if (dev->flags & QLCNIC_NEED_FLR) {
-               pci_reset_function(dev->pdev);
+               err = pci_reset_function(dev->pdev);
+               if (err) {
+                       dev_err(&dev->pdev->dev,
+                               "Adapter reset failed (%d). Please reboot\n",
+                               err);
+                       return err;
+               }
                dev->flags &= ~QLCNIC_NEED_FLR;
        }
 
index 930496c..b50f167 100644 (file)
@@ -826,6 +826,9 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp,
        /* disable phy pfm mode */
        phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
 
+       /* disable 10m pll off */
+       phy_modify_paged(phydev, 0x0a43, 0x10, BIT(0), 0);
+
        rtl8168g_disable_aldps(phydev);
        rtl8168g_config_eee_phy(phydev);
 }
index 7022fb2..d30459d 100644 (file)
@@ -1304,7 +1304,8 @@ static void efx_ef10_fini_nic(struct efx_nic *efx)
 static int efx_ef10_init_nic(struct efx_nic *efx)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
-       netdev_features_t hw_enc_features = 0;
+       struct net_device *net_dev = efx->net_dev;
+       netdev_features_t tun_feats, tso_feats;
        int rc;
 
        if (nic_data->must_check_datapath_caps) {
@@ -1349,20 +1350,30 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
                nic_data->must_restore_piobufs = false;
        }
 
-       /* add encapsulated checksum offload features */
+       /* encap features might change during reset if fw variant changed */
        if (efx_has_cap(efx, VXLAN_NVGRE) && !efx_ef10_is_vf(efx))
-               hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-       /* add encapsulated TSO features */
-       if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) {
-               netdev_features_t encap_tso_features;
+               net_dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+       else
+               net_dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
 
-               encap_tso_features = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
-                       NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;
+       tun_feats = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+                   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;
+       tso_feats = NETIF_F_TSO | NETIF_F_TSO6;
 
-               hw_enc_features |= encap_tso_features | NETIF_F_TSO;
-               efx->net_dev->features |= encap_tso_features;
+       if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) {
+               /* If this is first nic_init, or if it is a reset and a new fw
+                * variant has added new features, enable them by default.
+                * If the features are not new, maintain their current value.
+                */
+               if (!(net_dev->hw_features & tun_feats))
+                       net_dev->features |= tun_feats;
+               net_dev->hw_enc_features |= tun_feats | tso_feats;
+               net_dev->hw_features |= tun_feats;
+       } else {
+               net_dev->hw_enc_features &= ~(tun_feats | tso_feats);
+               net_dev->hw_features &= ~tun_feats;
+               net_dev->features &= ~tun_feats;
        }
-       efx->net_dev->hw_enc_features = hw_enc_features;
 
        /* don't fail init if RSS setup doesn't work */
        rc = efx->type->rx_push_rss_config(efx, false,
@@ -4021,7 +4032,10 @@ static unsigned int efx_ef10_recycle_ring_size(const struct efx_nic *efx)
         NETIF_F_HW_VLAN_CTAG_FILTER |  \
         NETIF_F_IPV6_CSUM |            \
         NETIF_F_RXHASH |               \
-        NETIF_F_NTUPLE)
+        NETIF_F_NTUPLE |               \
+        NETIF_F_SG |                   \
+        NETIF_F_RXCSUM |               \
+        NETIF_F_RXALL)
 
 const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
        .is_vf = true,
index 02c2ade..884d8d1 100644 (file)
@@ -1001,21 +1001,18 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
        }
 
        /* Determine netdevice features */
-       net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
-                             NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
-       if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) {
-               net_dev->features |= NETIF_F_TSO6;
-               if (efx_has_cap(efx, TX_TSO_V2_ENCAP))
-                       net_dev->hw_enc_features |= NETIF_F_TSO6;
-       }
-       /* Check whether device supports TSO */
-       if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
-               net_dev->features &= ~NETIF_F_ALL_TSO;
+       net_dev->features |= efx->type->offload_features;
+
+       /* Add TSO features */
+       if (efx->type->tso_versions && efx->type->tso_versions(efx))
+               net_dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+
        /* Mask for features that also apply to VLAN devices */
        net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
                                   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
                                   NETIF_F_RXCSUM);
 
+       /* Determine user configurable features */
        net_dev->hw_features |= net_dev->features & ~efx->fixed_features;
 
        /* Disable receiving frames with bad FCS, by default. */
index a2e5119..a690d13 100644 (file)
@@ -1037,8 +1037,6 @@ static int smsc911x_mii_probe(struct net_device *dev)
                return ret;
        }
 
-       /* Indicate that the MAC is responsible for managing PHY PM */
-       phydev->mac_managed_pm = true;
        phy_attached_info(phydev);
 
        phy_set_max_speed(phydev, SPEED_100);
@@ -1066,6 +1064,7 @@ static int smsc911x_mii_init(struct platform_device *pdev,
                             struct net_device *dev)
 {
        struct smsc911x_data *pdata = netdev_priv(dev);
+       struct phy_device *phydev;
        int err = -ENXIO;
 
        pdata->mii_bus = mdiobus_alloc();
@@ -1108,6 +1107,10 @@ static int smsc911x_mii_init(struct platform_device *pdev,
                goto err_out_free_bus_2;
        }
 
+       phydev = phy_find_first(pdata->mii_bus);
+       if (phydev)
+               phydev->mac_managed_pm = true;
+
        return 0;
 
 err_out_free_bus_2:
index ec9c130..54bb072 100644 (file)
@@ -532,7 +532,6 @@ struct mac_device_info {
        unsigned int xlgmac;
        unsigned int num_vlan;
        u32 vlan_filter[32];
-       unsigned int promisc;
        bool vlan_fail_q_en;
        u8 vlan_fail_q;
 };
index 13aa919..ab9f876 100644 (file)
@@ -251,7 +251,6 @@ static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
                priv->plat->mdio_bus_data->xpcs_an_inband = false;
        } else {
                priv->plat->max_speed = 1000;
-               priv->plat->mdio_bus_data->xpcs_an_inband = true;
        }
 }
 
index 8c7a0b7..36251ec 100644 (file)
@@ -472,12 +472,6 @@ static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
        if (vid > 4095)
                return -EINVAL;
 
-       if (hw->promisc) {
-               netdev_err(dev,
-                          "Adding VLAN in promisc mode not supported\n");
-               return -EPERM;
-       }
-
        /* Single Rx VLAN Filter */
        if (hw->num_vlan == 1) {
                /* For single VLAN filter, VID 0 means VLAN promiscuous */
@@ -527,12 +521,6 @@ static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
 {
        int i, ret = 0;
 
-       if (hw->promisc) {
-               netdev_err(dev,
-                          "Deleting VLAN in promisc mode not supported\n");
-               return -EPERM;
-       }
-
        /* Single Rx VLAN Filter */
        if (hw->num_vlan == 1) {
                if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
@@ -557,39 +545,6 @@ static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
        return ret;
 }
 
-static void dwmac4_vlan_promisc_enable(struct net_device *dev,
-                                      struct mac_device_info *hw)
-{
-       void __iomem *ioaddr = hw->pcsr;
-       u32 value;
-       u32 hash;
-       u32 val;
-       int i;
-
-       /* Single Rx VLAN Filter */
-       if (hw->num_vlan == 1) {
-               dwmac4_write_single_vlan(dev, 0);
-               return;
-       }
-
-       /* Extended Rx VLAN Filter Enable */
-       for (i = 0; i < hw->num_vlan; i++) {
-               if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
-                       val = hw->vlan_filter[i] & ~GMAC_VLAN_TAG_DATA_VEN;
-                       dwmac4_write_vlan_filter(dev, hw, i, val);
-               }
-       }
-
-       hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
-       if (hash & GMAC_VLAN_VLHT) {
-               value = readl(ioaddr + GMAC_VLAN_TAG);
-               if (value & GMAC_VLAN_VTHM) {
-                       value &= ~GMAC_VLAN_VTHM;
-                       writel(value, ioaddr + GMAC_VLAN_TAG);
-               }
-       }
-}
-
 static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
                                           struct mac_device_info *hw)
 {
@@ -709,22 +664,12 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
        }
 
        /* VLAN filtering */
-       if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+       if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en)
+               value &= ~GMAC_PACKET_FILTER_VTFE;
+       else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
                value |= GMAC_PACKET_FILTER_VTFE;
 
        writel(value, ioaddr + GMAC_PACKET_FILTER);
-
-       if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) {
-               if (!hw->promisc) {
-                       hw->promisc = 1;
-                       dwmac4_vlan_promisc_enable(dev, hw);
-               }
-       } else {
-               if (hw->promisc) {
-                       hw->promisc = 0;
-                       dwmac4_restore_hw_vlan_rx_fltr(dev, hw);
-               }
-       }
 }
 
 static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
index 17310ad..d7fcab0 100644 (file)
@@ -1134,20 +1134,26 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
 static int stmmac_init_phy(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
+       struct fwnode_handle *phy_fwnode;
        struct fwnode_handle *fwnode;
        int ret;
 
+       if (!phylink_expects_phy(priv->phylink))
+               return 0;
+
        fwnode = of_fwnode_handle(priv->plat->phylink_node);
        if (!fwnode)
                fwnode = dev_fwnode(priv->device);
 
        if (fwnode)
-               ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
+               phy_fwnode = fwnode_get_phy_node(fwnode);
+       else
+               phy_fwnode = NULL;
 
        /* Some DT bindings do not set-up the PHY handle. Let's try to
         * manually parse it
         */
-       if (!fwnode || ret) {
+       if (!phy_fwnode || IS_ERR(phy_fwnode)) {
                int addr = priv->plat->phy_addr;
                struct phy_device *phydev;
 
@@ -1163,6 +1169,9 @@ static int stmmac_init_phy(struct net_device *dev)
                }
 
                ret = phylink_connect_phy(priv->phylink, phydev);
+       } else {
+               fwnode_handle_put(phy_fwnode);
+               ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
        }
 
        if (!priv->plat->pmt) {
@@ -6622,6 +6631,8 @@ int stmmac_xdp_open(struct net_device *dev)
                goto init_error;
        }
 
+       stmmac_reset_queues_param(priv);
+
        /* DMA CSR Channel configuration */
        for (chan = 0; chan < dma_csr_ch; chan++) {
                stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
@@ -6948,7 +6959,7 @@ static void stmmac_napi_del(struct net_device *dev)
 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
-       int ret = 0;
+       int ret = 0, i;
 
        if (netif_running(dev))
                stmmac_release(dev);
@@ -6957,6 +6968,10 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
 
        priv->plat->rx_queues_to_use = rx_cnt;
        priv->plat->tx_queues_to_use = tx_cnt;
+       if (!netif_is_rxfh_configured(dev))
+               for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+                       priv->rss.table[i] = ethtool_rxfh_indir_default(i,
+                                                                       rx_cnt);
 
        stmmac_napi_add(dev);
 
index ab8b09a..7a2e767 100644 (file)
@@ -4522,7 +4522,7 @@ static int niu_alloc_channels(struct niu *np)
 
                err = niu_rbr_fill(np, rp, GFP_KERNEL);
                if (err)
-                       return err;
+                       goto out_err;
        }
 
        tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
index 4e3861c..bcea87b 100644 (file)
@@ -2926,7 +2926,8 @@ err_free_phylink:
        am65_cpsw_nuss_phylink_cleanup(common);
        am65_cpts_release(common->cpts);
 err_of_clear:
-       of_platform_device_destroy(common->mdio_dev, NULL);
+       if (common->mdio_dev)
+               of_platform_device_destroy(common->mdio_dev, NULL);
 err_pm_clear:
        pm_runtime_put_sync(dev);
        pm_runtime_disable(dev);
@@ -2956,7 +2957,8 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
        am65_cpts_release(common->cpts);
        am65_cpsw_disable_serdes_phy(common);
 
-       of_platform_device_destroy(common->mdio_dev, NULL);
+       if (common->mdio_dev)
+               of_platform_device_destroy(common->mdio_dev, NULL);
 
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
index 37f0b62..f9cd566 100644 (file)
@@ -27,7 +27,7 @@
 #include <linux/of.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
 #include <linux/if_vlan.h>
 #include <linux/kmemleak.h>
 #include <linux/sys_soc.h>
index 35128dd..c61e4e4 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/io.h>
 #include <linux/clk.h>
+#include <linux/platform_device.h>
 #include <linux/timer.h>
 #include <linux/module.h>
 #include <linux/irqreturn.h>
@@ -23,7 +24,7 @@
 #include <linux/of.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
 #include <linux/if_vlan.h>
 #include <linux/kmemleak.h>
 #include <linux/sys_soc.h>
index 77d8d7f..97e2c1e 100644 (file)
 #define WX_PX_INTA                   0x110
 #define WX_PX_GPIE                   0x118
 #define WX_PX_GPIE_MODEL             BIT(0)
-#define WX_PX_IC                     0x120
+#define WX_PX_IC(_i)                 (0x120 + (_i) * 4)
 #define WX_PX_IMS(_i)                (0x140 + (_i) * 4)
 #define WX_PX_IMC(_i)                (0x150 + (_i) * 4)
 #define WX_PX_ISB_ADDR_L             0x160
index 5b564d3..17412e5 100644 (file)
@@ -352,7 +352,7 @@ static void ngbe_up(struct wx *wx)
        netif_tx_start_all_queues(wx->netdev);
 
        /* clear any pending interrupts, may auto mask */
-       rd32(wx, WX_PX_IC);
+       rd32(wx, WX_PX_IC(0));
        rd32(wx, WX_PX_MISC_IC);
        ngbe_irq_enable(wx, true);
        if (wx->gpio_ctrl)
index 6c0a982..a58ce54 100644 (file)
@@ -229,7 +229,8 @@ static void txgbe_up_complete(struct wx *wx)
        wx_napi_enable_all(wx);
 
        /* clear any pending interrupts, may auto mask */
-       rd32(wx, WX_PX_IC);
+       rd32(wx, WX_PX_IC(0));
+       rd32(wx, WX_PX_IC(1));
        rd32(wx, WX_PX_MISC_IC);
        txgbe_irq_enable(wx, true);
 
index 0b0c6c0..d0b5129 100644 (file)
@@ -1902,10 +1902,9 @@ static int ca8210_skb_tx(
        struct ca8210_priv  *priv
 )
 {
-       int status;
        struct ieee802154_hdr header = { };
        struct secspec secspec;
-       unsigned int mac_len;
+       int mac_len, status;
 
        dev_dbg(&priv->spi->dev, "%s called\n", __func__);
 
index 0f52c06..ee6fb00 100644 (file)
@@ -156,7 +156,7 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
         * gsi_trans_pool_exit_dma() can assume the total allocated
         * size is exactly (count * size).
         */
-       total_size = get_order(total_size) << PAGE_SHIFT;
+       total_size = PAGE_SIZE << get_order(total_size);
 
        virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
        if (!virt)
index 7a28e08..d0c916a 100644 (file)
@@ -130,14 +130,10 @@ static u16 net_failover_select_queue(struct net_device *dev,
                        txq = ops->ndo_select_queue(primary_dev, skb, sb_dev);
                else
                        txq = netdev_pick_tx(primary_dev, skb, NULL);
-
-               qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
-
-               return txq;
+       } else {
+               txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
        }
 
-       txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
-
        /* Save the original txq to restore before passing to the driver */
        qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
 
index b4ff9c5..9ab5eff 100644 (file)
@@ -588,15 +588,13 @@ static int dp83869_of_init(struct phy_device *phydev)
                                                       &dp83869_internal_delay[0],
                                                       delay_size, true);
        if (dp83869->rx_int_delay < 0)
-               dp83869->rx_int_delay =
-                               dp83869_internal_delay[DP83869_CLK_DELAY_DEF];
+               dp83869->rx_int_delay = DP83869_CLK_DELAY_DEF;
 
        dp83869->tx_int_delay = phy_get_internal_delay(phydev, dev,
                                                       &dp83869_internal_delay[0],
                                                       delay_size, false);
        if (dp83869->tx_int_delay < 0)
-               dp83869->tx_int_delay =
-                               dp83869_internal_delay[DP83869_CLK_DELAY_DEF];
+               dp83869->tx_int_delay = DP83869_CLK_DELAY_DEF;
 
        return ret;
 }
index 2c84fcc..4e884e4 100644 (file)
@@ -4151,6 +4151,7 @@ static struct phy_driver ksphy_driver[] = {
        .resume         = kszphy_resume,
        .cable_test_start       = ksz9x31_cable_test_start,
        .cable_test_get_status  = ksz9x31_cable_test_get_status,
+       .get_features   = ksz9477_get_features,
 }, {
        .phy_id         = PHY_ID_KSZ8873MLL,
        .phy_id_mask    = MICREL_PHY_ID_MASK,
index 5813b07..029875a 100644 (file)
 #define MAX_ID_PS                      2260U
 #define DEFAULT_ID_PS                  2000U
 
-#define PPM_TO_SUBNS_INC(ppb)  div_u64(GENMASK(31, 0) * (ppb) * \
+#define PPM_TO_SUBNS_INC(ppb)  div_u64(GENMASK_ULL(31, 0) * (ppb) * \
                                        PTP_CLK_PERIOD_100BT1, NSEC_PER_SEC)
 
 #define NXP_C45_SKB_CB(skb)    ((struct nxp_c45_skb_cb *)(skb)->cb)
@@ -1337,6 +1337,17 @@ no_ptp_support:
        return ret;
 }
 
+static void nxp_c45_remove(struct phy_device *phydev)
+{
+       struct nxp_c45_phy *priv = phydev->priv;
+
+       if (priv->ptp_clock)
+               ptp_clock_unregister(priv->ptp_clock);
+
+       skb_queue_purge(&priv->tx_queue);
+       skb_queue_purge(&priv->rx_queue);
+}
+
 static struct phy_driver nxp_c45_driver[] = {
        {
                PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
@@ -1359,6 +1370,7 @@ static struct phy_driver nxp_c45_driver[] = {
                .set_loopback           = genphy_c45_loopback,
                .get_sqi                = nxp_c45_get_sqi,
                .get_sqi_max            = nxp_c45_get_sqi_max,
+               .remove                 = nxp_c45_remove,
        },
 };
 
index 1785f1c..1de3e33 100644 (file)
@@ -3057,7 +3057,7 @@ EXPORT_SYMBOL_GPL(device_phy_find_device);
  * and "phy-device" are not supported in ACPI. DT supports all the three
  * named references to the phy node.
  */
-struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode)
+struct fwnode_handle *fwnode_get_phy_node(const struct fwnode_handle *fwnode)
 {
        struct fwnode_handle *phy_node;
 
index 1a2f074..30c166b 100644 (file)
@@ -1586,6 +1586,25 @@ void phylink_destroy(struct phylink *pl)
 }
 EXPORT_SYMBOL_GPL(phylink_destroy);
 
+/**
+ * phylink_expects_phy() - Determine if phylink expects a phy to be attached
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * When using fixed-link mode, or in-band mode with 1000base-X or 2500base-X,
+ * no PHY is needed.
+ *
+ * Returns true if phylink will be expecting a PHY.
+ */
+bool phylink_expects_phy(struct phylink *pl)
+{
+       if (pl->cfg_link_an_mode == MLO_AN_FIXED ||
+           (pl->cfg_link_an_mode == MLO_AN_INBAND &&
+            phy_interface_mode_is_8023z(pl->link_config.interface)))
+               return false;
+       return true;
+}
+EXPORT_SYMBOL_GPL(phylink_expects_phy);
+
 static void phylink_phy_change(struct phy_device *phydev, bool up)
 {
        struct phylink *pl = phydev->phylink;
index daac293..9fc50fc 100644 (file)
@@ -17,7 +17,7 @@ struct sfp_bus {
        /* private: */
        struct kref kref;
        struct list_head node;
-       struct fwnode_handle *fwnode;
+       const struct fwnode_handle *fwnode;
 
        const struct sfp_socket_ops *socket_ops;
        struct device *sfp_dev;
@@ -390,7 +390,7 @@ static const struct sfp_upstream_ops *sfp_get_upstream_ops(struct sfp_bus *bus)
        return bus->registered ? bus->upstream_ops : NULL;
 }
 
-static struct sfp_bus *sfp_bus_get(struct fwnode_handle *fwnode)
+static struct sfp_bus *sfp_bus_get(const struct fwnode_handle *fwnode)
 {
        struct sfp_bus *sfp, *new, *found = NULL;
 
@@ -593,7 +593,7 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
  *     - %-ENOMEM if we failed to allocate the bus.
  *     - an error from the upstream's connect_phy() method.
  */
-struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
+struct sfp_bus *sfp_bus_find_fwnode(const struct fwnode_handle *fwnode)
 {
        struct fwnode_reference_args ref;
        struct sfp_bus *bus;
index fb98db6..bf34503 100644 (file)
@@ -210,6 +210,12 @@ static const enum gpiod_flags gpio_flags[] = {
 #define SFP_PHY_ADDR           22
 #define SFP_PHY_ADDR_ROLLBALL  17
 
+/* SFP_EEPROM_BLOCK_SIZE is the size of data chunk to read the EEPROM
+ * at a time. Some SFP modules and also some Linux I2C drivers do not like
+ * reads longer than 16 bytes.
+ */
+#define SFP_EEPROM_BLOCK_SIZE  16
+
 struct sff_data {
        unsigned int gpios;
        bool (*module_supported)(const struct sfp_eeprom_id *id);
@@ -387,6 +393,10 @@ static const struct sfp_quirk sfp_quirks[] = {
 
        SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp),
 
+       // HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports
+       // 2600MBd in their EEPROM
+       SFP_QUIRK_M("HG GENUINE", "MXPD-483II", sfp_quirk_2500basex),
+
        // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd NRZ in
        // their EEPROM
        SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
@@ -1925,11 +1935,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
        u8 check;
        int ret;
 
-       /* Some SFP modules and also some Linux I2C drivers do not like reads
-        * longer than 16 bytes, so read the EEPROM in chunks of 16 bytes at
-        * a time.
-        */
-       sfp->i2c_block_size = 16;
+       sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
 
        ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
        if (ret < 0) {
@@ -2481,6 +2487,9 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
        unsigned int first, last, len;
        int ret;
 
+       if (!(sfp->state & SFP_F_PRESENT))
+               return -ENODEV;
+
        if (ee->len == 0)
                return -EINVAL;
 
@@ -2513,6 +2522,9 @@ static int sfp_module_eeprom_by_page(struct sfp *sfp,
                                     const struct ethtool_module_eeprom *page,
                                     struct netlink_ext_ack *extack)
 {
+       if (!(sfp->state & SFP_F_PRESENT))
+               return -ENODEV;
+
        if (page->bank) {
                NL_SET_ERR_MSG(extack, "Banks not supported");
                return -EOPNOTSUPP;
@@ -2617,6 +2629,7 @@ static struct sfp *sfp_alloc(struct device *dev)
                return ERR_PTR(-ENOMEM);
 
        sfp->dev = dev;
+       sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
 
        mutex_init(&sfp->sm_mutex);
        mutex_init(&sfp->st_mutex);
index decb5ba..0fc4b95 100644 (file)
@@ -1943,7 +1943,7 @@ static struct rx_agg *alloc_rx_agg(struct r8152 *tp, gfp_t mflags)
        if (!rx_agg)
                return NULL;
 
-       rx_agg->page = alloc_pages(mflags | __GFP_COMP, order);
+       rx_agg->page = alloc_pages(mflags | __GFP_COMP | __GFP_NOWARN, order);
        if (!rx_agg->page)
                goto free_rx;
 
index c117891..e1b38fb 100644 (file)
@@ -1648,14 +1648,18 @@ static int veth_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
        return 0;
 }
 
-static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
+static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
+                           enum xdp_rss_hash_type *rss_type)
 {
        struct veth_xdp_buff *_ctx = (void *)ctx;
+       struct sk_buff *skb = _ctx->skb;
 
-       if (!_ctx->skb)
+       if (!skb)
                return -ENODATA;
 
-       *hash = skb_get_hash(_ctx->skb);
+       *hash = skb_get_hash(skb);
+       *rss_type = skb->l4_hash ? XDP_RSS_TYPE_L4_ANY : XDP_RSS_TYPE_NONE;
+
        return 0;
 }
 
index 6829870..da488cb 100644 (file)
@@ -1688,7 +1688,9 @@ not_lro:
                        if (unlikely(rcd->ts))
                                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
 
-                       if (adapter->netdev->features & NETIF_F_LRO)
+                       /* Use GRO callback if UPT is enabled */
+                       if ((adapter->netdev->features & NETIF_F_LRO) &&
+                           !rq->shared->updateRxProd)
                                netif_receive_skb(skb);
                        else
                                napi_gro_receive(&rq->napi, skb);
index 86995e8..a62ee05 100644 (file)
@@ -16,7 +16,7 @@
 #include "pci.h"
 #include "pcic.h"
 
-#define MHI_TIMEOUT_DEFAULT_MS 90000
+#define MHI_TIMEOUT_DEFAULT_MS 20000
 #define RDDM_DUMP_SIZE 0x420000
 
 static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
index b7c918f..65d4799 100644 (file)
@@ -994,15 +994,34 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
 MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
 
 
-static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
-                                                 int val)
+static void brcmf_sdiod_acpi_save_power_manageable(struct brcmf_sdio_dev *sdiodev)
 {
 #if IS_ENABLED(CONFIG_ACPI)
        struct acpi_device *adev;
 
-       adev = ACPI_COMPANION(dev);
+       adev = ACPI_COMPANION(&sdiodev->func1->dev);
        if (adev)
-               adev->flags.power_manageable = 0;
+               sdiodev->func1_power_manageable = adev->flags.power_manageable;
+
+       adev = ACPI_COMPANION(&sdiodev->func2->dev);
+       if (adev)
+               sdiodev->func2_power_manageable = adev->flags.power_manageable;
+#endif
+}
+
+static void brcmf_sdiod_acpi_set_power_manageable(struct brcmf_sdio_dev *sdiodev,
+                                                 int enable)
+{
+#if IS_ENABLED(CONFIG_ACPI)
+       struct acpi_device *adev;
+
+       adev = ACPI_COMPANION(&sdiodev->func1->dev);
+       if (adev)
+               adev->flags.power_manageable = enable ? sdiodev->func1_power_manageable : 0;
+
+       adev = ACPI_COMPANION(&sdiodev->func2->dev);
+       if (adev)
+               adev->flags.power_manageable = enable ? sdiodev->func2_power_manageable : 0;
 #endif
 }
 
@@ -1012,7 +1031,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
        int err;
        struct brcmf_sdio_dev *sdiodev;
        struct brcmf_bus *bus_if;
-       struct device *dev;
 
        brcmf_dbg(SDIO, "Enter\n");
        brcmf_dbg(SDIO, "Class=%x\n", func->class);
@@ -1020,14 +1038,9 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
        brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
        brcmf_dbg(SDIO, "Function#: %d\n", func->num);
 
-       dev = &func->dev;
-
        /* Set MMC_QUIRK_LENIENT_FN0 for this card */
        func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
 
-       /* prohibit ACPI power management for this device */
-       brcmf_sdiod_acpi_set_power_manageable(dev, 0);
-
        /* Consume func num 1 but dont do anything with it. */
        if (func->num == 1)
                return 0;
@@ -1059,6 +1072,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
        dev_set_drvdata(&sdiodev->func1->dev, bus_if);
        sdiodev->dev = &sdiodev->func1->dev;
 
+       brcmf_sdiod_acpi_save_power_manageable(sdiodev);
        brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
 
        brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
@@ -1124,6 +1138,8 @@ void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
 
        if (sdiodev->settings->bus.sdio.oob_irq_supported ||
            pm_caps & MMC_PM_WAKE_SDIO_IRQ) {
+               /* Stop ACPI from turning off the device when wowl is enabled */
+               brcmf_sdiod_acpi_set_power_manageable(sdiodev, !enabled);
                sdiodev->wowl_enabled = enabled;
                brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
                return;
index b76d34d..0d18ed1 100644 (file)
@@ -188,6 +188,8 @@ struct brcmf_sdio_dev {
        char nvram_name[BRCMF_FW_NAME_LEN];
        char clm_name[BRCMF_FW_NAME_LEN];
        bool wowl_enabled;
+       bool func1_power_manageable;
+       bool func2_power_manageable;
        enum brcmf_sdiod_state state;
        struct brcmf_sdiod_freezer *freezer;
        const struct firmware *clm_fw;
index ca50feb..1b1358c 100644 (file)
@@ -512,15 +512,15 @@ mt7603_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
            !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
                return -EOPNOTSUPP;
 
-       if (cmd == SET_KEY) {
-               key->hw_key_idx = wcid->idx;
-               wcid->hw_key_idx = idx;
-       } else {
+       if (cmd != SET_KEY) {
                if (idx == wcid->hw_key_idx)
                        wcid->hw_key_idx = -1;
 
-               key = NULL;
+               return 0;
        }
+
+       key->hw_key_idx = wcid->idx;
+       wcid->hw_key_idx = idx;
        mt76_wcid_key_setup(&dev->mt76, wcid, key);
 
        return mt7603_wtbl_set_key(dev, wcid->idx, key);
index a956024..51a968a 100644 (file)
@@ -1193,8 +1193,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_enable_rtscts);
 static int
 mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
                           struct ieee80211_key_conf *key,
-                          enum mt76_cipher_type cipher, u16 cipher_mask,
-                          enum set_key_cmd cmd)
+                          enum mt76_cipher_type cipher, u16 cipher_mask)
 {
        u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
        u8 data[32] = {};
@@ -1203,27 +1202,18 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
                return -EINVAL;
 
        mt76_rr_copy(dev, addr, data, sizeof(data));
-       if (cmd == SET_KEY) {
-               if (cipher == MT_CIPHER_TKIP) {
-                       /* Rx/Tx MIC keys are swapped */
-                       memcpy(data, key->key, 16);
-                       memcpy(data + 16, key->key + 24, 8);
-                       memcpy(data + 24, key->key + 16, 8);
-               } else {
-                       if (cipher_mask == BIT(cipher))
-                               memcpy(data, key->key, key->keylen);
-                       else if (cipher != MT_CIPHER_BIP_CMAC_128)
-                               memcpy(data, key->key, 16);
-                       if (cipher == MT_CIPHER_BIP_CMAC_128)
-                               memcpy(data + 16, key->key, 16);
-               }
+       if (cipher == MT_CIPHER_TKIP) {
+               /* Rx/Tx MIC keys are swapped */
+               memcpy(data, key->key, 16);
+               memcpy(data + 16, key->key + 24, 8);
+               memcpy(data + 24, key->key + 16, 8);
        } else {
+               if (cipher_mask == BIT(cipher))
+                       memcpy(data, key->key, key->keylen);
+               else if (cipher != MT_CIPHER_BIP_CMAC_128)
+                       memcpy(data, key->key, 16);
                if (cipher == MT_CIPHER_BIP_CMAC_128)
-                       memset(data + 16, 0, 16);
-               else if (cipher_mask)
-                       memset(data, 0, 16);
-               if (!cipher_mask)
-                       memset(data, 0, sizeof(data));
+                       memcpy(data + 16, key->key, 16);
        }
 
        mt76_wr_copy(dev, addr, data, sizeof(data));
@@ -1234,7 +1224,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
 static int
 mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
                          enum mt76_cipher_type cipher, u16 cipher_mask,
-                         int keyidx, enum set_key_cmd cmd)
+                         int keyidx)
 {
        u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
 
@@ -1253,9 +1243,7 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
        else
                w0 &= ~MT_WTBL_W0_RX_IK_VALID;
 
-       if (cmd == SET_KEY &&
-           (cipher != MT_CIPHER_BIP_CMAC_128 ||
-            cipher_mask == BIT(cipher))) {
+       if (cipher != MT_CIPHER_BIP_CMAC_128 || cipher_mask == BIT(cipher)) {
                w0 &= ~MT_WTBL_W0_KEY_IDX;
                w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
        }
@@ -1272,19 +1260,10 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
 
 static void
 mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
-                             enum mt76_cipher_type cipher, u16 cipher_mask,
-                             enum set_key_cmd cmd)
+                             enum mt76_cipher_type cipher, u16 cipher_mask)
 {
        u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
 
-       if (!cipher_mask) {
-               mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
-               return;
-       }
-
-       if (cmd != SET_KEY)
-               return;
-
        if (cipher == MT_CIPHER_BIP_CMAC_128 &&
            cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128))
                return;
@@ -1295,8 +1274,7 @@ mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
 
 int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
                              struct mt76_wcid *wcid,
-                             struct ieee80211_key_conf *key,
-                             enum set_key_cmd cmd)
+                             struct ieee80211_key_conf *key)
 {
        enum mt76_cipher_type cipher;
        u16 cipher_mask = wcid->cipher;
@@ -1306,19 +1284,14 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
        if (cipher == MT_CIPHER_NONE)
                return -EOPNOTSUPP;
 
-       if (cmd == SET_KEY)
-               cipher_mask |= BIT(cipher);
-       else
-               cipher_mask &= ~BIT(cipher);
-
-       mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd);
-       err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask,
-                                        cmd);
+       cipher_mask |= BIT(cipher);
+       mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask);
+       err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask);
        if (err < 0)
                return err;
 
        err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask,
-                                       key->keyidx, cmd);
+                                       key->keyidx);
        if (err < 0)
                return err;
 
@@ -1329,13 +1302,12 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
 
 int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
                            struct mt76_wcid *wcid,
-                           struct ieee80211_key_conf *key,
-                           enum set_key_cmd cmd)
+                           struct ieee80211_key_conf *key)
 {
        int err;
 
        spin_lock_bh(&dev->mt76.lock);
-       err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
+       err = __mt7615_mac_wtbl_set_key(dev, wcid, key);
        spin_unlock_bh(&dev->mt76.lock);
 
        return err;
index ab4c1b4..dadb13f 100644 (file)
@@ -391,18 +391,17 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
        if (cmd == SET_KEY)
                *wcid_keyidx = idx;
-       else if (idx == *wcid_keyidx)
-               *wcid_keyidx = -1;
-       else
+       else {
+               if (idx == *wcid_keyidx)
+                       *wcid_keyidx = -1;
                goto out;
+       }
 
-       mt76_wcid_key_setup(&dev->mt76, wcid,
-                           cmd == SET_KEY ? key : NULL);
-
+       mt76_wcid_key_setup(&dev->mt76, wcid, key);
        if (mt76_is_mmio(&dev->mt76))
-               err = mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
+               err = mt7615_mac_wtbl_set_key(dev, wcid, key);
        else
-               err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
+               err = __mt7615_mac_wtbl_set_key(dev, wcid, key);
 
 out:
        mt7615_mutex_release(dev);
index 43591b4..9e58f69 100644 (file)
@@ -490,11 +490,9 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
 void mt7615_mac_set_timing(struct mt7615_phy *phy);
 int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
                              struct mt76_wcid *wcid,
-                             struct ieee80211_key_conf *key,
-                             enum set_key_cmd cmd);
+                             struct ieee80211_key_conf *key);
 int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
-                           struct ieee80211_key_conf *key,
-                           enum set_key_cmd cmd);
+                           struct ieee80211_key_conf *key);
 void mt7615_mac_reset_work(struct work_struct *work);
 u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid);
 
index 7451a63..dcbb5c6 100644 (file)
@@ -454,20 +454,20 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
        msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL;
        wcid = msta ? &msta->wcid : &mvif->group_wcid;
 
-       if (cmd == SET_KEY) {
-               key->hw_key_idx = wcid->idx;
-               wcid->hw_key_idx = idx;
-               if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
-                       key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
-                       wcid->sw_iv = true;
-               }
-       } else {
+       if (cmd != SET_KEY) {
                if (idx == wcid->hw_key_idx) {
                        wcid->hw_key_idx = -1;
                        wcid->sw_iv = false;
                }
 
-               key = NULL;
+               return 0;
+       }
+
+       key->hw_key_idx = wcid->idx;
+       wcid->hw_key_idx = idx;
+       if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+               key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+               wcid->sw_iv = true;
        }
        mt76_wcid_key_setup(&dev->mt76, wcid, key);
 
index 3bbccbd..784191e 100644 (file)
@@ -410,16 +410,15 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                mt7915_mcu_add_bss_info(phy, vif, true);
        }
 
-       if (cmd == SET_KEY)
+       if (cmd == SET_KEY) {
                *wcid_keyidx = idx;
-       else if (idx == *wcid_keyidx)
-               *wcid_keyidx = -1;
-       else
+       } else {
+               if (idx == *wcid_keyidx)
+                       *wcid_keyidx = -1;
                goto out;
+       }
 
-       mt76_wcid_key_setup(&dev->mt76, wcid,
-                           cmd == SET_KEY ? key : NULL);
-
+       mt76_wcid_key_setup(&dev->mt76, wcid, key);
        err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
                                      key, MCU_EXT_CMD(STA_REC_UPDATE),
                                      &msta->wcid, cmd);
index 80c71ac..cc94531 100644 (file)
@@ -171,12 +171,12 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
 
 u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
 {
-       struct mt7921_fw_features *features = NULL;
        const struct mt76_connac2_fw_trailer *hdr;
        struct mt7921_realease_info *rel_info;
        const struct firmware *fw;
        int ret, i, offset = 0;
        const u8 *data, *end;
+       u8 offload_caps = 0;
 
        ret = request_firmware(&fw, fw_wm, dev);
        if (ret)
@@ -208,7 +208,10 @@ u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
                data += sizeof(*rel_info);
 
                if (rel_info->tag == MT7921_FW_TAG_FEATURE) {
+                       struct mt7921_fw_features *features;
+
                        features = (struct mt7921_fw_features *)data;
+                       offload_caps = features->data;
                        break;
                }
 
@@ -218,7 +221,7 @@ u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
 out:
        release_firmware(fw);
 
-       return features ? features->data : 0;
+       return offload_caps;
 }
 EXPORT_SYMBOL_GPL(mt7921_check_offload_capability);
 
index 75eaf86..42933a6 100644 (file)
@@ -569,16 +569,15 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
        mt7921_mutex_acquire(dev);
 
-       if (cmd == SET_KEY)
+       if (cmd == SET_KEY) {
                *wcid_keyidx = idx;
-       else if (idx == *wcid_keyidx)
-               *wcid_keyidx = -1;
-       else
+       } else {
+               if (idx == *wcid_keyidx)
+                       *wcid_keyidx = -1;
                goto out;
+       }
 
-       mt76_wcid_key_setup(&dev->mt76, wcid,
-                           cmd == SET_KEY ? key : NULL);
-
+       mt76_wcid_key_setup(&dev->mt76, wcid, key);
        err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
                                      key, MCU_UNI_CMD(STA_REC_UPDATE),
                                      &msta->wcid, cmd);
index cb72ded..5c23c82 100644 (file)
@@ -20,7 +20,7 @@ static const struct pci_device_id mt7921_pci_device_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608),
                .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
        { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616),
-               .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+               .driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
        { },
 };
 
index 3e4da03..1ba22d1 100644 (file)
@@ -351,16 +351,15 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                mt7996_mcu_add_bss_info(phy, vif, true);
        }
 
-       if (cmd == SET_KEY)
+       if (cmd == SET_KEY) {
                *wcid_keyidx = idx;
-       else if (idx == *wcid_keyidx)
-               *wcid_keyidx = -1;
-       else
+       } else {
+               if (idx == *wcid_keyidx)
+                       *wcid_keyidx = -1;
                goto out;
+       }
 
-       mt76_wcid_key_setup(&dev->mt76, wcid,
-                           cmd == SET_KEY ? key : NULL);
-
+       mt76_wcid_key_setup(&dev->mt76, wcid, key);
        err = mt7996_mcu_add_key(&dev->mt76, vif, &msta->bip,
                                 key, MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
                                 &msta->wcid, cmd);
index 1e6a479..c066b00 100644 (file)
@@ -587,6 +587,13 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
        while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
                if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
                        ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
+
+                       if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID &&
+                           chnl_cfg_port.wwan_port_type == WWAN_PORT_XMMRPC) {
+                               ctrl_chl_idx++;
+                               continue;
+                       }
+
                        if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7360_ID &&
                            chnl_cfg_port.wwan_port_type == WWAN_PORT_MBIM) {
                                ctrl_chl_idx++;
index 5bf5a93..04517bd 100644 (file)
@@ -295,7 +295,7 @@ static int ipc_pcie_probe(struct pci_dev *pci,
        ret = dma_set_mask(ipc_pcie->dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_err(ipc_pcie->dev, "Could not set PCI DMA mask: %d", ret);
-               return ret;
+               goto set_mask_fail;
        }
 
        ipc_pcie_config_aspm(ipc_pcie);
@@ -323,6 +323,7 @@ static int ipc_pcie_probe(struct pci_dev *pci,
 imem_init_fail:
        ipc_pcie_resources_release(ipc_pcie);
 resources_req_fail:
+set_mask_fail:
        pci_disable_device(pci);
 pci_enable_fail:
        kfree(ipc_pcie);
index 268ff9e..2652cd0 100644 (file)
@@ -1,7 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
-ccflags-y += -Werror
-
 obj-${CONFIG_MTK_T7XX} := mtk_t7xx.o
 mtk_t7xx-y:=   t7xx_pci.o \
                t7xx_pcie_mac.o \
index 3dbfc8a..1fcbd83 100644 (file)
@@ -166,7 +166,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
        struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
        grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
 
-       struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
+       struct gnttab_copy tx_copy_ops[2 * MAX_PENDING_REQS];
        struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
        struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
        /* passed to gnttab_[un]map_refs with pages under (un)mapping */
index 1b42676..c1501f4 100644 (file)
@@ -334,6 +334,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 struct xenvif_tx_cb {
        u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
        u8 copy_count;
+       u32 split_mask;
 };
 
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
@@ -361,6 +362,8 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
        struct sk_buff *skb =
                alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
                          GFP_ATOMIC | __GFP_NOWARN);
+
+       BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
        if (unlikely(skb == NULL))
                return NULL;
 
@@ -396,11 +399,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
        nr_slots = shinfo->nr_frags + 1;
 
        copy_count(skb) = 0;
+       XENVIF_TX_CB(skb)->split_mask = 0;
 
        /* Create copy ops for exactly data_len bytes into the skb head. */
        __skb_put(skb, data_len);
        while (data_len > 0) {
                int amount = data_len > txp->size ? txp->size : data_len;
+               bool split = false;
 
                cop->source.u.ref = txp->gref;
                cop->source.domid = queue->vif->domid;
@@ -413,6 +418,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
                cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
                                               - data_len);
 
+               /* Don't cross local page boundary! */
+               if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
+                       amount = XEN_PAGE_SIZE - cop->dest.offset;
+                       XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
+                       split = true;
+               }
+
                cop->len = amount;
                cop->flags = GNTCOPY_source_gref;
 
@@ -420,7 +432,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
                pending_idx = queue->pending_ring[index];
                callback_param(queue, pending_idx).ctx = NULL;
                copy_pending_idx(skb, copy_count(skb)) = pending_idx;
-               copy_count(skb)++;
+               if (!split)
+                       copy_count(skb)++;
 
                cop++;
                data_len -= amount;
@@ -441,7 +454,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
                        nr_slots--;
                } else {
                        /* The copy op partially covered the tx_request.
-                        * The remainder will be mapped.
+                        * The remainder will be mapped or copied in the next
+                        * iteration.
                         */
                        txp->offset += amount;
                        txp->size -= amount;
@@ -539,6 +553,13 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
                pending_idx = copy_pending_idx(skb, i);
 
                newerr = (*gopp_copy)->status;
+
+               /* Split copies need to be handled together. */
+               if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
+                       (*gopp_copy)++;
+                       if (!newerr)
+                               newerr = (*gopp_copy)->status;
+               }
                if (likely(!newerr)) {
                        /* The first frag might still have this slot mapped */
                        if (i < copy_count(skb) - 1 || !sharedslot)
@@ -973,10 +994,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
                /* No crossing a page as the payload mustn't fragment. */
                if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
-                       netdev_err(queue->vif->dev,
-                                  "txreq.offset: %u, size: %u, end: %lu\n",
-                                  txreq.offset, txreq.size,
-                                  (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
+                       netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
+                                  txreq.offset, txreq.size);
                        xenvif_fatal_tx_err(queue->vif);
                        break;
                }
@@ -1061,10 +1080,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                __skb_queue_tail(&queue->tx_queue, skb);
 
                queue->tx.req_cons = idx;
-
-               if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
-                   (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
-                       break;
        }
 
        return;
index 53ef028..d6a9bac 100644 (file)
@@ -1674,6 +1674,9 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
        struct request_queue *queue = disk->queue;
        u32 size = queue_logical_block_size(queue);
 
+       if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
+               ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
+
        if (ctrl->max_discard_sectors == 0) {
                blk_queue_max_discard_sectors(queue, 0);
                return;
@@ -1688,9 +1691,6 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
        if (queue->limits.max_discard_sectors)
                return;
 
-       if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
-               ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
-
        blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
        blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
 
index b615906..cd7873d 100644 (file)
@@ -3441,6 +3441,9 @@ static const struct pci_device_id nvme_id_table[] = {
        { PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
+               .driver_data = NVME_QUIRK_BOGUS_NID |
+                               NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+       { PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
index 42c0598..49c9e7b 100644 (file)
@@ -1620,22 +1620,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
        if (ret)
                goto err_init_connect;
 
-       queue->rd_enabled = true;
        set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
-       nvme_tcp_init_recv_ctx(queue);
-
-       write_lock_bh(&queue->sock->sk->sk_callback_lock);
-       queue->sock->sk->sk_user_data = queue;
-       queue->state_change = queue->sock->sk->sk_state_change;
-       queue->data_ready = queue->sock->sk->sk_data_ready;
-       queue->write_space = queue->sock->sk->sk_write_space;
-       queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
-       queue->sock->sk->sk_state_change = nvme_tcp_state_change;
-       queue->sock->sk->sk_write_space = nvme_tcp_write_space;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       queue->sock->sk->sk_ll_usec = 1;
-#endif
-       write_unlock_bh(&queue->sock->sk->sk_callback_lock);
 
        return 0;
 
@@ -1655,7 +1640,7 @@ err_destroy_mutex:
        return ret;
 }
 
-static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
+static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
 {
        struct socket *sock = queue->sock;
 
@@ -1670,7 +1655,7 @@ static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
 {
        kernel_sock_shutdown(queue->sock, SHUT_RDWR);
-       nvme_tcp_restore_sock_calls(queue);
+       nvme_tcp_restore_sock_ops(queue);
        cancel_work_sync(&queue->io_work);
 }
 
@@ -1688,21 +1673,42 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
        mutex_unlock(&queue->queue_lock);
 }
 
+static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
+{
+       write_lock_bh(&queue->sock->sk->sk_callback_lock);
+       queue->sock->sk->sk_user_data = queue;
+       queue->state_change = queue->sock->sk->sk_state_change;
+       queue->data_ready = queue->sock->sk->sk_data_ready;
+       queue->write_space = queue->sock->sk->sk_write_space;
+       queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
+       queue->sock->sk->sk_state_change = nvme_tcp_state_change;
+       queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       queue->sock->sk->sk_ll_usec = 1;
+#endif
+       write_unlock_bh(&queue->sock->sk->sk_callback_lock);
+}
+
 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
 {
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+       struct nvme_tcp_queue *queue = &ctrl->queues[idx];
        int ret;
 
+       queue->rd_enabled = true;
+       nvme_tcp_init_recv_ctx(queue);
+       nvme_tcp_setup_sock_ops(queue);
+
        if (idx)
                ret = nvmf_connect_io_queue(nctrl, idx);
        else
                ret = nvmf_connect_admin_queue(nctrl);
 
        if (!ret) {
-               set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
+               set_bit(NVME_TCP_Q_LIVE, &queue->flags);
        } else {
-               if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
-                       __nvme_tcp_stop_queue(&ctrl->queues[idx]);
+               if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+                       __nvme_tcp_stop_queue(queue);
                dev_err(nctrl->device,
                        "failed to connect queue: %d ret=%d\n", idx, ret);
        }
index 07d9375..e311d40 100644 (file)
@@ -226,6 +226,7 @@ static void __of_attach_node(struct device_node *np)
        np->sibling = np->parent->child;
        np->parent->child = np;
        of_node_clear_flag(np, OF_DETACHED);
+       np->fwnode.flags |= FWNODE_FLAG_NOT_DEVICE;
 }
 
 /**
index b2bd2e7..78ae841 100644 (file)
@@ -737,6 +737,11 @@ static int of_platform_notify(struct notifier_block *nb,
                if (of_node_check_flag(rd->dn, OF_POPULATED))
                        return NOTIFY_OK;
 
+               /*
+                * Clear the flag before adding the device so that fw_devlink
+                * doesn't skip adding consumers to this device.
+                */
+               rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
                /* pdev_parent may be NULL when no bus platform device */
                pdev_parent = of_find_device_by_node(rd->dn->parent);
                pdev = of_platform_device_create(rd->dn, NULL,
index 53a16b8..8e33e6e 100644 (file)
@@ -1001,11 +1001,6 @@ void dw_pcie_setup(struct dw_pcie *pci)
                dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
        }
 
-       val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
-       val &= ~PORT_LINK_FAST_LINK_MODE;
-       val |= PORT_LINK_DLL_LINK_EN;
-       dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
-
        if (dw_pcie_cap_is(pci, CDM_CHECK)) {
                val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
                val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
@@ -1013,6 +1008,11 @@ void dw_pcie_setup(struct dw_pcie *pci)
                dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
        }
 
+       val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+       val &= ~PORT_LINK_FAST_LINK_MODE;
+       val |= PORT_LINK_DLL_LINK_EN;
+       dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
        if (!pci->num_lanes) {
                dev_dbg(pci->dev, "Using h/w default number of lanes\n");
                return;
index 66d9ab2..e5e9b28 100644 (file)
@@ -128,7 +128,7 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
                return -EIO;
 
        /* Length is 2 DW of header + length of payload in DW */
-       length = 2 + task->request_pl_sz / sizeof(u32);
+       length = 2 + task->request_pl_sz / sizeof(__le32);
        if (length > PCI_DOE_MAX_LENGTH)
                return -EIO;
        if (length == PCI_DOE_MAX_LENGTH)
@@ -141,9 +141,9 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
        pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
                               FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
                                          length));
-       for (i = 0; i < task->request_pl_sz / sizeof(u32); i++)
+       for (i = 0; i < task->request_pl_sz / sizeof(__le32); i++)
                pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
-                                      task->request_pl[i]);
+                                      le32_to_cpu(task->request_pl[i]));
 
        pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO);
 
@@ -195,11 +195,11 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
 
        /* First 2 dwords have already been read */
        length -= 2;
-       payload_length = min(length, task->response_pl_sz / sizeof(u32));
+       payload_length = min(length, task->response_pl_sz / sizeof(__le32));
        /* Read the rest of the response payload */
        for (i = 0; i < payload_length; i++) {
-               pci_read_config_dword(pdev, offset + PCI_DOE_READ,
-                                     &task->response_pl[i]);
+               pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
+               task->response_pl[i] = cpu_to_le32(val);
                /* Prior to the last ack, ensure Data Object Ready */
                if (i == (payload_length - 1) && !pci_doe_data_obj_ready(doe_mb))
                        return -EIO;
@@ -217,13 +217,14 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
        if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
                return -EIO;
 
-       return min(length, task->response_pl_sz / sizeof(u32)) * sizeof(u32);
+       return min(length, task->response_pl_sz / sizeof(__le32)) * sizeof(__le32);
 }
 
 static void signal_task_complete(struct pci_doe_task *task, int rv)
 {
        task->rv = rv;
        task->complete(task);
+       destroy_work_on_stack(&task->work);
 }
 
 static void signal_task_abort(struct pci_doe_task *task, int rv)
@@ -317,14 +318,16 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
 {
        u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
                                    *index);
+       __le32 request_pl_le = cpu_to_le32(request_pl);
+       __le32 response_pl_le;
        u32 response_pl;
        DECLARE_COMPLETION_ONSTACK(c);
        struct pci_doe_task task = {
                .prot.vid = PCI_VENDOR_ID_PCI_SIG,
                .prot.type = PCI_DOE_PROTOCOL_DISCOVERY,
-               .request_pl = &request_pl,
+               .request_pl = &request_pl_le,
                .request_pl_sz = sizeof(request_pl),
-               .response_pl = &response_pl,
+               .response_pl = &response_pl_le,
                .response_pl_sz = sizeof(response_pl),
                .complete = pci_doe_task_complete,
                .private = &c,
@@ -340,6 +343,7 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
        if (task.rv != sizeof(response_pl))
                return -EIO;
 
+       response_pl = le32_to_cpu(response_pl_le);
        *vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
        *protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
                              response_pl);
@@ -520,6 +524,8 @@ EXPORT_SYMBOL_GPL(pci_doe_supports_prot);
  * task->complete will be called when the state machine is done processing this
  * task.
  *
+ * @task must be allocated on the stack.
+ *
  * Excess data will be discarded.
  *
  * RETURNS: 0 when task has been successfully queued, -ERRNO on error
@@ -533,15 +539,15 @@ int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
         * DOE requests must be a whole number of DW and the response needs to
         * be big enough for at least 1 DW
         */
-       if (task->request_pl_sz % sizeof(u32) ||
-           task->response_pl_sz < sizeof(u32))
+       if (task->request_pl_sz % sizeof(__le32) ||
+           task->response_pl_sz < sizeof(__le32))
                return -EINVAL;
 
        if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
                return -EIO;
 
        task->doe_mb = doe_mb;
-       INIT_WORK(&task->work, doe_statemachine_work);
+       INIT_WORK_ONSTACK(&task->work, doe_statemachine_work);
        queue_work(doe_mb->work_queue, &task->work);
        return 0;
 }
index 0145aef..22d39e1 100644 (file)
@@ -157,8 +157,6 @@ void pci_remove_root_bus(struct pci_bus *bus)
        list_for_each_entry_safe(child, tmp,
                                 &bus->devices, bus_list)
                pci_remove_bus_device(child);
-       pci_remove_bus(bus);
-       host_bridge->bus = NULL;
 
 #ifdef CONFIG_PCI_DOMAINS_GENERIC
        /* Release domain_nr if it was dynamically allocated */
@@ -166,6 +164,9 @@ void pci_remove_root_bus(struct pci_bus *bus)
                pci_bus_release_domain_nr(bus, host_bridge->dev.parent);
 #endif
 
+       pci_remove_bus(bus);
+       host_bridge->bus = NULL;
+
        /* remove the host bridge */
        device_del(&host_bridge->dev);
 }
index a78fdb1..8b64388 100644 (file)
 #define DMC_QOS_IRQ            BIT(30)
 
 /* DMC bandwidth monitor register address offset */
-#define DMC_MON_G12_CTRL0              (0x20  << 2)
-#define DMC_MON_G12_CTRL1              (0x21  << 2)
-#define DMC_MON_G12_CTRL2              (0x22  << 2)
-#define DMC_MON_G12_CTRL3              (0x23  << 2)
-#define DMC_MON_G12_CTRL4              (0x24  << 2)
-#define DMC_MON_G12_CTRL5              (0x25  << 2)
-#define DMC_MON_G12_CTRL6              (0x26  << 2)
-#define DMC_MON_G12_CTRL7              (0x27  << 2)
-#define DMC_MON_G12_CTRL8              (0x28  << 2)
-
-#define DMC_MON_G12_ALL_REQ_CNT                (0x29  << 2)
-#define DMC_MON_G12_ALL_GRANT_CNT      (0x2a  << 2)
-#define DMC_MON_G12_ONE_GRANT_CNT      (0x2b  << 2)
-#define DMC_MON_G12_SEC_GRANT_CNT      (0x2c  << 2)
-#define DMC_MON_G12_THD_GRANT_CNT      (0x2d  << 2)
-#define DMC_MON_G12_FOR_GRANT_CNT      (0x2e  << 2)
-#define DMC_MON_G12_TIMER              (0x2f  << 2)
+#define DMC_MON_G12_CTRL0              (0x0  << 2)
+#define DMC_MON_G12_CTRL1              (0x1  << 2)
+#define DMC_MON_G12_CTRL2              (0x2  << 2)
+#define DMC_MON_G12_CTRL3              (0x3  << 2)
+#define DMC_MON_G12_CTRL4              (0x4  << 2)
+#define DMC_MON_G12_CTRL5              (0x5  << 2)
+#define DMC_MON_G12_CTRL6              (0x6  << 2)
+#define DMC_MON_G12_CTRL7              (0x7  << 2)
+#define DMC_MON_G12_CTRL8              (0x8  << 2)
+
+#define DMC_MON_G12_ALL_REQ_CNT                (0x9  << 2)
+#define DMC_MON_G12_ALL_GRANT_CNT      (0xa  << 2)
+#define DMC_MON_G12_ONE_GRANT_CNT      (0xb  << 2)
+#define DMC_MON_G12_SEC_GRANT_CNT      (0xc  << 2)
+#define DMC_MON_G12_THD_GRANT_CNT      (0xd  << 2)
+#define DMC_MON_G12_FOR_GRANT_CNT      (0xe  << 2)
+#define DMC_MON_G12_TIMER              (0xf  << 2)
 
 /* Each bit represent a axi line */
 PMU_FORMAT_ATTR(event, "config:0-7");
index f20c283..a71874f 100644 (file)
@@ -45,35 +45,35 @@ config PINCTRL_MTK_PARIS
 
 # For ARMv7 SoCs
 config PINCTRL_MT2701
-       bool "Mediatek MT2701 pin control"
+       bool "MediaTek MT2701 pin control"
        depends on MACH_MT7623 || MACH_MT2701 || COMPILE_TEST
        depends on OF
        default MACH_MT2701
        select PINCTRL_MTK
 
 config PINCTRL_MT7623
-       bool "Mediatek MT7623 pin control with generic binding"
+       bool "MediaTek MT7623 pin control with generic binding"
        depends on MACH_MT7623 || COMPILE_TEST
        depends on OF
        default MACH_MT7623
        select PINCTRL_MTK_MOORE
 
 config PINCTRL_MT7629
-       bool "Mediatek MT7629 pin control"
+       bool "MediaTek MT7629 pin control"
        depends on MACH_MT7629 || COMPILE_TEST
        depends on OF
        default MACH_MT7629
        select PINCTRL_MTK_MOORE
 
 config PINCTRL_MT8135
-       bool "Mediatek MT8135 pin control"
+       bool "MediaTek MT8135 pin control"
        depends on MACH_MT8135 || COMPILE_TEST
        depends on OF
        default MACH_MT8135
        select PINCTRL_MTK
 
 config PINCTRL_MT8127
-       bool "Mediatek MT8127 pin control"
+       bool "MediaTek MT8127 pin control"
        depends on MACH_MT8127 || COMPILE_TEST
        depends on OF
        default MACH_MT8127
@@ -88,33 +88,33 @@ config PINCTRL_MT2712
        select PINCTRL_MTK
 
 config PINCTRL_MT6765
-       tristate "Mediatek MT6765 pin control"
+       tristate "MediaTek MT6765 pin control"
        depends on OF
        depends on ARM64 || COMPILE_TEST
        default ARM64 && ARCH_MEDIATEK
        select PINCTRL_MTK_PARIS
 
 config PINCTRL_MT6779
-       tristate "Mediatek MT6779 pin control"
+       tristate "MediaTek MT6779 pin control"
        depends on OF
        depends on ARM64 || COMPILE_TEST
        default ARM64 && ARCH_MEDIATEK
        select PINCTRL_MTK_PARIS
        help
          Say yes here to support pin controller and gpio driver
-         on Mediatek MT6779 SoC.
+         on MediaTek MT6779 SoC.
          In MTK platform, we support virtual gpio and use it to
          map specific eint which doesn't have real gpio pin.
 
 config PINCTRL_MT6795
-       bool "Mediatek MT6795 pin control"
+       bool "MediaTek MT6795 pin control"
        depends on OF
        depends on ARM64 || COMPILE_TEST
        default ARM64 && ARCH_MEDIATEK
        select PINCTRL_MTK_PARIS
 
 config PINCTRL_MT6797
-       bool "Mediatek MT6797 pin control"
+       bool "MediaTek MT6797 pin control"
        depends on OF
        depends on ARM64 || COMPILE_TEST
        default ARM64 && ARCH_MEDIATEK
@@ -128,40 +128,42 @@ config PINCTRL_MT7622
        select PINCTRL_MTK_MOORE
 
 config PINCTRL_MT7981
-       bool "Mediatek MT7981 pin control"
+       bool "MediaTek MT7981 pin control"
        depends on OF
+       depends on ARM64 || COMPILE_TEST
+       default ARM64 && ARCH_MEDIATEK
        select PINCTRL_MTK_MOORE
 
 config PINCTRL_MT7986
-       bool "Mediatek MT7986 pin control"
+       bool "MediaTek MT7986 pin control"
        depends on OF
        depends on ARM64 || COMPILE_TEST
        default ARM64 && ARCH_MEDIATEK
        select PINCTRL_MTK_MOORE
 
 config PINCTRL_MT8167
-       bool "Mediatek MT8167 pin control"
+       bool "MediaTek MT8167 pin control"
        depends on OF
        depends on ARM64 || COMPILE_TEST
        default ARM64 && ARCH_MEDIATEK
        select PINCTRL_MTK
 
 config PINCTRL_MT8173
-       bool "Mediatek MT8173 pin control"
+       bool "MediaTek MT8173 pin control"
        depends on OF
        depends on ARM64 || COMPILE_TEST
        default ARM64 && ARCH_MEDIATEK
        select PINCTRL_MTK
 
 config PINCTRL_MT8183
-       bool "Mediatek MT8183 pin control"
+       bool "MediaTek MT8183 pin control"
        depends on OF
        depends on ARM64 || COMPILE_TEST
        default ARM64 && ARCH_MEDIATEK
        select PINCTRL_MTK_PARIS
 
 config PINCTRL_MT8186
-       bool "Mediatek MT8186 pin control"
+       bool "MediaTek MT8186 pin control"
        depends on OF
        depends on ARM64 || COMPILE_TEST
        default ARM64 && ARCH_MEDIATEK
@@ -180,28 +182,28 @@ config PINCTRL_MT8188
          map specific eint which doesn't have real gpio pin.
 
 config PINCTRL_MT8192
-       bool "Mediatek MT8192 pin control"
+       bool "MediaTek MT8192 pin control"
        depends on OF
        depends on ARM64 || COMPILE_TEST
        default ARM64 && ARCH_MEDIATEK
        select PINCTRL_MTK_PARIS
 
 config PINCTRL_MT8195
-       bool "Mediatek MT8195 pin control"
+       bool "MediaTek MT8195 pin control"
        depends on OF
        depends on ARM64 || COMPILE_TEST
        default ARM64 && ARCH_MEDIATEK
        select PINCTRL_MTK_PARIS
 
 config PINCTRL_MT8365
-       bool "Mediatek MT8365 pin control"
+       bool "MediaTek MT8365 pin control"
        depends on OF
        depends on ARM64 || COMPILE_TEST
        default ARM64 && ARCH_MEDIATEK
        select PINCTRL_MTK
 
 config PINCTRL_MT8516
-       bool "Mediatek MT8516 pin control"
+       bool "MediaTek MT8516 pin control"
        depends on OF
        depends on ARM64 || COMPILE_TEST
        default ARM64 && ARCH_MEDIATEK
@@ -209,7 +211,7 @@ config PINCTRL_MT8516
 
 # For PMIC
 config PINCTRL_MT6397
-       bool "Mediatek MT6397 pin control"
+       bool "MediaTek MT6397 pin control"
        depends on MFD_MT6397 || COMPILE_TEST
        depends on OF
        default MFD_MT6397
index 373eed8..c775d23 100644 (file)
@@ -1206,7 +1206,6 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
                dev_err(dev, "can't add the irq domain\n");
                return -ENODEV;
        }
-       atmel_pioctrl->irq_domain->name = "atmel gpio";
 
        for (i = 0; i < atmel_pioctrl->npins; i++) {
                int irq = irq_create_mapping(atmel_pioctrl->irq_domain, i);
index 29e4a62..1dcbd09 100644 (file)
@@ -1204,7 +1204,7 @@ static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev,
        regmap_update_bits(info->map, REG_ALT(0, info, pin->pin),
                           BIT(p), f << p);
        regmap_update_bits(info->map, REG_ALT(1, info, pin->pin),
-                          BIT(p), f << (p - 1));
+                          BIT(p), (f >> 1) << p);
 
        return 0;
 }
index cb33a23..04ace4c 100644 (file)
@@ -1330,7 +1330,7 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
        if (fwnode_property_read_u32(fwnode, "st,bank-ioport", &bank_ioport_nr))
                bank_ioport_nr = bank_nr;
 
-       bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;
+       bank->gpio_chip.base = -1;
 
        bank->gpio_chip.ngpio = npins;
        bank->gpio_chip.fwnode = fwnode;
index aaad412..42ccd7f 100644 (file)
@@ -485,8 +485,10 @@ int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl,
                 * device, so ignore it and continue with the next one.
                 */
                status = ssam_add_client_device(parent, ctrl, child);
-               if (status && status != -ENODEV)
+               if (status && status != -ENODEV) {
+                       fwnode_handle_put(child);
                        goto err;
+               }
        }
 
        return 0;
index cb15acd..e2c9a68 100644 (file)
@@ -464,7 +464,8 @@ static const struct dmi_system_id asus_quirks[] = {
                .ident = "ASUS ROG FLOW X13",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "GV301Q"),
+                       /* Match GV301** */
+                       DMI_MATCH(DMI_PRODUCT_NAME, "GV301"),
                },
                .driver_data = &quirk_asus_tablet_mode,
        },
index 322cfae..2a42604 100644 (file)
@@ -140,6 +140,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
        }}
 
 static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("A320M-S2H V2-CF"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H-CF"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H WIFI-CF"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
@@ -150,6 +151,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550I AORUS PRO AX"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B650 AORUS ELITE AX"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660 GAMING X DDR4"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660I AORUS PRO DDR4"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
@@ -159,6 +161,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570S AORUS ELITE"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z690M AORUS ELITE AX DDR4"),
        { }
 };
index 0eb5bfd..959ec3c 100644 (file)
@@ -1170,7 +1170,6 @@ static const struct key_entry ideapad_keymap[] = {
        { KE_KEY,  65, { KEY_PROG4 } },
        { KE_KEY,  66, { KEY_TOUCHPAD_OFF } },
        { KE_KEY,  67, { KEY_TOUCHPAD_ON } },
-       { KE_KEY,  68, { KEY_TOUCHPAD_TOGGLE } },
        { KE_KEY, 128, { KEY_ESC } },
 
        /*
@@ -1526,18 +1525,16 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv, bool send_
        if (priv->features.ctrl_ps2_aux_port)
                i8042_command(&param, value ? I8042_CMD_AUX_ENABLE : I8042_CMD_AUX_DISABLE);
 
-       if (send_events) {
-               /*
-                * On older models the EC controls the touchpad and toggles it
-                * on/off itself, in this case we report KEY_TOUCHPAD_ON/_OFF.
-                * If the EC did not toggle, report KEY_TOUCHPAD_TOGGLE.
-                */
-               if (value != priv->r_touchpad_val) {
-                       ideapad_input_report(priv, value ? 67 : 66);
-                       sysfs_notify(&priv->platform_device->dev.kobj, NULL, "touchpad");
-               } else {
-                       ideapad_input_report(priv, 68);
-               }
+       /*
+        * On older models the EC controls the touchpad and toggles it on/off
+        * itself, in this case we report KEY_TOUCHPAD_ON/_OFF. Some models do
+        * an acpi-notify with VPC bit 5 set on resume, so this function get
+        * called with send_events=true on every resume. Therefor if the EC did
+        * not toggle, do nothing to avoid sending spurious KEY_TOUCHPAD_TOGGLE.
+        */
+       if (send_events && value != priv->r_touchpad_val) {
+               ideapad_input_report(priv, value ? 67 : 66);
+               sysfs_notify(&priv->platform_device->dev.kobj, NULL, "touchpad");
        }
 
        priv->r_touchpad_val = value;
index 3a15d32..b959196 100644 (file)
@@ -66,7 +66,18 @@ static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
 
 static inline u64 pmc_core_adjust_slp_s0_step(struct pmc_dev *pmcdev, u32 value)
 {
-       return (u64)value * pmcdev->map->slp_s0_res_counter_step;
+       /*
+        * ADL PCH does not have the SLP_S0 counter and LPM Residency counters are
+        * used as a workaround which uses 30.5 usec tick. All other client
+        * programs have the legacy SLP_S0 residency counter that is using the 122
+        * usec tick.
+        */
+       const int lpm_adj_x2 = pmcdev->map->lpm_res_counter_step_x2;
+
+       if (pmcdev->map == &adl_reg_map)
+               return (u64)value * GET_X2_COUNTER((u64)lpm_adj_x2);
+       else
+               return (u64)value * pmcdev->map->slp_s0_res_counter_step;
 }
 
 static int set_etr3(struct pmc_dev *pmcdev)
index c999732..a522795 100644 (file)
@@ -203,7 +203,7 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
        struct intel_vsec_device *feature_vsec_dev;
        struct resource *res, *tmp;
        const char *name;
-       int ret, i;
+       int i;
 
        name = intel_tpmi_name(pfs->pfs_header.tpmi_id);
        if (!name)
@@ -215,8 +215,8 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
 
        feature_vsec_dev = kzalloc(sizeof(*feature_vsec_dev), GFP_KERNEL);
        if (!feature_vsec_dev) {
-               ret = -ENOMEM;
-               goto free_res;
+               kfree(res);
+               return -ENOMEM;
        }
 
        snprintf(feature_id_name, sizeof(feature_id_name), "tpmi-%s", name);
@@ -239,20 +239,11 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
        /*
         * intel_vsec_add_aux() is resource managed, no explicit
         * delete is required on error or on module unload.
-        * feature_vsec_dev memory is also freed as part of device
-        * delete.
+        * feature_vsec_dev and res memory are also freed as part of
+        * device deletion.
         */
-       ret = intel_vsec_add_aux(vsec_dev->pcidev, &vsec_dev->auxdev.dev,
-                                feature_vsec_dev, feature_id_name);
-       if (ret)
-               goto free_res;
-
-       return 0;
-
-free_res:
-       kfree(res);
-
-       return ret;
+       return intel_vsec_add_aux(vsec_dev->pcidev, &vsec_dev->auxdev.dev,
+                                 feature_vsec_dev, feature_id_name);
 }
 
 static int tpmi_create_devices(struct intel_tpmi_info *tpmi_info)
index 13decf3..2311c16 100644 (file)
@@ -154,6 +154,7 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
        ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
        mutex_unlock(&vsec_ida_lock);
        if (ret < 0) {
+               kfree(intel_vsec_dev->resource);
                kfree(intel_vsec_dev);
                return ret;
        }
index 86b33b7..78dc82b 100644 (file)
@@ -920,7 +920,7 @@ static ssize_t display_name_show(struct kobject *kobj, struct kobj_attribute *at
 static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
        struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
-       char *item, *value;
+       char *item, *value, *p;
        int ret;
 
        ret = tlmi_setting(setting->index, &item, LENOVO_BIOS_SETTING_GUID);
@@ -930,10 +930,15 @@ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *a
        /* validate and split from `item,value` -> `value` */
        value = strpbrk(item, ",");
        if (!value || value == item || !strlen(value + 1))
-               return -EINVAL;
-
-       ret = sysfs_emit(buf, "%s\n", value + 1);
+               ret = -EINVAL;
+       else {
+               /* On Workstations remove the Options part after the value */
+               p = strchrnul(value, ';');
+               *p = '\0';
+               ret = sysfs_emit(buf, "%s\n", value + 1);
+       }
        kfree(item);
+
        return ret;
 }
 
@@ -941,12 +946,23 @@ static ssize_t possible_values_show(struct kobject *kobj, struct kobj_attribute
 {
        struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
 
-       if (!tlmi_priv.can_get_bios_selections)
-               return -EOPNOTSUPP;
-
        return sysfs_emit(buf, "%s\n", setting->possible_values);
 }
 
+static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
+               char *buf)
+{
+       struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
+
+       if (setting->possible_values) {
+               /* Figure out what setting type is as BIOS does not return this */
+               if (strchr(setting->possible_values, ';'))
+                       return sysfs_emit(buf, "enumeration\n");
+       }
+       /* Anything else is going to be a string */
+       return sysfs_emit(buf, "string\n");
+}
+
 static ssize_t current_value_store(struct kobject *kobj,
                struct kobj_attribute *attr,
                const char *buf, size_t count)
@@ -1036,14 +1052,30 @@ static struct kobj_attribute attr_possible_values = __ATTR_RO(possible_values);
 
 static struct kobj_attribute attr_current_val = __ATTR_RW_MODE(current_value, 0600);
 
+static struct kobj_attribute attr_type = __ATTR_RO(type);
+
+static umode_t attr_is_visible(struct kobject *kobj,
+                                            struct attribute *attr, int n)
+{
+       struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
+
+       /* We don't want to display possible_values attributes if not available */
+       if ((attr == &attr_possible_values.attr) && (!setting->possible_values))
+               return 0;
+
+       return attr->mode;
+}
+
 static struct attribute *tlmi_attrs[] = {
        &attr_displ_name.attr,
        &attr_current_val.attr,
        &attr_possible_values.attr,
+       &attr_type.attr,
        NULL
 };
 
 static const struct attribute_group tlmi_attr_group = {
+       .is_visible = attr_is_visible,
        .attrs = tlmi_attrs,
 };
 
@@ -1423,7 +1455,35 @@ static int tlmi_analyze(void)
                        if (ret || !setting->possible_values)
                                pr_info("Error retrieving possible values for %d : %s\n",
                                                i, setting->display_name);
+               } else {
+                       /*
+                        * Older Thinkstations don't support the bios_selections API.
+                        * Instead they store this as a [Optional:Option1,Option2] section of the
+                        * name string.
+                        * Try and pull that out if it's available.
+                        */
+                       char *optitem, *optstart, *optend;
+
+                       if (!tlmi_setting(setting->index, &optitem, LENOVO_BIOS_SETTING_GUID)) {
+                               optstart = strstr(optitem, "[Optional:");
+                               if (optstart) {
+                                       optstart += strlen("[Optional:");
+                                       optend = strstr(optstart, "]");
+                                       if (optend)
+                                               setting->possible_values =
+                                                       kstrndup(optstart, optend - optstart,
+                                                                       GFP_KERNEL);
+                               }
+                               kfree(optitem);
+                       }
                }
+               /*
+                * firmware-attributes requires that possible_values are separated by ';' but
+                * Lenovo FW uses ','. Replace appropriately.
+                */
+               if (setting->possible_values)
+                       strreplace(setting->possible_values, ',', ';');
+
                kobject_init(&setting->kobj, &tlmi_attr_setting_ktype);
                tlmi_priv.setting[i] = setting;
                kfree(item);
index 32c1045..7191ff2 100644 (file)
@@ -4479,6 +4479,14 @@ static const struct dmi_system_id fwbug_list[] __initconst = {
                }
        },
        {
+               .ident = "T14s Gen1 AMD",
+               .driver_data = &quirk_s2idle_bug,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20UJ"),
+               }
+       },
+       {
                .ident = "P14s Gen1 AMD",
                .driver_data = &quirk_s2idle_bug,
                .matches = {
index 6153016..350154e 100644 (file)
@@ -637,7 +637,7 @@ static int ptp_qoriq_probe(struct platform_device *dev)
        return 0;
 
 no_clock:
-       iounmap(ptp_qoriq->base);
+       iounmap(base);
 no_ioremap:
        release_resource(ptp_qoriq->rsrc);
 no_resource:
index e01147f..4747257 100644 (file)
@@ -115,7 +115,14 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
        }
 
        if (pwm->chip->ops->get_state) {
-               struct pwm_state state;
+               /*
+                * Zero-initialize state because most drivers are unaware of
+                * .usage_power. The other members of state are supposed to be
+                * set by lowlevel drivers. We still initialize the whole
+                * structure for simplicity even though this might paper over
+                * faulty implementations of .get_state().
+                */
+               struct pwm_state state = { 0, };
 
                err = pwm->chip->ops->get_state(pwm->chip, pwm, &state);
                trace_pwm_get(pwm, &state, err);
@@ -448,7 +455,7 @@ static void pwm_apply_state_debug(struct pwm_device *pwm,
 {
        struct pwm_state *last = &pwm->last;
        struct pwm_chip *chip = pwm->chip;
-       struct pwm_state s1, s2;
+       struct pwm_state s1 = { 0 }, s2 = { 0 };
        int err;
 
        if (!IS_ENABLED(CONFIG_PWM_DEBUG))
@@ -530,6 +537,7 @@ static void pwm_apply_state_debug(struct pwm_device *pwm,
                return;
        }
 
+       *last = (struct pwm_state){ 0 };
        err = chip->ops->get_state(chip, pwm, last);
        trace_pwm_get(pwm, last, err);
        if (err)
index 86df670..ad18b0e 100644 (file)
@@ -198,6 +198,7 @@ static int cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 
        state->enabled = (ret > 0);
        state->period = EC_PWM_MAX_DUTY;
+       state->polarity = PWM_POLARITY_NORMAL;
 
        /*
         * Note that "disabled" and "duty cycle == 0" are treated the same. If
index 12c05c1..1b9274c 100644 (file)
@@ -146,6 +146,7 @@ static int hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 
        value = readl(base + PWM_CTRL_ADDR(pwm->hwpwm));
        state->enabled = (PWM_ENABLE_MASK & value);
+       state->polarity = (PWM_POLARITY_MASK & value) ? PWM_POLARITY_INVERSED : PWM_POLARITY_NORMAL;
 
        return 0;
 }
index 8362b48..47b3141 100644 (file)
@@ -126,6 +126,7 @@ static int iqs620_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
        mutex_unlock(&iqs620_pwm->lock);
 
        state->period = IQS620_PWM_PERIOD_NS;
+       state->polarity = PWM_POLARITY_NORMAL;
 
        return 0;
 }
index 16d79ca..5cd7b90 100644 (file)
@@ -162,6 +162,12 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
        duty = state->duty_cycle;
        period = state->period;
 
+       /*
+        * Note this is wrong. The result is an output wave that isn't really
+        * inverted and so is wrongly identified by .get_state as normal.
+        * Fixing this needs some care however as some machines might rely on
+        * this.
+        */
        if (state->polarity == PWM_POLARITY_INVERSED)
                duty = period - duty;
 
@@ -358,6 +364,8 @@ static int meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
                state->duty_cycle = 0;
        }
 
+       state->polarity = PWM_POLARITY_NORMAL;
+
        return 0;
 }
 
index d866ce3..bde579a 100644 (file)
@@ -109,6 +109,7 @@ static int sprd_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
        duty = val & SPRD_PWM_DUTY_MSK;
        tmp = (prescale + 1) * NSEC_PER_SEC * duty;
        state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, chn->clk_rate);
+       state->polarity = PWM_POLARITY_NORMAL;
 
        /* Disable PWM clocks if the PWM channel is not in enable state. */
        if (!state->enabled)
index 529963a..41537c4 100644 (file)
@@ -8,18 +8,19 @@
 // Copyright (c) 2012 Marvell Technology Ltd.
 // Yunfan Zhang <yfzhang@marvell.com>
 
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/param.h>
-#include <linux/err.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/regulator/driver.h>
+#include <linux/regulator/fan53555.h>
 #include <linux/regulator/machine.h>
 #include <linux/regulator/of_regulator.h>
-#include <linux/of_device.h>
-#include <linux/i2c.h>
 #include <linux/slab.h>
-#include <linux/regmap.h>
-#include <linux/regulator/fan53555.h>
 
 /* Voltage setting */
 #define FAN53555_VSEL0         0x00
@@ -60,7 +61,7 @@
 #define TCS_VSEL1_MODE         (1 << 6)
 
 #define TCS_SLEW_SHIFT         3
-#define TCS_SLEW_MASK          (0x3 < 3)
+#define TCS_SLEW_MASK          GENMASK(4, 3)
 
 enum fan53555_vendor {
        FAN53526_VENDOR_FAIRCHILD = 0,
index 2a9867a..e6724a2 100644 (file)
@@ -215,7 +215,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
                drvdata->enable_clock = devm_clk_get(dev, NULL);
                if (IS_ERR(drvdata->enable_clock)) {
                        dev_err(dev, "Can't get enable-clock from devicetree\n");
-                       return -ENOENT;
+                       return PTR_ERR(drvdata->enable_clock);
                }
        } else if (drvtype && drvtype->has_performance_state) {
                drvdata->desc.ops = &fixed_voltage_domain_ops;
index 05ad28f..229df71 100644 (file)
@@ -42,6 +42,7 @@ static const int sm5703_buck_voltagemap[] = {
                .type = REGULATOR_VOLTAGE,                              \
                .id = SM5703_USBLDO ## _id,                             \
                .ops = &sm5703_regulator_ops_fixed,                     \
+               .n_voltages = 1,                                        \
                .fixed_uV = SM5703_USBLDO_MICROVOLT,                    \
                .enable_reg = SM5703_REG_USBLDO12,                      \
                .enable_mask = SM5703_REG_EN_USBLDO ##_id,              \
@@ -56,6 +57,7 @@ static const int sm5703_buck_voltagemap[] = {
                .type = REGULATOR_VOLTAGE,                              \
                .id = SM5703_VBUS,                                      \
                .ops = &sm5703_regulator_ops_fixed,                     \
+               .n_voltages = 1,                                        \
                .fixed_uV = SM5703_VBUS_MICROVOLT,                      \
                .enable_reg = SM5703_REG_CNTL,                          \
                .enable_mask = SM5703_OPERATION_MODE_MASK,              \
index 997b524..a48c693 100644 (file)
@@ -54,8 +54,9 @@ static struct ap_driver vfio_ap_drv = {
 
 static void vfio_ap_matrix_dev_release(struct device *dev)
 {
-       struct ap_matrix_dev *matrix_dev = dev_get_drvdata(dev);
+       struct ap_matrix_dev *matrix_dev;
 
+       matrix_dev = container_of(dev, struct ap_matrix_dev, device);
        kfree(matrix_dev);
 }
 
index c76f82f..15f4529 100644 (file)
@@ -771,13 +771,12 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
                iscsi_set_param(cls_conn, param, buf, buflen);
                break;
        case ISCSI_PARAM_DATADGST_EN:
-               iscsi_set_param(cls_conn, param, buf, buflen);
-
                mutex_lock(&tcp_sw_conn->sock_lock);
                if (!tcp_sw_conn->sock) {
                        mutex_unlock(&tcp_sw_conn->sock_lock);
                        return -ENOTCONN;
                }
+               iscsi_set_param(cls_conn, param, buf, buflen);
                tcp_sw_conn->sendpage = conn->datadgst_en ?
                        sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
                mutex_unlock(&tcp_sw_conn->sock_lock);
index 3ceece9..c895189 100644 (file)
@@ -3298,7 +3298,7 @@ fw_crash_buffer_show(struct device *cdev,
 
        spin_lock_irqsave(&instance->crashdump_lock, flags);
        buff_offset = instance->fw_crash_buffer_offset;
-       if (!instance->crash_dump_buf &&
+       if (!instance->crash_dump_buf ||
                !((instance->fw_crash_state == AVAILABLE) ||
                (instance->fw_crash_state == COPYING))) {
                dev_err(&instance->pdev->dev,
index 84c9a55..8a83f3f 100644 (file)
@@ -4771,7 +4771,7 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
        devhandle = megasas_get_tm_devhandle(scmd->device);
 
        if (devhandle == (u16)ULONG_MAX) {
-               ret = SUCCESS;
+               ret = FAILED;
                sdev_printk(KERN_INFO, scmd->device,
                        "task abort issued for invalid devhandle\n");
                mutex_unlock(&instance->reset_mutex);
@@ -4841,7 +4841,7 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
        devhandle = megasas_get_tm_devhandle(scmd->device);
 
        if (devhandle == (u16)ULONG_MAX) {
-               ret = SUCCESS;
+               ret = FAILED;
                sdev_printk(KERN_INFO, scmd->device,
                        "target reset issued for invalid devhandle\n");
                mutex_unlock(&instance->reset_mutex);
index a565817..d109a4c 100644 (file)
@@ -2526,7 +2526,7 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
                mrioc->unrecoverable = 1;
                goto schedule_work;
        case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
-               return;
+               goto schedule_work;
        case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
                reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
                break;
index 2ee9ea5..14ae0a9 100644 (file)
@@ -6616,11 +6616,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
        else if (rc == -EAGAIN)
                goto try_32bit_dma;
        total_sz += sense_sz;
-       ioc_info(ioc,
-           "sense pool(0x%p)- dma(0x%llx): depth(%d),"
-           "element_size(%d), pool_size(%d kB)\n",
-           ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
-           SCSI_SENSE_BUFFERSIZE, sz / 1024);
        /* reply pool, 4 byte align */
        sz = ioc->reply_free_queue_depth * ioc->reply_sz;
        rc = _base_allocate_reply_pool(ioc, sz);
index bee1b8a..d0cdbfe 100644 (file)
@@ -3617,6 +3617,7 @@ skip_dpc:
 probe_failed:
        qla_enode_stop(base_vha);
        qla_edb_stop(base_vha);
+       vfree(base_vha->scan.l);
        if (base_vha->gnl.l) {
                dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
                                base_vha->gnl.l, base_vha->gnl.ldma);
index 5cce1ba..09ef0b3 100644 (file)
@@ -314,11 +314,18 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
        if (result)
                return -EIO;
 
-       /* Sanity check that we got the page back that we asked for */
+       /*
+        * Sanity check that we got the page back that we asked for and that
+        * the page size is not 0.
+        */
        if (buffer[1] != page)
                return -EIO;
 
-       return get_unaligned_be16(&buffer[2]) + 4;
+       result = get_unaligned_be16(&buffer[2]);
+       if (!result)
+               return -EIO;
+
+       return result + 4;
 }
 
 static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
index b11a916..b54f2c6 100644 (file)
@@ -509,9 +509,6 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
        int i;
        struct ses_component *scomp;
 
-       if (!edev->component[0].scratch)
-               return 0;
-
        for (i = 0; i < edev->components; i++) {
                scomp = edev->component[i].scratch;
                if (scomp->addr != efd->addr)
@@ -602,8 +599,10 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
                                                components++,
                                                type_ptr[0],
                                                name);
-                               else
+                               else if (components < edev->components)
                                        ecomp = &edev->component[components++];
+                               else
+                                       ecomp = ERR_PTR(-EINVAL);
 
                                if (!IS_ERR(ecomp)) {
                                        if (addl_desc_ptr) {
@@ -734,11 +733,6 @@ static int ses_intf_add(struct device *cdev,
                        components += type_ptr[1];
        }
 
-       if (components == 0) {
-               sdev_printk(KERN_WARNING, sdev, "enclosure has no enumerated components\n");
-               goto err_free;
-       }
-
        ses_dev->page1 = buf;
        ses_dev->page1_len = len;
        buf = NULL;
@@ -780,9 +774,11 @@ static int ses_intf_add(struct device *cdev,
                buf = NULL;
        }
 page2_not_supported:
-       scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL);
-       if (!scomp)
-               goto err_free;
+       if (components > 0) {
+               scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL);
+               if (!scomp)
+                       goto err_free;
+       }
 
        edev = enclosure_register(cdev->parent, dev_name(&sdev->sdev_gendev),
                                  components, &ses_enclosure_callbacks);
index bd87d3c..69347b6 100644 (file)
@@ -632,7 +632,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(dev, "Failed to request irq\n");
 
-               return ret;
+               goto err_irq;
        }
 
        ret = rockchip_sfc_init(sfc);
index 44b85a8..7bc14fb 100644 (file)
@@ -4456,6 +4456,11 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action,
                        return NOTIFY_OK;
                }
 
+               /*
+                * Clear the flag before adding the device so that fw_devlink
+                * doesn't skip adding consumers to this device.
+                */
+               rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
                spi = of_register_spi_device(ctlr, rd->dn);
                put_device(&ctlr->dev);
 
index 290b1bb..df5fb54 100644 (file)
@@ -488,7 +488,7 @@ static bool is_normal_memory(pgprot_t p)
 #elif defined(CONFIG_ARM64)
        return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
 #else
-#error "Unuspported architecture"
+#error "Unsupported architecture"
 #endif
 }
 
index b1c6231..673cf03 100644 (file)
@@ -32,7 +32,7 @@ static int shm_get_kernel_pages(unsigned long start, size_t page_count,
                         is_kmap_addr((void *)start)))
                return -EINVAL;
 
-       page = virt_to_page(start);
+       page = virt_to_page((void *)start);
        for (n = 0; n < page_count; n++) {
                pages[n] = page + n;
                get_page(pages[n]);
index 90526f4..d71ee50 100644 (file)
@@ -153,7 +153,6 @@ static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp
                cancel_delayed_work_sync(&pci_info->work);
                proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
                proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, 0);
-               thermal_zone_device_disable(tzd);
                pci_info->stored_thres = 0;
                return 0;
        }
index c7ba568..91fc7e2 100644 (file)
@@ -235,6 +235,12 @@ static int max_idle_set(const char *arg, const struct kernel_param *kp)
                goto skip_limit_set;
        }
 
+       if (!cpumask_available(idle_injection_cpu_mask)) {
+               ret = allocate_copy_idle_injection_mask(cpu_present_mask);
+               if (ret)
+                       goto skip_limit_set;
+       }
+
        if (check_invalid(idle_injection_cpu_mask, new_max_idle)) {
                ret = -EINVAL;
                goto skip_limit_set;
@@ -791,7 +797,8 @@ static int __init powerclamp_init(void)
                return retval;
 
        mutex_lock(&powerclamp_lock);
-       retval = allocate_copy_idle_injection_mask(cpu_present_mask);
+       if (!cpumask_available(idle_injection_cpu_mask))
+               retval = allocate_copy_idle_injection_mask(cpu_present_mask);
        mutex_unlock(&powerclamp_lock);
 
        if (retval)
index 2e22bb8..e69868e 100644 (file)
@@ -193,8 +193,67 @@ static const struct attribute_group thermal_attr_group = {
 #define THERM_THROT_POLL_INTERVAL      HZ
 #define THERM_STATUS_PROCHOT_LOG       BIT(1)
 
-#define THERM_STATUS_CLEAR_CORE_MASK (BIT(1) | BIT(3) | BIT(5) | BIT(7) | BIT(9) | BIT(11) | BIT(13) | BIT(15))
-#define THERM_STATUS_CLEAR_PKG_MASK  (BIT(1) | BIT(3) | BIT(5) | BIT(7) | BIT(9) | BIT(11))
+static u64 therm_intr_core_clear_mask;
+static u64 therm_intr_pkg_clear_mask;
+
+static void thermal_intr_init_core_clear_mask(void)
+{
+       if (therm_intr_core_clear_mask)
+               return;
+
+       /*
+        * Reference: Intel SDM  Volume 4
+        * "Table 2-2. IA-32 Architectural MSRs", MSR 0x19C
+        * IA32_THERM_STATUS.
+        */
+
+       /*
+        * Bit 1, 3, 5: CPUID.01H:EDX[22] = 1. This driver will not
+        * enable interrupts, when 0 as it checks for X86_FEATURE_ACPI.
+        */
+       therm_intr_core_clear_mask = (BIT(1) | BIT(3) | BIT(5));
+
+       /*
+        * Bit 7 and 9: Thermal Threshold #1 and #2 log
+        * If CPUID.01H:ECX[8] = 1
+        */
+       if (boot_cpu_has(X86_FEATURE_TM2))
+               therm_intr_core_clear_mask |= (BIT(7) | BIT(9));
+
+       /* Bit 11: Power Limitation log (R/WC0) If CPUID.06H:EAX[4] = 1 */
+       if (boot_cpu_has(X86_FEATURE_PLN))
+               therm_intr_core_clear_mask |= BIT(11);
+
+       /*
+        * Bit 13: Current Limit log (R/WC0) If CPUID.06H:EAX[7] = 1
+        * Bit 15: Cross Domain Limit log (R/WC0) If CPUID.06H:EAX[7] = 1
+        */
+       if (boot_cpu_has(X86_FEATURE_HWP))
+               therm_intr_core_clear_mask |= (BIT(13) | BIT(15));
+}
+
+static void thermal_intr_init_pkg_clear_mask(void)
+{
+       if (therm_intr_pkg_clear_mask)
+               return;
+
+       /*
+        * Reference: Intel SDM  Volume 4
+        * "Table 2-2. IA-32 Architectural MSRs", MSR 0x1B1
+        * IA32_PACKAGE_THERM_STATUS.
+        */
+
+       /* All bits except BIT 26 depend on CPUID.06H: EAX[6] = 1 */
+       if (boot_cpu_has(X86_FEATURE_PTS))
+               therm_intr_pkg_clear_mask = (BIT(1) | BIT(3) | BIT(5) | BIT(7) | BIT(9) | BIT(11));
+
+       /*
+        * Intel SDM Volume 2A: Thermal and Power Management Leaf
+        * Bit 26: CPUID.06H: EAX[19] = 1
+        */
+       if (boot_cpu_has(X86_FEATURE_HFI))
+               therm_intr_pkg_clear_mask |= BIT(26);
+}
 
 /*
  * Clear the bits in package thermal status register for bit = 1
@@ -207,13 +266,10 @@ void thermal_clear_package_intr_status(int level, u64 bit_mask)
 
        if (level == CORE_LEVEL) {
                msr  = MSR_IA32_THERM_STATUS;
-               msr_val = THERM_STATUS_CLEAR_CORE_MASK;
+               msr_val = therm_intr_core_clear_mask;
        } else {
                msr  = MSR_IA32_PACKAGE_THERM_STATUS;
-               msr_val = THERM_STATUS_CLEAR_PKG_MASK;
-               if (boot_cpu_has(X86_FEATURE_HFI))
-                       msr_val |= BIT(26);
-
+               msr_val = therm_intr_pkg_clear_mask;
        }
 
        msr_val &= ~bit_mask;
@@ -708,6 +764,9 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
        h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
        apic_write(APIC_LVTTHMR, h);
 
+       thermal_intr_init_core_clear_mask();
+       thermal_intr_init_pkg_clear_mask();
+
        rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
        if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
                wrmsr(MSR_IA32_THERM_INTERRUPT,
index a4aba7b..6c20c9f 100644 (file)
@@ -876,8 +876,6 @@ static void cooling_device_stats_setup(struct thermal_cooling_device *cdev)
        unsigned long states = cdev->max_state + 1;
        int var;
 
-       lockdep_assert_held(&cdev->lock);
-
        var = sizeof(*stats);
        var += sizeof(*stats->time_in_state) * states;
        var += sizeof(*stats->trans_table) * states * states;
@@ -903,8 +901,6 @@ out:
 
 static void cooling_device_stats_destroy(struct thermal_cooling_device *cdev)
 {
-       lockdep_assert_held(&cdev->lock);
-
        kfree(cdev->stats);
        cdev->stats = NULL;
 }
@@ -931,6 +927,8 @@ void thermal_cooling_device_destroy_sysfs(struct thermal_cooling_device *cdev)
 
 void thermal_cooling_device_stats_reinit(struct thermal_cooling_device *cdev)
 {
+       lockdep_assert_held(&cdev->lock);
+
        cooling_device_stats_destroy(cdev);
        cooling_device_stats_setup(cdev);
 }
index fa43df0..3ba9c8b 100644 (file)
@@ -1903,6 +1903,17 @@ EXPORT_SYMBOL_GPL(serial8250_modem_status);
 static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
 {
        switch (iir & 0x3f) {
+       case UART_IIR_THRI:
+               /*
+                * Postpone DMA or not decision to IIR_RDI or IIR_RX_TIMEOUT
+                * because it's impossible to do an informed decision about
+                * that with IIR_THRI.
+                *
+                * This also fixes one known DMA Rx corruption issue where
+                * DR is asserted but DMA Rx only gets a corrupted zero byte
+                * (too early DR?).
+                */
+               return false;
        case UART_IIR_RDI:
                if (!up->dma->rx_running)
                        break;
index 56e6ba3..074bfed 100644 (file)
@@ -858,11 +858,17 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)
                        struct lpuart_port, port);
        unsigned long stat = lpuart32_read(port, UARTSTAT);
        unsigned long sfifo = lpuart32_read(port, UARTFIFO);
+       unsigned long ctrl = lpuart32_read(port, UARTCTRL);
 
        if (sport->dma_tx_in_progress)
                return 0;
 
-       if (stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT)
+       /*
+        * LPUART Transmission Complete Flag may never be set while queuing a break
+        * character, so avoid checking for transmission complete when UARTCTRL_SBK
+        * is asserted.
+        */
+       if ((stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT) || ctrl & UARTCTRL_SBK)
                return TIOCSER_TEMT;
 
        return 0;
@@ -2942,7 +2948,7 @@ static bool lpuart_uport_is_active(struct lpuart_port *sport)
        tty = tty_port_tty_get(port);
        if (tty) {
                tty_dev = tty->dev;
-               may_wake = device_may_wakeup(tty_dev);
+               may_wake = tty_dev && device_may_wakeup(tty_dev);
                tty_kref_put(tty);
        }
 
index 7bd0807..caa09a0 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/ioport.h>
 #include <linux/ktime.h>
 #include <linux/major.h>
+#include <linux/minmax.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/of.h>
@@ -2864,6 +2865,13 @@ static int sci_init_single(struct platform_device *dev,
                        sci_port->irqs[i] = platform_get_irq(dev, i);
        }
 
+       /*
+        * The fourth interrupt on SCI port is transmit end interrupt, so
+        * shuffle the interrupts.
+        */
+       if (p->type == PORT_SCI)
+               swap(sci_port->irqs[SCIx_BRI_IRQ], sci_port->irqs[SCIx_TEI_IRQ]);
+
        /* The SCI generates several interrupts. They can be muxed together or
         * connected to different interrupt lines. In the muxed case only one
         * interrupt resource is specified as there is only one interrupt ID.
@@ -2929,7 +2937,7 @@ static int sci_init_single(struct platform_device *dev,
        port->flags             = UPF_FIXED_PORT | UPF_BOOT_AUTOCONF | p->flags;
        port->fifosize          = sci_port->params->fifosize;
 
-       if (port->type == PORT_SCI) {
+       if (port->type == PORT_SCI && !dev->dev.of_node) {
                if (sci_port->reg_size >= 0x20)
                        port->regshift = 2;
                else
index 37e178a..70b1120 100644 (file)
@@ -1409,13 +1409,6 @@ static int ufshcd_devfreq_target(struct device *dev,
        struct ufs_clk_info *clki;
        unsigned long irq_flags;
 
-       /*
-        * Skip devfreq if UFS initialization is not finished.
-        * Otherwise ufs could be in a inconsistent state.
-        */
-       if (!smp_load_acquire(&hba->logical_unit_scan_finished))
-               return 0;
-
        if (!ufshcd_is_clkscaling_supported(hba))
                return -EINVAL;
 
@@ -8399,6 +8392,22 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
        if (ret)
                goto out;
 
+       /* Initialize devfreq after UFS device is detected */
+       if (ufshcd_is_clkscaling_supported(hba)) {
+               memcpy(&hba->clk_scaling.saved_pwr_info.info,
+                       &hba->pwr_info,
+                       sizeof(struct ufs_pa_layer_attr));
+               hba->clk_scaling.saved_pwr_info.is_valid = true;
+               hba->clk_scaling.is_allowed = true;
+
+               ret = ufshcd_devfreq_init(hba);
+               if (ret)
+                       goto out;
+
+               hba->clk_scaling.is_enabled = true;
+               ufshcd_init_clk_scaling_sysfs(hba);
+       }
+
        ufs_bsg_probe(hba);
        ufshpb_init(hba);
        scsi_scan_host(hba->host);
@@ -8670,12 +8679,6 @@ out:
        if (ret) {
                pm_runtime_put_sync(hba->dev);
                ufshcd_hba_exit(hba);
-       } else {
-               /*
-                * Make sure that when reader code sees UFS initialization has finished,
-                * all initialization steps have really been executed.
-                */
-               smp_store_release(&hba->logical_unit_scan_finished, true);
        }
 }
 
@@ -10316,30 +10319,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
         */
        ufshcd_set_ufs_dev_active(hba);
 
-       /* Initialize devfreq */
-       if (ufshcd_is_clkscaling_supported(hba)) {
-               memcpy(&hba->clk_scaling.saved_pwr_info.info,
-                       &hba->pwr_info,
-                       sizeof(struct ufs_pa_layer_attr));
-               hba->clk_scaling.saved_pwr_info.is_valid = true;
-               hba->clk_scaling.is_allowed = true;
-
-               err = ufshcd_devfreq_init(hba);
-               if (err)
-                       goto rpm_put_sync;
-
-               hba->clk_scaling.is_enabled = true;
-               ufshcd_init_clk_scaling_sysfs(hba);
-       }
-
        async_schedule(ufshcd_async_scan, hba);
        ufs_sysfs_add_nodes(hba->dev);
 
        device_enable_async_suspend(dev);
        return 0;
 
-rpm_put_sync:
-       pm_runtime_put_sync(dev);
 free_tmf_queue:
        blk_mq_destroy_queue(hba->tmf_queue);
        blk_put_queue(hba->tmf_queue);
index d63d5d9..f317d3c 100644 (file)
@@ -414,7 +414,7 @@ static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
 void cdnsp_setup_analyze(struct cdnsp_device *pdev)
 {
        struct usb_ctrlrequest *ctrl = &pdev->setup;
-       int ret = 0;
+       int ret = -EINVAL;
        u16 len;
 
        trace_cdnsp_ctrl_req(ctrl);
@@ -424,7 +424,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
 
        if (pdev->gadget.state == USB_STATE_NOTATTACHED) {
                dev_err(pdev->dev, "ERR: Setup detected in unattached state\n");
-               ret = -EINVAL;
                goto out;
        }
 
index a23ddbb..5607935 100644 (file)
@@ -49,6 +49,7 @@
 #define PCI_DEVICE_ID_INTEL_RPLS               0x7a61
 #define PCI_DEVICE_ID_INTEL_MTLM               0x7eb1
 #define PCI_DEVICE_ID_INTEL_MTLP               0x7ec1
+#define PCI_DEVICE_ID_INTEL_MTLS               0x7f6f
 #define PCI_DEVICE_ID_INTEL_MTL                        0x7e7e
 #define PCI_DEVICE_ID_INTEL_TGL                        0x9a15
 #define PCI_DEVICE_ID_AMD_MR                   0x163a
@@ -474,6 +475,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLP),
          (kernel_ulong_t) &dwc3_pci_intel_swnode, },
 
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLS),
+         (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL),
          (kernel_ulong_t) &dwc3_pci_intel_swnode, },
 
index ddfc537..56cdfb2 100644 (file)
@@ -1251,7 +1251,7 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
        p->kiocb = kiocb;
        if (p->aio) {
                p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
-               if (!p->to_free) {
+               if (!iter_is_ubuf(&p->data) && !p->to_free) {
                        kfree(p);
                        return -ENOMEM;
                }
index d605bc2..28249d0 100644 (file)
@@ -614,7 +614,7 @@ ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
                if (!priv)
                        goto fail;
                priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
-               if (!priv->to_free) {
+               if (!iter_is_ubuf(&priv->to) && !priv->to_free) {
                        kfree(priv);
                        goto fail;
                }
index fb988e4..6db07ca 100644 (file)
@@ -771,12 +771,11 @@ static struct pci_driver xhci_pci_driver = {
        /* suspend and resume implemented later */
 
        .shutdown =     usb_hcd_pci_shutdown,
-       .driver = {
 #ifdef CONFIG_PM
-               .pm = &usb_hcd_pci_pm_ops,
-#endif
-               .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+       .driver = {
+               .pm = &usb_hcd_pci_pm_ops
        },
+#endif
 };
 
 static int __init xhci_pci_init(void)
index 1ff22f6..a88c39e 100644 (file)
@@ -1360,6 +1360,9 @@ static void tegra_xhci_id_work(struct work_struct *work)
 
        mutex_unlock(&tegra->lock);
 
+       tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl,
+                                                                   tegra->otg_usb2_port);
+
        if (tegra->host_mode) {
                /* switch to host mode */
                if (tegra->otg_usb3_port >= 0) {
@@ -1474,9 +1477,6 @@ static int tegra_xhci_id_notify(struct notifier_block *nb,
        }
 
        tegra->otg_usb2_port = tegra_xusb_get_usb2_port(tegra, usbphy);
-       tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(
-                                                       tegra->padctl,
-                                                       tegra->otg_usb2_port);
 
        tegra->host_mode = (usbphy->last_event == USB_EVENT_ID) ? true : false;
 
index 6183ce8..6307bae 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/pci.h>
+#include <linux/iommu.h>
 #include <linux/iopoll.h>
 #include <linux/irq.h>
 #include <linux/log2.h>
@@ -228,6 +229,7 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
 static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
 {
        struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+       struct iommu_domain *domain;
        int err, i;
        u64 val;
        u32 intrs;
@@ -246,7 +248,9 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
         * an iommu. Doing anything when there is no iommu is definitely
         * unsafe...
         */
-       if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
+       domain = iommu_get_domain_for_dev(dev);
+       if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
+           domain->type == IOMMU_DOMAIN_IDENTITY)
                return;
 
        xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
@@ -4438,6 +4442,7 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
 
        if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
                spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_free_command(xhci, command);
                return 0;
        }
 
index 832ad59..cdea1bf 100644 (file)
@@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
        { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
        { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+       { USB_DEVICE(0x10C4, 0x82AA) }, /* Silicon Labs IFS-USB-DATACABLE used with Quint UPS */
        { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
        { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
        { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
index e6d8d9b..f31cc3c 100644 (file)
@@ -1198,6 +1198,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0900, 0xff, 0, 0), /* RM500U-CN */
+         .driver_info = ZLP },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
@@ -1300,6 +1302,14 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff),    /* Telit FN990 (PCIe) */
          .driver_info = RSVD(0) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff),    /* Telit FE990 (rmnet) */
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff),    /* Telit FE990 (MBIM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff),    /* Telit FE990 (RNDIS) */
+         .driver_info = NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff),    /* Telit FE990 (ECM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
index 662cd04..8f3e884 100644 (file)
@@ -112,8 +112,12 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
                if (dp->data.status & DP_STATUS_PREFER_MULTI_FUNC &&
                    pin_assign & DP_PIN_ASSIGN_MULTI_FUNC_MASK)
                        pin_assign &= DP_PIN_ASSIGN_MULTI_FUNC_MASK;
-               else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK)
+               else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK) {
                        pin_assign &= DP_PIN_ASSIGN_DP_ONLY_MASK;
+                       /* Default to pin assign C if available */
+                       if (pin_assign & BIT(DP_PIN_ASSIGN_C))
+                               pin_assign = BIT(DP_PIN_ASSIGN_C);
+               }
 
                if (!pin_assign)
                        return -EINVAL;
index 520646a..195963b 100644 (file)
@@ -2467,10 +2467,11 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
                err = 0;
                goto out;
        }
+       mlx5_vdpa_add_debugfs(ndev);
        err = setup_virtqueues(mvdev);
        if (err) {
                mlx5_vdpa_warn(mvdev, "setup_virtqueues\n");
-               goto out;
+               goto err_setup;
        }
 
        err = create_rqt(ndev);
@@ -2500,6 +2501,8 @@ err_tir:
        destroy_rqt(ndev);
 err_rqt:
        teardown_virtqueues(ndev);
+err_setup:
+       mlx5_vdpa_remove_debugfs(ndev->debugfs);
 out:
        return err;
 }
@@ -2513,6 +2516,8 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev)
        if (!ndev->setup)
                return;
 
+       mlx5_vdpa_remove_debugfs(ndev->debugfs);
+       ndev->debugfs = NULL;
        teardown_steering(ndev);
        destroy_tir(ndev);
        destroy_rqt(ndev);
@@ -3261,7 +3266,6 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
        if (err)
                goto err_reg;
 
-       mlx5_vdpa_add_debugfs(ndev);
        mgtdev->ndev = ndev;
        return 0;
 
index 862f405..dfe2ce3 100644 (file)
@@ -466,16 +466,21 @@ static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
 
        vdpasim_net_setup_config(simdev, config);
 
-       ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_NET_VQ_NUM);
-       if (ret)
-               goto reg_err;
-
        net = sim_to_net(simdev);
 
        u64_stats_init(&net->tx_stats.syncp);
        u64_stats_init(&net->rx_stats.syncp);
        u64_stats_init(&net->cq_stats.syncp);
 
+       /*
+        * Initialization must be completed before this call, since it can
+        * connect the device to the vDPA bus, so requests can arrive after
+        * this call.
+        */
+       ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_NET_VQ_NUM);
+       if (ret)
+               goto reg_err;
+
        return 0;
 
 reg_err:
index b244e7c..32d0be9 100644 (file)
@@ -125,7 +125,6 @@ struct vhost_scsi_tpg {
        struct se_portal_group se_tpg;
        /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
        struct vhost_scsi *vhost_scsi;
-       struct list_head tmf_queue;
 };
 
 struct vhost_scsi_tport {
@@ -206,10 +205,8 @@ struct vhost_scsi {
 
 struct vhost_scsi_tmf {
        struct vhost_work vwork;
-       struct vhost_scsi_tpg *tpg;
        struct vhost_scsi *vhost;
        struct vhost_scsi_virtqueue *svq;
-       struct list_head queue_entry;
 
        struct se_cmd se_cmd;
        u8 scsi_resp;
@@ -352,12 +349,9 @@ static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
 
 static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
 {
-       struct vhost_scsi_tpg *tpg = tmf->tpg;
        struct vhost_scsi_inflight *inflight = tmf->inflight;
 
-       mutex_lock(&tpg->tv_tpg_mutex);
-       list_add_tail(&tpg->tmf_queue, &tmf->queue_entry);
-       mutex_unlock(&tpg->tv_tpg_mutex);
+       kfree(tmf);
        vhost_scsi_put_inflight(inflight);
 }
 
@@ -1194,19 +1188,11 @@ vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
                goto send_reject;
        }
 
-       mutex_lock(&tpg->tv_tpg_mutex);
-       if (list_empty(&tpg->tmf_queue)) {
-               pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
-               mutex_unlock(&tpg->tv_tpg_mutex);
+       tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
+       if (!tmf)
                goto send_reject;
-       }
-
-       tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
-                              queue_entry);
-       list_del_init(&tmf->queue_entry);
-       mutex_unlock(&tpg->tv_tpg_mutex);
 
-       tmf->tpg = tpg;
+       vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
        tmf->vhost = vs;
        tmf->svq = svq;
        tmf->resp_iov = vq->iov[vc->out];
@@ -1658,7 +1644,10 @@ undepend:
        for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
                tpg = vs_tpg[i];
                if (tpg) {
+                       mutex_lock(&tpg->tv_tpg_mutex);
+                       tpg->vhost_scsi = NULL;
                        tpg->tv_tpg_vhost_count--;
+                       mutex_unlock(&tpg->tv_tpg_mutex);
                        target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
                }
        }
@@ -2032,19 +2021,11 @@ static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
 {
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                struct vhost_scsi_tpg, se_tpg);
-       struct vhost_scsi_tmf *tmf;
-
-       tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
-       if (!tmf)
-               return -ENOMEM;
-       INIT_LIST_HEAD(&tmf->queue_entry);
-       vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
 
        mutex_lock(&vhost_scsi_mutex);
 
        mutex_lock(&tpg->tv_tpg_mutex);
        tpg->tv_tpg_port_count++;
-       list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
        mutex_unlock(&tpg->tv_tpg_mutex);
 
        vhost_scsi_hotplug(tpg, lun);
@@ -2059,16 +2040,11 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
 {
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                struct vhost_scsi_tpg, se_tpg);
-       struct vhost_scsi_tmf *tmf;
 
        mutex_lock(&vhost_scsi_mutex);
 
        mutex_lock(&tpg->tv_tpg_mutex);
        tpg->tv_tpg_port_count--;
-       tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
-                              queue_entry);
-       list_del(&tmf->queue_entry);
-       kfree(tmf);
        mutex_unlock(&tpg->tv_tpg_mutex);
 
        vhost_scsi_hotunplug(tpg, lun);
@@ -2329,7 +2305,6 @@ vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
        }
        mutex_init(&tpg->tv_tpg_mutex);
        INIT_LIST_HEAD(&tpg->tv_tpg_list);
-       INIT_LIST_HEAD(&tpg->tmf_queue);
        tpg->tport = tport;
        tpg->tport_tpgt = tpgt;
 
index 0a2c47d..eb565a1 100644 (file)
@@ -823,7 +823,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
        int oldidx = con2fb_map[unit];
        struct fb_info *info = fbcon_registered_fb[newidx];
        struct fb_info *oldinfo = NULL;
-       int found, err = 0, show_logo;
+       int err = 0, show_logo;
 
        WARN_CONSOLE_UNLOCKED();
 
@@ -841,26 +841,26 @@ static int set_con2fb_map(int unit, int newidx, int user)
        if (oldidx != -1)
                oldinfo = fbcon_registered_fb[oldidx];
 
-       found = search_fb_in_map(newidx);
-
-       if (!err && !found) {
+       if (!search_fb_in_map(newidx)) {
                err = con2fb_acquire_newinfo(vc, info, unit);
-               if (!err)
-                       con2fb_map[unit] = newidx;
+               if (err)
+                       return err;
+
+               fbcon_add_cursor_work(info);
        }
 
+       con2fb_map[unit] = newidx;
+
        /*
         * If old fb is not mapped to any of the consoles,
         * fbcon should release it.
         */
-       if (!err && oldinfo && !search_fb_in_map(oldidx))
+       if (oldinfo && !search_fb_in_map(oldidx))
                con2fb_release_oldinfo(vc, oldinfo, info);
 
        show_logo = (fg_console == 0 && !user &&
                         logo_shown != FBCON_LOGO_DONTSHOW);
 
-       if (!found)
-               fbcon_add_cursor_work(info);
        con2fb_map_boot[unit] = newidx;
        con2fb_init_display(vc, info, unit, show_logo);
 
index 875541f..3fd95a7 100644 (file)
@@ -1116,6 +1116,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
        case FBIOPUT_VSCREENINFO:
                if (copy_from_user(&var, argp, sizeof(var)))
                        return -EFAULT;
+               /* only for kernel-internal use */
+               var.activate &= ~FB_ACTIVATE_KD_TEXT;
                console_lock();
                lock_fb_info(info);
                ret = fbcon_modechange_possible(info, &var);
index 50f7f3f..1974a38 100644 (file)
@@ -35,10 +35,12 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
                return retval;
        }
        if (attr_size > buffer_size) {
-               if (!buffer_size) /* request to get the attr_size */
-                       retval = attr_size;
-               else
+               if (buffer_size)
                        retval = -ERANGE;
+               else if (attr_size > SSIZE_MAX)
+                       retval = -EOVERFLOW;
+               else /* request to get the attr_size */
+                       retval = attr_size;
        } else {
                iov_iter_truncate(&to, attr_size);
                retval = p9_client_read(attr_fid, 0, &to, &err);
index 90e40d5..e54f088 100644 (file)
@@ -1921,8 +1921,7 @@ int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
        level = -1;
        ULIST_ITER_INIT(&uiter);
        while (1) {
-               bool is_shared;
-               bool cached;
+               const unsigned long prev_ref_count = ctx->refs.nnodes;
 
                walk_ctx.bytenr = bytenr;
                ret = find_parent_nodes(&walk_ctx, &shared);
@@ -1940,21 +1939,36 @@ int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
                ret = 0;
 
                /*
-                * If our data extent was not directly shared (without multiple
-                * reference items), than it might have a single reference item
-                * with a count > 1 for the same offset, which means there are 2
-                * (or more) file extent items that point to the data extent -
-                * this happens when a file extent item needs to be split and
-                * then one item gets moved to another leaf due to a b+tree leaf
-                * split when inserting some item. In this case the file extent
-                * items may be located in different leaves and therefore some
-                * of the leaves may be referenced through shared subtrees while
-                * others are not. Since our extent buffer cache only works for
-                * a single path (by far the most common case and simpler to
-                * deal with), we can not use it if we have multiple leaves
-                * (which implies multiple paths).
+                * More than one extent buffer (bytenr) may have been added to
+                * the ctx->refs ulist, in which case we have to check multiple
+                * tree paths in case the first one is not shared, so we can not
+                * use the path cache which is made for a single path. Multiple
+                * extent buffers at the current level happen when:
+                *
+                * 1) level -1, the data extent: If our data extent was not
+                *    directly shared (without multiple reference items), then
+                *    it might have a single reference item with a count > 1 for
+                *    the same offset, which means there are 2 (or more) file
+                *    extent items that point to the data extent - this happens
+                *    when a file extent item needs to be split and then one
+                *    item gets moved to another leaf due to a b+tree leaf split
+                *    when inserting some item. In this case the file extent
+                *    items may be located in different leaves and therefore
+                *    some of the leaves may be referenced through shared
+                *    subtrees while others are not. Since our extent buffer
+                *    cache only works for a single path (by far the most common
+                *    case and simpler to deal with), we can not use it if we
+                *    have multiple leaves (which implies multiple paths).
+                *
+                * 2) level >= 0, a tree node/leaf: We can have a mix of direct
+                *    and indirect references on a b+tree node/leaf, so we have
+                *    to check multiple paths, and the extent buffer (the
+                *    current bytenr) may be shared or not. One example is
+                *    during relocation as we may get a shared tree block ref
+                *    (direct ref) and a non-shared tree block ref (indirect
+                *    ref) for the same node/leaf.
                 */
-               if (level == -1 && ctx->refs.nnodes > 1)
+               if ((ctx->refs.nnodes - prev_ref_count) > 1)
                        ctx->use_path_cache = false;
 
                if (level >= 0)
@@ -1964,12 +1978,17 @@ int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
                if (!node)
                        break;
                bytenr = node->val;
-               level++;
-               cached = lookup_backref_shared_cache(ctx, root, bytenr, level,
-                                                    &is_shared);
-               if (cached) {
-                       ret = (is_shared ? 1 : 0);
-                       break;
+               if (ctx->use_path_cache) {
+                       bool is_shared;
+                       bool cached;
+
+                       level++;
+                       cached = lookup_backref_shared_cache(ctx, root, bytenr,
+                                                            level, &is_shared);
+                       if (cached) {
+                               ret = (is_shared ? 1 : 0);
+                               break;
+                       }
                }
                shared.share_count = 0;
                shared.have_delayed_delete_refs = false;
@@ -1977,6 +1996,28 @@ int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
        }
 
        /*
+        * If the path cache is disabled, then it means at some tree level we
+        * got multiple parents due to a mix of direct and indirect backrefs or
+        * multiple leaves with file extent items pointing to the same data
+        * extent. We have to invalidate the cache and cache only the sharedness
+        * result for the levels where we got only one node/reference.
+        */
+       if (!ctx->use_path_cache) {
+               int i = 0;
+
+               level--;
+               if (ret >= 0 && level >= 0) {
+                       bytenr = ctx->path_cache_entries[level].bytenr;
+                       ctx->use_path_cache = true;
+                       store_backref_shared_cache(ctx, root, bytenr, level, ret);
+                       i = level + 1;
+               }
+
+               for ( ; i < BTRFS_MAX_LEVEL; i++)
+                       ctx->path_cache_entries[i].bytenr = 0;
+       }
+
+       /*
         * Cache the sharedness result for the data extent if we know our inode
         * has more than 1 file extent item that refers to the data extent.
         */
index b53f0e3..9e1596b 100644 (file)
@@ -2250,6 +2250,20 @@ static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
 
        fs_info->csum_shash = csum_shash;
 
+       /*
+        * Check if the checksum implementation is a fast accelerated one.
+        * As-is this is a bit of a hack and should be replaced once the csum
+        * implementations provide that information themselves.
+        */
+       switch (csum_type) {
+       case BTRFS_CSUM_TYPE_CRC32:
+               if (!strstr(crypto_shash_driver_name(csum_shash), "generic"))
+                       set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
+               break;
+       default:
+               break;
+       }
+
        btrfs_info(fs_info, "using %s (%s) checksum algorithm",
                        btrfs_super_csum_name(csum_type),
                        crypto_shash_driver_name(csum_shash));
index a0ef1a1..ba769a1 100644 (file)
@@ -3732,7 +3732,9 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
        }
 
        /* update qgroup status and info */
+       mutex_lock(&fs_info->qgroup_ioctl_lock);
        err = btrfs_run_qgroups(trans);
+       mutex_unlock(&fs_info->qgroup_ioctl_lock);
        if (err < 0)
                btrfs_handle_fs_error(fs_info, err,
                                      "failed to update qgroup status and info");
index 52a7d2f..f41da7a 100644 (file)
@@ -2828,13 +2828,22 @@ cleanup:
 }
 
 /*
- * called from commit_transaction. Writes all changed qgroups to disk.
+ * Writes all changed qgroups to disk.
+ * Called by the transaction commit path and the qgroup assign ioctl.
  */
 int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
        int ret = 0;
 
+       /*
+        * In case we are called from the qgroup assign ioctl, assert that we
+        * are holding the qgroup_ioctl_lock, otherwise we can race with a quota
+        * disable operation (ioctl) and access a freed quota root.
+        */
+       if (trans->transaction->state != TRANS_STATE_COMMIT_DOING)
+               lockdep_assert_held(&fs_info->qgroup_ioctl_lock);
+
        if (!fs_info->quota_root)
                return ret;
 
index 581845b..366fb4c 100644 (file)
@@ -1516,8 +1516,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
                shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s", fs_type->name,
                                        s->s_id);
                btrfs_sb(s)->bdev_holder = fs_type;
-               if (!strstr(crc32c_impl(), "generic"))
-                       set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
                error = btrfs_fill_super(s, fs_devices, data);
        }
        if (!error)
@@ -1631,6 +1629,8 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
        btrfs_workqueue_set_max(fs_info->hipri_workers, new_pool_size);
        btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
        btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
+       workqueue_set_max_active(fs_info->endio_workers, new_pool_size);
+       workqueue_set_max_active(fs_info->endio_meta_workers, new_pool_size);
        btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
        btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
        btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
index 18329eb..b8d5b1f 100644 (file)
@@ -2035,7 +2035,20 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
 
        if (current->journal_info == trans)
                current->journal_info = NULL;
-       btrfs_scrub_cancel(fs_info);
+
+       /*
+        * If relocation is running, we can't cancel scrub because that will
+        * result in a deadlock. Before relocating a block group, relocation
+        * pauses scrub, then starts and commits a transaction before unpausing
+        * scrub. If the transaction commit is being done by the relocation
+        * task or triggered by another task and the relocation task is waiting
+        * for the commit, and we end up here due to an error in the commit
+        * path, then calling btrfs_scrub_cancel() will deadlock, as we are
+        * asking for scrub to stop while having it asked to be paused higher
+        * above in relocation code.
+        */
+       if (!test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
+               btrfs_scrub_cancel(fs_info);
 
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
 }
index 6d0124b..c6d5928 100644 (file)
@@ -1366,8 +1366,17 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
         * So, we need to add a special mount option to scan for
         * later supers, using BTRFS_SUPER_MIRROR_MAX instead
         */
-       flags |= FMODE_EXCL;
 
+       /*
+        * Avoid using flag |= FMODE_EXCL here, as the systemd-udev may
+        * initiate the device scan which may race with the user's mount
+        * or mkfs command, resulting in failure.
+        * Since the device scan is solely for reading purposes, there is
+        * no need for FMODE_EXCL. Additionally, the devices are read again
+        * during the mount process. It is ok to get some inconsistent
+        * values temporarily, as the device paths of the fsid are the only
+        * required information for assembling the volume.
+        */
        bdev = blkdev_get_by_path(path, flags, holder);
        if (IS_ERR(bdev))
                return ERR_CAST(bdev);
@@ -3266,8 +3275,15 @@ int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
        btrfs_scrub_pause(fs_info);
        ret = btrfs_relocate_block_group(fs_info, chunk_offset);
        btrfs_scrub_continue(fs_info);
-       if (ret)
+       if (ret) {
+               /*
+                * If we had a transaction abort, stop all running scrubs.
+                * See transaction.c:cleanup_transaction() why we do it here.
+                */
+               if (BTRFS_FS_ERROR(fs_info))
+                       btrfs_scrub_cancel(fs_info);
                return ret;
+       }
 
        block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
        if (!block_group)
index 71fe0a0..415176b 100644 (file)
@@ -124,7 +124,10 @@ extern const struct dentry_operations cifs_ci_dentry_ops;
 #ifdef CONFIG_CIFS_DFS_UPCALL
 extern struct vfsmount *cifs_dfs_d_automount(struct path *path);
 #else
-#define cifs_dfs_d_automount NULL
+static inline struct vfsmount *cifs_dfs_d_automount(struct path *path)
+{
+       return ERR_PTR(-EREMOTE);
+}
 #endif
 
 /* Functions related to symlinks */
index 38a697e..9d963ca 100644 (file)
@@ -71,7 +71,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
        int rc;
        struct cifs_ses *ses;
        struct TCP_Server_Info *server;
-       struct nls_table *nls_codepage;
+       struct nls_table *nls_codepage = NULL;
 
        /*
         * SMBs NegProt, SessSetup, uLogoff do not have tcon yet so check for
@@ -99,6 +99,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
        }
        spin_unlock(&tcon->tc_lock);
 
+again:
        rc = cifs_wait_for_server_reconnect(server, tcon->retry);
        if (rc)
                return rc;
@@ -110,8 +111,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
        }
        spin_unlock(&ses->chan_lock);
 
-       nls_codepage = load_nls_default();
-
+       mutex_lock(&ses->session_mutex);
        /*
         * Recheck after acquire mutex. If another thread is negotiating
         * and the server never sends an answer the socket will be closed
@@ -120,29 +120,38 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
        spin_lock(&server->srv_lock);
        if (server->tcpStatus == CifsNeedReconnect) {
                spin_unlock(&server->srv_lock);
+               mutex_unlock(&ses->session_mutex);
+
+               if (tcon->retry)
+                       goto again;
                rc = -EHOSTDOWN;
                goto out;
        }
        spin_unlock(&server->srv_lock);
 
+       nls_codepage = load_nls_default();
+
        /*
         * need to prevent multiple threads trying to simultaneously
         * reconnect the same SMB session
         */
+       spin_lock(&ses->ses_lock);
        spin_lock(&ses->chan_lock);
-       if (!cifs_chan_needs_reconnect(ses, server)) {
+       if (!cifs_chan_needs_reconnect(ses, server) &&
+           ses->ses_status == SES_GOOD) {
                spin_unlock(&ses->chan_lock);
+               spin_unlock(&ses->ses_lock);
 
                /* this means that we only need to tree connect */
                if (tcon->need_reconnect)
                        goto skip_sess_setup;
 
-               rc = -EHOSTDOWN;
+               mutex_unlock(&ses->session_mutex);
                goto out;
        }
        spin_unlock(&ses->chan_lock);
+       spin_unlock(&ses->ses_lock);
 
-       mutex_lock(&ses->session_mutex);
        rc = cifs_negotiate_protocol(0, ses, server);
        if (!rc)
                rc = cifs_setup_session(0, ses, server, nls_codepage);
@@ -4373,8 +4382,13 @@ CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses,
                return -ENODEV;
 
 getDFSRetry:
-       rc = smb_init(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc, (void **) &pSMB,
-                     (void **) &pSMBr);
+       /*
+        * Use smb_init_no_reconnect() instead of smb_init() as
+        * CIFSGetDFSRefer() may be called from cifs_reconnect_tcon() and thus
+        * causing an infinite recursion.
+        */
+       rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc,
+                                  (void **)&pSMB, (void **)&pSMBr);
        if (rc)
                return rc;
 
index 6d13f82..ace11a1 100644 (file)
@@ -441,13 +441,14 @@ out:
  * but there are some bugs that prevent rename from working if there are
  * multiple delimiters.
  *
- * Returns a sanitized duplicate of @path. The caller is responsible for
- * cleaning up the original.
+ * Returns a sanitized duplicate of @path. @gfp indicates the GFP_* flags
+ * for kstrdup.
+ * The caller is responsible for freeing the original.
  */
 #define IS_DELIM(c) ((c) == '/' || (c) == '\\')
-static char *sanitize_path(char *path)
+char *cifs_sanitize_prepath(char *prepath, gfp_t gfp)
 {
-       char *cursor1 = path, *cursor2 = path;
+       char *cursor1 = prepath, *cursor2 = prepath;
 
        /* skip all prepended delimiters */
        while (IS_DELIM(*cursor1))
@@ -469,7 +470,7 @@ static char *sanitize_path(char *path)
                cursor2--;
 
        *(cursor2) = '\0';
-       return kstrdup(path, GFP_KERNEL);
+       return kstrdup(prepath, gfp);
 }
 
 /*
@@ -531,7 +532,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
        if (!*pos)
                return 0;
 
-       ctx->prepath = sanitize_path(pos);
+       ctx->prepath = cifs_sanitize_prepath(pos, GFP_KERNEL);
        if (!ctx->prepath)
                return -ENOMEM;
 
index 3de00e7..f4eaf85 100644 (file)
@@ -287,4 +287,7 @@ extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
  */
 #define SMB3_MAX_DCLOSETIMEO (1 << 30)
 #define SMB3_DEF_DCLOSETIMEO (1 * HZ) /* even 1 sec enough to help eg open/write/close/open/read */
+
+extern char *cifs_sanitize_prepath(char *prepath, gfp_t gfp);
+
 #endif
index b44fb51..7f085ed 100644 (file)
@@ -1195,7 +1195,7 @@ int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
        kfree(cifs_sb->prepath);
 
        if (prefix && *prefix) {
-               cifs_sb->prepath = kstrdup(prefix, GFP_ATOMIC);
+               cifs_sb->prepath = cifs_sanitize_prepath(prefix, GFP_ATOMIC);
                if (!cifs_sb->prepath)
                        return -ENOMEM;
 
index 6bd2aa6..4245249 100644 (file)
@@ -310,7 +310,6 @@ out:
        case SMB2_READ:
        case SMB2_WRITE:
        case SMB2_LOCK:
-       case SMB2_IOCTL:
        case SMB2_QUERY_DIRECTORY:
        case SMB2_CHANGE_NOTIFY:
        case SMB2_QUERY_INFO:
@@ -588,11 +587,15 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
 
 }
 
+/* If invalid preauth context warn but use what we requested, SHA-512 */
 static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
 {
        unsigned int len = le16_to_cpu(ctxt->DataLength);
 
-       /* If invalid preauth context warn but use what we requested, SHA-512 */
+       /*
+        * Caller checked that DataLength remains within SMB boundary. We still
+        * need to confirm that one HashAlgorithms member is accounted for.
+        */
        if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
                pr_warn_once("server sent bad preauth context\n");
                return;
@@ -611,7 +614,11 @@ static void decode_compress_ctx(struct TCP_Server_Info *server,
 {
        unsigned int len = le16_to_cpu(ctxt->DataLength);
 
-       /* sizeof compress context is a one element compression capbility struct */
+       /*
+        * Caller checked that DataLength remains within SMB boundary. We still
+        * need to confirm that one CompressionAlgorithms member is accounted
+        * for.
+        */
        if (len < 10) {
                pr_warn_once("server sent bad compression cntxt\n");
                return;
@@ -633,6 +640,11 @@ static int decode_encrypt_ctx(struct TCP_Server_Info *server,
        unsigned int len = le16_to_cpu(ctxt->DataLength);
 
        cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
+       /*
+        * Caller checked that DataLength remains within SMB boundary. We still
+        * need to confirm that one Cipher flexible array member is accounted
+        * for.
+        */
        if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
                pr_warn_once("server sent bad crypto ctxt len\n");
                return -EINVAL;
@@ -679,6 +691,11 @@ static void decode_signing_ctx(struct TCP_Server_Info *server,
 {
        unsigned int len = le16_to_cpu(pctxt->DataLength);
 
+       /*
+        * Caller checked that DataLength remains within SMB boundary. We still
+        * need to confirm that one SigningAlgorithms flexible array member is
+        * accounted for.
+        */
        if ((len < 4) || (len > 16)) {
                pr_warn_once("server sent bad signing negcontext\n");
                return;
@@ -720,14 +737,19 @@ static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
        for (i = 0; i < ctxt_cnt; i++) {
                int clen;
                /* check that offset is not beyond end of SMB */
-               if (len_of_ctxts == 0)
-                       break;
-
                if (len_of_ctxts < sizeof(struct smb2_neg_context))
                        break;
 
                pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
-               clen = le16_to_cpu(pctx->DataLength);
+               clen = sizeof(struct smb2_neg_context)
+                       + le16_to_cpu(pctx->DataLength);
+               /*
+                * 2.2.4 SMB2 NEGOTIATE Response
+                * Subsequent negotiate contexts MUST appear at the first 8-byte
+                * aligned offset following the previous negotiate context.
+                */
+               if (i + 1 != ctxt_cnt)
+                       clen = ALIGN(clen, 8);
                if (clen > len_of_ctxts)
                        break;
 
@@ -748,12 +770,10 @@ static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
                else
                        cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
                                le16_to_cpu(pctx->ContextType));
-
                if (rc)
                        break;
-               /* offsets must be 8 byte aligned */
-               clen = ALIGN(clen, 8);
-               offset += clen + sizeof(struct smb2_neg_context);
+
+               offset += clen;
                len_of_ctxts -= clen;
        }
        return rc;
index 115a67d..365ac32 100644 (file)
@@ -112,10 +112,8 @@ void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
        struct ksmbd_conn *conn = work->conn;
        struct list_head *requests_queue = NULL;
 
-       if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE) {
+       if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
                requests_queue = &conn->requests;
-               work->synchronous = true;
-       }
 
        if (requests_queue) {
                atomic_inc(&conn->req_running);
@@ -136,14 +134,14 @@ int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
 
        if (!work->multiRsp)
                atomic_dec(&conn->req_running);
-       spin_lock(&conn->request_lock);
        if (!work->multiRsp) {
+               spin_lock(&conn->request_lock);
                list_del_init(&work->request_entry);
-               if (!work->synchronous)
-                       list_del_init(&work->async_request_entry);
+               spin_unlock(&conn->request_lock);
+               if (work->asynchronous)
+                       release_async_work(work);
                ret = 0;
        }
-       spin_unlock(&conn->request_lock);
 
        wake_up_all(&conn->req_running_q);
        return ret;
@@ -326,10 +324,7 @@ int ksmbd_conn_handler_loop(void *p)
 
                /* 4 for rfc1002 length field */
                size = pdu_size + 4;
-               conn->request_buf = kvmalloc(size,
-                                            GFP_KERNEL |
-                                            __GFP_NOWARN |
-                                            __GFP_NORETRY);
+               conn->request_buf = kvmalloc(size, GFP_KERNEL);
                if (!conn->request_buf)
                        break;
 
index 3234f2c..f8ae614 100644 (file)
@@ -68,7 +68,7 @@ struct ksmbd_work {
        /* Request is encrypted */
        bool                            encrypted:1;
        /* Is this SYNC or ASYNC ksmbd_work */
-       bool                            synchronous:1;
+       bool                            asynchronous:1;
        bool                            need_invalidate_rkey:1;
 
        unsigned int                    remote_key;
index 394b6ce..0d82427 100644 (file)
@@ -289,10 +289,7 @@ static int queue_ksmbd_work(struct ksmbd_conn *conn)
        work->request_buf = conn->request_buf;
        conn->request_buf = NULL;
 
-       if (ksmbd_init_smb_server(work)) {
-               ksmbd_free_work_struct(work);
-               return -EINVAL;
-       }
+       ksmbd_init_smb_server(work);
 
        ksmbd_conn_enqueue_request(work);
        atomic_inc(&conn->r_count);
index 97c9d1b..67b7e76 100644 (file)
@@ -229,9 +229,6 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
        struct smb2_negotiate_rsp *rsp;
        struct ksmbd_conn *conn = work->conn;
 
-       if (conn->need_neg == false)
-               return -EINVAL;
-
        *(__be32 *)work->response_buf =
                cpu_to_be32(conn->vals->header_size);
 
@@ -498,12 +495,6 @@ int init_smb2_rsp_hdr(struct ksmbd_work *work)
        rsp_hdr->SessionId = rcv_hdr->SessionId;
        memcpy(rsp_hdr->Signature, rcv_hdr->Signature, 16);
 
-       work->synchronous = true;
-       if (work->async_id) {
-               ksmbd_release_id(&conn->async_ida, work->async_id);
-               work->async_id = 0;
-       }
-
        return 0;
 }
 
@@ -644,7 +635,7 @@ int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
                pr_err("Failed to alloc async message id\n");
                return id;
        }
-       work->synchronous = false;
+       work->asynchronous = true;
        work->async_id = id;
        rsp_hdr->Id.AsyncId = cpu_to_le64(id);
 
@@ -664,6 +655,24 @@ int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
        return 0;
 }
 
+void release_async_work(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+
+       spin_lock(&conn->request_lock);
+       list_del_init(&work->async_request_entry);
+       spin_unlock(&conn->request_lock);
+
+       work->asynchronous = 0;
+       work->cancel_fn = NULL;
+       kfree(work->cancel_argv);
+       work->cancel_argv = NULL;
+       if (work->async_id) {
+               ksmbd_release_id(&conn->async_ida, work->async_id);
+               work->async_id = 0;
+       }
+}
+
 void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status)
 {
        struct smb2_hdr *rsp_hdr;
@@ -867,17 +876,21 @@ static void assemble_neg_contexts(struct ksmbd_conn *conn,
 }
 
 static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn,
-                                 struct smb2_preauth_neg_context *pneg_ctxt)
+                                 struct smb2_preauth_neg_context *pneg_ctxt,
+                                 int len_of_ctxts)
 {
-       __le32 err = STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP;
+       /*
+        * sizeof(smb2_preauth_neg_context) assumes SMB311_SALT_SIZE Salt,
+        * which may not be present. Only check for used HashAlgorithms[1].
+        */
+       if (len_of_ctxts < MIN_PREAUTH_CTXT_DATA_LEN)
+               return STATUS_INVALID_PARAMETER;
 
-       if (pneg_ctxt->HashAlgorithms == SMB2_PREAUTH_INTEGRITY_SHA512) {
-               conn->preauth_info->Preauth_HashId =
-                       SMB2_PREAUTH_INTEGRITY_SHA512;
-               err = STATUS_SUCCESS;
-       }
+       if (pneg_ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
+               return STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP;
 
-       return err;
+       conn->preauth_info->Preauth_HashId = SMB2_PREAUTH_INTEGRITY_SHA512;
+       return STATUS_SUCCESS;
 }
 
 static void decode_encrypt_ctxt(struct ksmbd_conn *conn,
@@ -1005,7 +1018,8 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
                                break;
 
                        status = decode_preauth_ctxt(conn,
-                                                    (struct smb2_preauth_neg_context *)pctx);
+                                                    (struct smb2_preauth_neg_context *)pctx,
+                                                    len_of_ctxts);
                        if (status != STATUS_SUCCESS)
                                break;
                } else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES) {
@@ -7045,13 +7059,9 @@ skip:
 
                                ksmbd_vfs_posix_lock_wait(flock);
 
-                               spin_lock(&work->conn->request_lock);
                                spin_lock(&fp->f_lock);
                                list_del(&work->fp_entry);
-                               work->cancel_fn = NULL;
-                               kfree(argv);
                                spin_unlock(&fp->f_lock);
-                               spin_unlock(&work->conn->request_lock);
 
                                if (work->state != KSMBD_WORK_ACTIVE) {
                                        list_del(&smb_lock->llist);
@@ -7069,6 +7079,7 @@ skip:
                                                work->send_no_response = 1;
                                                goto out;
                                        }
+
                                        init_smb2_rsp_hdr(work);
                                        smb2_set_err_rsp(work);
                                        rsp->hdr.Status =
@@ -7081,7 +7092,7 @@ skip:
                                spin_lock(&work->conn->llist_lock);
                                list_del(&smb_lock->clist);
                                spin_unlock(&work->conn->llist_lock);
-
+                               release_async_work(work);
                                goto retry;
                        } else if (!rc) {
                                spin_lock(&work->conn->llist_lock);
index 0c8a770..9420dd2 100644 (file)
@@ -486,6 +486,7 @@ int find_matching_smb2_dialect(int start_index, __le16 *cli_dialects,
 struct file_lock *smb_flock_init(struct file *f);
 int setup_async_work(struct ksmbd_work *work, void (*fn)(void **),
                     void **arg);
+void release_async_work(struct ksmbd_work *work);
 void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status);
 struct channel *lookup_chann_list(struct ksmbd_session *sess,
                                  struct ksmbd_conn *conn);
index 9c1ce6d..af0c2a9 100644 (file)
@@ -283,20 +283,121 @@ err_out:
        return BAD_PROT_ID;
 }
 
-int ksmbd_init_smb_server(struct ksmbd_work *work)
+#define SMB_COM_NEGOTIATE_EX   0x0
+
+/**
+ * get_smb1_cmd_val() - get smb command value from smb header
+ * @work:      smb work containing smb header
+ *
+ * Return:      smb command value
+ */
+static u16 get_smb1_cmd_val(struct ksmbd_work *work)
 {
-       struct ksmbd_conn *conn = work->conn;
+       return SMB_COM_NEGOTIATE_EX;
+}
 
-       if (conn->need_neg == false)
+/**
+ * init_smb1_rsp_hdr() - initialize smb negotiate response header
+ * @work:      smb work containing smb request
+ *
+ * Return:      0 on success, otherwise -EINVAL
+ */
+static int init_smb1_rsp_hdr(struct ksmbd_work *work)
+{
+       struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
+       struct smb_hdr *rcv_hdr = (struct smb_hdr *)work->request_buf;
+
+       /*
+        * Remove 4 byte direct TCP header.
+        */
+       *(__be32 *)work->response_buf =
+               cpu_to_be32(sizeof(struct smb_hdr) - 4);
+
+       rsp_hdr->Command = SMB_COM_NEGOTIATE;
+       *(__le32 *)rsp_hdr->Protocol = SMB1_PROTO_NUMBER;
+       rsp_hdr->Flags = SMBFLG_RESPONSE;
+       rsp_hdr->Flags2 = SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS |
+               SMBFLG2_EXT_SEC | SMBFLG2_IS_LONG_NAME;
+       rsp_hdr->Pid = rcv_hdr->Pid;
+       rsp_hdr->Mid = rcv_hdr->Mid;
+       return 0;
+}
+
+/**
+ * smb1_check_user_session() - check for valid session for a user
+ * @work:      smb work containing smb request buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+static int smb1_check_user_session(struct ksmbd_work *work)
+{
+       unsigned int cmd = work->conn->ops->get_cmd_val(work);
+
+       if (cmd == SMB_COM_NEGOTIATE_EX)
                return 0;
 
-       init_smb3_11_server(conn);
+       return -EINVAL;
+}
+
+/**
+ * smb1_allocate_rsp_buf() - allocate response buffer for a command
+ * @work:      smb work containing smb request
+ *
+ * Return:      0 on success, otherwise -ENOMEM
+ */
+static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
+{
+       work->response_buf = kmalloc(MAX_CIFS_SMALL_BUFFER_SIZE,
+                       GFP_KERNEL | __GFP_ZERO);
+       work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
+
+       if (!work->response_buf) {
+               pr_err("Failed to allocate %u bytes buffer\n",
+                               MAX_CIFS_SMALL_BUFFER_SIZE);
+               return -ENOMEM;
+       }
 
-       if (conn->ops->get_cmd_val(work) != SMB_COM_NEGOTIATE)
-               conn->need_neg = false;
        return 0;
 }
 
+static struct smb_version_ops smb1_server_ops = {
+       .get_cmd_val = get_smb1_cmd_val,
+       .init_rsp_hdr = init_smb1_rsp_hdr,
+       .allocate_rsp_buf = smb1_allocate_rsp_buf,
+       .check_user_session = smb1_check_user_session,
+};
+
+static int smb1_negotiate(struct ksmbd_work *work)
+{
+       return ksmbd_smb_negotiate_common(work, SMB_COM_NEGOTIATE);
+}
+
+static struct smb_version_cmds smb1_server_cmds[1] = {
+       [SMB_COM_NEGOTIATE_EX]  = { .proc = smb1_negotiate, },
+};
+
+static void init_smb1_server(struct ksmbd_conn *conn)
+{
+       conn->ops = &smb1_server_ops;
+       conn->cmds = smb1_server_cmds;
+       conn->max_cmds = ARRAY_SIZE(smb1_server_cmds);
+}
+
+void ksmbd_init_smb_server(struct ksmbd_work *work)
+{
+       struct ksmbd_conn *conn = work->conn;
+       __le32 proto;
+
+       if (conn->need_neg == false)
+               return;
+
+       proto = *(__le32 *)((struct smb_hdr *)work->request_buf)->Protocol;
+       if (proto == SMB1_PROTO_NUMBER)
+               init_smb1_server(conn);
+       else
+               init_smb3_11_server(conn);
+}
+
 int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
                                      struct ksmbd_file *dir,
                                      struct ksmbd_dir_info *d_info,
@@ -444,20 +545,10 @@ static int smb_handle_negotiate(struct ksmbd_work *work)
 
        ksmbd_debug(SMB, "Unsupported SMB1 protocol\n");
 
-       /*
-        * Remove 4 byte direct TCP header, add 2 byte bcc and
-        * 2 byte DialectIndex.
-        */
-       *(__be32 *)work->response_buf =
-               cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2 + 2);
+       /* Add 2 byte bcc and 2 byte DialectIndex. */
+       inc_rfc1001_len(work->response_buf, 4);
        neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
 
-       neg_rsp->hdr.Command = SMB_COM_NEGOTIATE;
-       *(__le32 *)neg_rsp->hdr.Protocol = SMB1_PROTO_NUMBER;
-       neg_rsp->hdr.Flags = SMBFLG_RESPONSE;
-       neg_rsp->hdr.Flags2 = SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS |
-               SMBFLG2_EXT_SEC | SMBFLG2_IS_LONG_NAME;
-
        neg_rsp->hdr.WordCount = 1;
        neg_rsp->DialectIndex = cpu_to_le16(work->conn->dialect);
        neg_rsp->ByteCount = 0;
@@ -474,23 +565,12 @@ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
        ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);
 
        if (command == SMB2_NEGOTIATE_HE) {
-               struct smb2_hdr *smb2_hdr = smb2_get_msg(work->request_buf);
-
-               if (smb2_hdr->ProtocolId != SMB2_PROTO_NUMBER) {
-                       ksmbd_debug(SMB, "Downgrade to SMB1 negotiation\n");
-                       command = SMB_COM_NEGOTIATE;
-               }
-       }
-
-       if (command == SMB2_NEGOTIATE_HE) {
                ret = smb2_handle_negotiate(work);
-               init_smb2_neg_rsp(work);
                return ret;
        }
 
        if (command == SMB_COM_NEGOTIATE) {
                if (__smb2_negotiate(conn)) {
-                       conn->need_neg = true;
                        init_smb3_11_server(conn);
                        init_smb2_neg_rsp(work);
                        ksmbd_debug(SMB, "Upgrade to SMB2 negotiation\n");
index d30ce4c..9130d2e 100644 (file)
@@ -427,7 +427,7 @@ bool ksmbd_smb_request(struct ksmbd_conn *conn);
 
 int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count);
 
-int ksmbd_init_smb_server(struct ksmbd_work *work);
+void ksmbd_init_smb_server(struct ksmbd_work *work);
 
 struct ksmbd_kstat;
 int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
index a0db699..9ae6769 100644 (file)
@@ -114,24 +114,6 @@ cp_convert:
 }
 
 /*
- * is_char_allowed() - check for valid character
- * @ch:                input character to be checked
- *
- * Return:     1 if char is allowed, otherwise 0
- */
-static inline int is_char_allowed(char *ch)
-{
-       /* check for control chars, wildcards etc. */
-       if (!(*ch & 0x80) &&
-           (*ch <= 0x1f ||
-            *ch == '?' || *ch == '"' || *ch == '<' ||
-            *ch == '>' || *ch == '|'))
-               return 0;
-
-       return 1;
-}
-
-/*
  * smb_from_utf16() - convert utf16le string to local charset
  * @to:                destination buffer
  * @from:      source buffer
index bc0f152..6836e93 100644 (file)
@@ -4183,9 +4183,9 @@ out:
        unlock_mount_hash();
 
        if (kattr->propagation) {
-               namespace_unlock();
                if (err)
                        cleanup_group_ids(mnt, NULL);
+               namespace_unlock();
        }
 
        return err;
index e9a45de..8a4c866 100644 (file)
@@ -139,7 +139,7 @@ static ssize_t netfs_extract_user_to_sg(struct iov_iter *iter,
                        size_t seg = min_t(size_t, PAGE_SIZE - off, len);
 
                        *pages++ = NULL;
-                       sg_set_page(sg, page, len, off);
+                       sg_set_page(sg, page, seg, off);
                        sgtable->nents++;
                        sg++;
                        len -= seg;
index 450d6c3..c1c7ed2 100644 (file)
@@ -75,7 +75,6 @@ config NFS_V3_ACL
 config NFS_V4
        tristate "NFS client support for NFS version 4"
        depends on NFS_FS
-       select RPCSEC_GSS_KRB5
        select KEYS
        help
          This option enables support for version 4 of the NFS protocol
index 22a93ae..5607b1e 100644 (file)
@@ -1980,8 +1980,7 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
        if (!data->rpc_done) {
                if (data->rpc_status)
                        return ERR_PTR(data->rpc_status);
-               /* cached opens have already been processed */
-               goto update;
+               return nfs4_try_open_cached(data);
        }
 
        ret = nfs_refresh_inode(inode, &data->f_attr);
@@ -1990,7 +1989,7 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
 
        if (data->o_res.delegation_type != 0)
                nfs4_opendata_check_deleg(data, state);
-update:
+
        if (!update_open_stateid(state, &data->o_res.stateid,
                                NULL, data->o_arg.fmode))
                return ERR_PTR(-EAGAIN);
index 04697f8..01d7fd1 100644 (file)
@@ -297,6 +297,7 @@ nfsd4_block_get_device_info_scsi(struct super_block *sb,
 
 out_free_dev:
        kfree(dev);
+       gdp->gd_device = NULL;
        return ret;
 }
 
index 2a815f5..4039ffc 100644 (file)
@@ -946,8 +946,8 @@ static const struct cred *get_backchannel_cred(struct nfs4_client *clp, struct r
                if (!kcred)
                        return NULL;
 
-               kcred->uid = ses->se_cb_sec.uid;
-               kcred->gid = ses->se_cb_sec.gid;
+               kcred->fsuid = ses->se_cb_sec.uid;
+               kcred->fsgid = ses->se_cb_sec.gid;
                return kcred;
        }
 }
index e12e5a4..e2e4851 100644 (file)
@@ -2476,10 +2476,12 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
        for (i = 0; i < argp->opcnt; i++) {
                op = &argp->ops[i];
                op->replay = NULL;
+               op->opdesc = NULL;
 
                if (xdr_stream_decode_u32(argp->xdr, &op->opnum) < 0)
                        return false;
                if (nfsd4_opnum_in_range(argp, op)) {
+                       op->opdesc = OPDESC(op);
                        op->status = nfsd4_dec_ops[op->opnum](argp, &op->u);
                        if (op->status != nfs_ok)
                                trace_nfsd_compound_decode_err(argp->rqstp,
@@ -2490,7 +2492,7 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
                        op->opnum = OP_ILLEGAL;
                        op->status = nfserr_op_illegal;
                }
-               op->opdesc = OPDESC(op);
+
                /*
                 * We'll try to cache the result in the DRC if any one
                 * op in the compound wants to be cached:
@@ -5400,10 +5402,8 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
        __be32 *p;
 
        p = xdr_reserve_space(xdr, 8);
-       if (!p) {
-               WARN_ON_ONCE(1);
-               return;
-       }
+       if (!p)
+               goto release;
        *p++ = cpu_to_be32(op->opnum);
        post_err_offset = xdr->buf->len;
 
@@ -5418,8 +5418,6 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
        op->status = encoder(resp, op->status, &op->u);
        if (op->status)
                trace_nfsd_compound_encode_err(rqstp, op->opnum, op->status);
-       if (opdesc && opdesc->op_release)
-               opdesc->op_release(&op->u);
        xdr_commit_encode(xdr);
 
        /* nfsd4_check_resp_size guarantees enough room for error status */
@@ -5460,6 +5458,9 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
        }
 status:
        *p = op->status;
+release:
+       if (opdesc && opdesc->op_release)
+               opdesc->op_release(&op->u);
 }
 
 /* 
index 617e4f9..132f01d 100644 (file)
@@ -382,6 +382,7 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
        struct zonefs_zone *z = zonefs_inode_zone(inode);
        struct block_device *bdev = inode->i_sb->s_bdev;
        unsigned int max = bdev_max_zone_append_sectors(bdev);
+       pgoff_t start, end;
        struct bio *bio;
        ssize_t size = 0;
        int nr_pages;
@@ -390,6 +391,19 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
        max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
        iov_iter_truncate(from, max);
 
+       /*
+        * If the inode block size (zone write granularity) is smaller than the
+        * page size, we may be appending data belonging to the last page of the
+        * inode straddling inode->i_size, with that page already cached due to
+        * a buffered read or readahead. So make sure to invalidate that page.
+        * This will always be a no-op for the case where the block size is
+        * equal to the page size.
+        */
+       start = iocb->ki_pos >> PAGE_SHIFT;
+       end = (iocb->ki_pos + iov_iter_count(from) - 1) >> PAGE_SHIFT;
+       if (invalidate_inode_pages2_range(inode->i_mapping, start, end))
+               return -EBUSY;
+
        nr_pages = iov_iter_npages(from, BIO_MAX_VECS);
        if (!nr_pages)
                return 0;
@@ -567,11 +581,21 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
                append = sync;
        }
 
-       if (append)
+       if (append) {
                ret = zonefs_file_dio_append(iocb, from);
-       else
+       } else {
+               /*
+                * iomap_dio_rw() may return ENOTBLK if there was an issue with
+                * page invalidation. Overwrite that error code with EBUSY to
+                * be consistent with zonefs_file_dio_append() return value for
+                * similar issues.
+                */
                ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
                                   &zonefs_write_dio_ops, 0, NULL, 0);
+               if (ret == -ENOTBLK)
+                       ret = -EBUSY;
+       }
+
        if (zonefs_zone_is_seq(z) &&
            (ret > 0 || ret == -EIOCBQUEUED)) {
                if (ret > 0)
index 8ed9bec..ff5a8da 100644 (file)
@@ -59,8 +59,6 @@ extern void acpi_video_unregister(void);
 extern void acpi_video_register_backlight(void);
 extern int acpi_video_get_edid(struct acpi_device *device, int type,
                               int device_id, void **edid);
-extern enum acpi_backlight_type acpi_video_get_backlight_type(void);
-extern bool acpi_video_backlight_use_native(void);
 /*
  * Note: The value returned by acpi_video_handles_brightness_key_presses()
  * may change over time and should not be cached.
@@ -69,6 +67,19 @@ extern bool acpi_video_handles_brightness_key_presses(void);
 extern int acpi_video_get_levels(struct acpi_device *device,
                                 struct acpi_video_device_brightness **dev_br,
                                 int *pmax_level);
+
+extern enum acpi_backlight_type __acpi_video_get_backlight_type(bool native,
+                                                               bool *auto_detect);
+
+static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
+{
+       return __acpi_video_get_backlight_type(false, NULL);
+}
+
+static inline bool acpi_video_backlight_use_native(void)
+{
+       return __acpi_video_get_backlight_type(true, NULL) == acpi_backlight_native;
+}
 #else
 static inline void acpi_video_report_nolcd(void) { return; };
 static inline int acpi_video_register(void) { return -ENODEV; }
index 04b8be9..e271d67 100644 (file)
@@ -130,7 +130,7 @@ ATOMIC_OP(xor, ^)
 #define arch_atomic_read(v)                    READ_ONCE((v)->counter)
 #define arch_atomic_set(v, i)                  WRITE_ONCE(((v)->counter), (i))
 
-#define arch_atomic_xchg(ptr, v)               (arch_xchg(&(ptr)->counter, (v)))
-#define arch_atomic_cmpxchg(v, old, new)       (arch_cmpxchg(&((v)->counter), (old), (new)))
+#define arch_atomic_xchg(ptr, v)               (arch_xchg(&(ptr)->counter, (u32)(v)))
+#define arch_atomic_cmpxchg(v, old, new)       (arch_cmpxchg(&((v)->counter), (u32)(old), (u32)(new)))
 
 #endif /* __ASM_GENERIC_ATOMIC_H */
index c3e7315..3df9f59 100644 (file)
@@ -26,16 +26,16 @@ static inline unsigned long __generic_cmpxchg_local(volatile void *ptr,
        raw_local_irq_save(flags);
        switch (size) {
        case 1: prev = *(u8 *)ptr;
-               if (prev == (u8)old)
-                       *(u8 *)ptr = (u8)new;
+               if (prev == (old & 0xffu))
+                       *(u8 *)ptr = (new & 0xffu);
                break;
        case 2: prev = *(u16 *)ptr;
-               if (prev == (u16)old)
-                       *(u16 *)ptr = (u16)new;
+               if (prev == (old & 0xffffu))
+                       *(u16 *)ptr = (new & 0xffffu);
                break;
        case 4: prev = *(u32 *)ptr;
-               if (prev == (u32)old)
-                       *(u32 *)ptr = (u32)new;
+               if (prev == (old & 0xffffffffu))
+                       *(u32 *)ptr = (new & 0xffffffffu);
                break;
        case 8: prev = *(u64 *)ptr;
                if (prev == old)
index dca4419..848de25 100644 (file)
@@ -32,7 +32,7 @@ unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
 #else
                local_irq_save(flags);
                ret = *(volatile u8 *)ptr;
-               *(volatile u8 *)ptr = x;
+               *(volatile u8 *)ptr = (x & 0xffu);
                local_irq_restore(flags);
                return ret;
 #endif /* __xchg_u8 */
@@ -43,7 +43,7 @@ unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
 #else
                local_irq_save(flags);
                ret = *(volatile u16 *)ptr;
-               *(volatile u16 *)ptr = x;
+               *(volatile u16 *)ptr = (x & 0xffffu);
                local_irq_restore(flags);
                return ret;
 #endif /* __xchg_u16 */
@@ -54,7 +54,7 @@ unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
 #else
                local_irq_save(flags);
                ret = *(volatile u32 *)ptr;
-               *(volatile u32 *)ptr = x;
+               *(volatile u32 *)ptr = (x & 0xffffffffu);
                local_irq_restore(flags);
                return ret;
 #endif /* __xchg_u32 */
index 4c44a29..587e7e9 100644 (file)
@@ -236,7 +236,7 @@ static inline u64 readq(const volatile void __iomem *addr)
 
        log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
        __io_br();
-       val = __le64_to_cpu(__raw_readq(addr));
+       val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
        __io_ar(val);
        log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
        return val;
@@ -287,7 +287,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
 {
        log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
        __io_bw();
-       __raw_writeq(__cpu_to_le64(value), addr);
+       __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
        __io_aw();
        log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
 }
@@ -319,7 +319,7 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
        u16 val;
 
        log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
-       val = __le16_to_cpu(__raw_readw(addr));
+       val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
        log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
        return val;
 }
@@ -332,7 +332,7 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
        u32 val;
 
        log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
-       val = __le32_to_cpu(__raw_readl(addr));
+       val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
        log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
        return val;
 }
@@ -345,7 +345,7 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
        u64 val;
 
        log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
-       val = __le64_to_cpu(__raw_readq(addr));
+       val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
        log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
        return val;
 }
@@ -366,7 +366,7 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
 static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
 {
        log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
-       __raw_writew(cpu_to_le16(value), addr);
+       __raw_writew((u16 __force)cpu_to_le16(value), addr);
        log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
@@ -376,7 +376,7 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
 static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
 {
        log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
-       __raw_writel(__cpu_to_le32(value), addr);
+       __raw_writel((u32 __force)__cpu_to_le32(value), addr);
        log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
@@ -386,7 +386,7 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
 static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
 {
        log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
-       __raw_writeq(__cpu_to_le64(value), addr);
+       __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
        log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
index 9db9e5e..9935d1e 100644 (file)
@@ -228,13 +228,6 @@ struct drm_sched_entity {
         */
        struct rb_node                  rb_tree_node;
 
-       /**
-        * @elapsed_ns:
-        *
-        * Records the amount of time where jobs from this entity were active
-        * on the GPU.
-        */
-       uint64_t elapsed_ns;
 };
 
 /**
index c6fab00..5b2f814 100644 (file)
@@ -218,7 +218,6 @@ enum cpuhp_state {
        CPUHP_AP_PERF_X86_CQM_ONLINE,
        CPUHP_AP_PERF_X86_CSTATE_ONLINE,
        CPUHP_AP_PERF_X86_IDXD_ONLINE,
-       CPUHP_AP_PERF_X86_IOMMU_PERF_ONLINE,
        CPUHP_AP_PERF_S390_CF_ONLINE,
        CPUHP_AP_PERF_S390_SF_ONLINE,
        CPUHP_AP_PERF_ARM_CCI_ONLINE,
index 366c730..402fc06 100644 (file)
@@ -980,7 +980,7 @@ static inline void __ftrace_enabled_restore(int enabled)
 #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
 #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
 
-static inline unsigned long get_lock_parent_ip(void)
+static __always_inline unsigned long get_lock_parent_ip(void)
 {
        unsigned long addr = CALLER_ADDR0;
 
index 8ada237..a9adf75 100644 (file)
@@ -755,6 +755,7 @@ struct kvm {
        struct {
                spinlock_t        lock;
                struct list_head  items;
+               /* resampler_list update side is protected by resampler_lock. */
                struct list_head  resampler_list;
                struct mutex      resampler_lock;
        } irqfds;
@@ -1986,6 +1987,9 @@ int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
 #ifdef CONFIG_HAVE_KVM_IRQFD
 int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
 void kvm_irqfd_release(struct kvm *kvm);
+bool kvm_notify_irqfd_resampler(struct kvm *kvm,
+                               unsigned int irqchip,
+                               unsigned int pin);
 void kvm_irq_routing_update(struct kvm *);
 #else
 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
@@ -1994,6 +1998,13 @@ static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 }
 
 static inline void kvm_irqfd_release(struct kvm *kvm) {}
+
+static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm,
+                                             unsigned int irqchip,
+                                             unsigned int pin)
+{
+       return false;
+}
 #endif
 
 #else
index dac047a..8ad4369 100644 (file)
@@ -31,7 +31,7 @@ struct kvm_kernel_irqfd_resampler {
        /*
         * Entry in list of kvm->irqfd.resampler_list.  Use for sharing
         * resamplers among irqfds on the same gsi.
-        * Accessed and modified under kvm->irqfds.resampler_lock
+        * RCU list modified under kvm->irqfds.resampler_lock
         */
        struct list_head link;
 };
index 71b06eb..1db19a9 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/types.h>
 #include <rdma/ib_verbs.h>
 #include <linux/mlx5/mlx5_ifc.h>
+#include <linux/bitfield.h>
 
 #if defined(__LITTLE_ENDIAN)
 #define MLX5_SET_HOST_ENDIANNESS       0
@@ -980,14 +981,23 @@ enum {
 };
 
 enum {
-       CQE_RSS_HTYPE_IP        = 0x3 << 2,
+       CQE_RSS_HTYPE_IP        = GENMASK(3, 2),
        /* cqe->rss_hash_type[3:2] - IP destination selected for hash
         * (00 = none,  01 = IPv4, 10 = IPv6, 11 = Reserved)
         */
-       CQE_RSS_HTYPE_L4        = 0x3 << 6,
+       CQE_RSS_IP_NONE         = 0x0,
+       CQE_RSS_IPV4            = 0x1,
+       CQE_RSS_IPV6            = 0x2,
+       CQE_RSS_RESERVED        = 0x3,
+
+       CQE_RSS_HTYPE_L4        = GENMASK(7, 6),
        /* cqe->rss_hash_type[7:6] - L4 destination selected for hash
         * (00 = none, 01 = TCP. 10 = UDP, 11 = IPSEC.SPI
         */
+       CQE_RSS_L4_NONE         = 0x0,
+       CQE_RSS_L4_TCP          = 0x1,
+       CQE_RSS_L4_UDP          = 0x2,
+       CQE_RSS_L4_IPSEC        = 0x3,
 };
 
 enum {
index 470085b..c35f04f 100644 (file)
@@ -1624,7 +1624,8 @@ struct net_device_ops {
 
 struct xdp_metadata_ops {
        int     (*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp);
-       int     (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash);
+       int     (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash,
+                              enum xdp_rss_hash_type *rss_type);
 };
 
 /**
index ed9b4df..43765ea 100644 (file)
@@ -34,6 +34,10 @@ struct pci_doe_mb;
  * @work: Used internally by the mailbox
  * @doe_mb: Used internally by the mailbox
  *
+ * Payloads are treated as opaque byte streams which are transmitted verbatim,
+ * without byte-swapping.  If payloads contain little-endian register values,
+ * the caller is responsible for conversion with cpu_to_le32() / le32_to_cpu().
+ *
  * The payload sizes and rv are specified in bytes with the following
  * restrictions concerning the protocol.
  *
@@ -45,9 +49,9 @@ struct pci_doe_mb;
  */
 struct pci_doe_task {
        struct pci_doe_protocol prot;
-       u32 *request_pl;
+       __le32 *request_pl;
        size_t request_pl_sz;
-       u32 *response_pl;
+       __le32 *response_pl;
        size_t response_pl_sz;
        int rv;
        void (*complete)(struct pci_doe_task *task);
index b50e5c7..a5dda51 100644 (file)
@@ -1624,6 +1624,8 @@ pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
                                              flags, NULL);
 }
 
+static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
+{ return false; }
 static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
                                                   const struct irq_affinity_desc *affdesc)
 {
index 36bf0bb..db7c0bd 100644 (file)
@@ -1547,7 +1547,7 @@ int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id);
 struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode);
 struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode);
 struct phy_device *device_phy_find_device(struct device *dev);
-struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_get_phy_node(const struct fwnode_handle *fwnode);
 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
 int phy_device_register(struct phy_device *phy);
 void phy_device_free(struct phy_device *phydev);
index c492c26..637698e 100644 (file)
@@ -574,6 +574,7 @@ struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *,
                               phy_interface_t iface,
                               const struct phylink_mac_ops *mac_ops);
 void phylink_destroy(struct phylink *);
+bool phylink_expects_phy(struct phylink *pl);
 
 int phylink_connect_phy(struct phylink *, struct phy_device *);
 int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags);
index 92ad755..b6e6378 100644 (file)
@@ -25,7 +25,8 @@ void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
                                       unsigned change, u32 event,
                                       gfp_t flags, int *new_nsid,
-                                      int new_ifindex, u32 portid, u32 seq);
+                                      int new_ifindex, u32 portid,
+                                      const struct nlmsghdr *nlh);
 void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
                       gfp_t flags, u32 portid, const struct nlmsghdr *nlh);
 
index 52b98f9..ef06a19 100644 (file)
@@ -557,7 +557,7 @@ int sfp_get_module_eeprom_by_page(struct sfp_bus *bus,
 void sfp_upstream_start(struct sfp_bus *bus);
 void sfp_upstream_stop(struct sfp_bus *bus);
 void sfp_bus_put(struct sfp_bus *bus);
-struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode);
+struct sfp_bus *sfp_bus_find_fwnode(const struct fwnode_handle *fwnode);
 int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
                         const struct sfp_upstream_ops *ops);
 void sfp_bus_del_upstream(struct sfp_bus *bus);
@@ -619,7 +619,8 @@ static inline void sfp_bus_put(struct sfp_bus *bus)
 {
 }
 
-static inline struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
+static inline struct sfp_bus *
+sfp_bus_find_fwnode(const struct fwnode_handle *fwnode)
 {
        return NULL;
 }
index 6ed9b4d..d5311ce 100644 (file)
@@ -954,6 +954,7 @@ enum {
        HCI_CONN_STK_ENCRYPT,
        HCI_CONN_AUTH_INITIATOR,
        HCI_CONN_DROP,
+       HCI_CONN_CANCEL,
        HCI_CONN_PARAM_REMOVAL_PEND,
        HCI_CONN_NEW_LINK_KEY,
        HCI_CONN_SCANNING,
index ea36ab7..c384323 100644 (file)
@@ -761,13 +761,17 @@ static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
 #if IS_ENABLED(CONFIG_IPV6)
 static inline int bond_get_targets_ip6(struct in6_addr *targets, struct in6_addr *ip)
 {
+       struct in6_addr mcaddr;
        int i;
 
-       for (i = 0; i < BOND_MAX_NS_TARGETS; i++)
-               if (ipv6_addr_equal(&targets[i], ip))
+       for (i = 0; i < BOND_MAX_NS_TARGETS; i++) {
+               addrconf_addr_solict_mult(&targets[i], &mcaddr);
+               if ((ipv6_addr_equal(&targets[i], ip)) ||
+                   (ipv6_addr_equal(&mcaddr, ip)))
                        return i;
                else if (ipv6_addr_any(&targets[i]))
                        break;
+       }
 
        return -1;
 }
index 2c004c2..3af5289 100644 (file)
@@ -37,7 +37,7 @@ int raw_rcv(struct sock *, struct sk_buff *);
 struct raw_hashinfo {
        spinlock_t lock;
 
-       struct hlist_nulls_head ht[RAW_HTABLE_SIZE] ____cacheline_aligned;
+       struct hlist_head ht[RAW_HTABLE_SIZE] ____cacheline_aligned;
 };
 
 static inline u32 raw_hashfunc(const struct net *net, u32 proto)
@@ -51,7 +51,7 @@ static inline void raw_hashinfo_init(struct raw_hashinfo *hashinfo)
 
        spin_lock_init(&hashinfo->lock);
        for (i = 0; i < RAW_HTABLE_SIZE; i++)
-               INIT_HLIST_NULLS_HEAD(&hashinfo->ht[i], i);
+               INIT_HLIST_HEAD(&hashinfo->ht[i]);
 }
 
 #ifdef CONFIG_PROC_FS
index 41c57b8..76aa748 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/skbuff.h> /* skb_shared_info */
 #include <uapi/linux/netdev.h>
+#include <linux/bitfield.h>
 
 /**
  * DOC: XDP RX-queue information
@@ -425,6 +426,52 @@ XDP_METADATA_KFUNC_xxx
 MAX_XDP_METADATA_KFUNC,
 };
 
+enum xdp_rss_hash_type {
+       /* First part: Individual bits for L3/L4 types */
+       XDP_RSS_L3_IPV4         = BIT(0),
+       XDP_RSS_L3_IPV6         = BIT(1),
+
+       /* The fixed (L3) IPv4 and IPv6 headers can both be followed by
+        * variable/dynamic headers, IPv4 called Options and IPv6 called
+        * Extension Headers. HW RSS type can contain this info.
+        */
+       XDP_RSS_L3_DYNHDR       = BIT(2),
+
+       /* When RSS hash covers L4 then drivers MUST set XDP_RSS_L4 bit in
+        * addition to the protocol specific bit.  This ease interaction with
+        * SKBs and avoids reserving a fixed mask for future L4 protocol bits.
+        */
+       XDP_RSS_L4              = BIT(3), /* L4 based hash, proto can be unknown */
+       XDP_RSS_L4_TCP          = BIT(4),
+       XDP_RSS_L4_UDP          = BIT(5),
+       XDP_RSS_L4_SCTP         = BIT(6),
+       XDP_RSS_L4_IPSEC        = BIT(7), /* L4 based hash include IPSEC SPI */
+
+       /* Second part: RSS hash type combinations used for driver HW mapping */
+       XDP_RSS_TYPE_NONE            = 0,
+       XDP_RSS_TYPE_L2              = XDP_RSS_TYPE_NONE,
+
+       XDP_RSS_TYPE_L3_IPV4         = XDP_RSS_L3_IPV4,
+       XDP_RSS_TYPE_L3_IPV6         = XDP_RSS_L3_IPV6,
+       XDP_RSS_TYPE_L3_IPV4_OPT     = XDP_RSS_L3_IPV4 | XDP_RSS_L3_DYNHDR,
+       XDP_RSS_TYPE_L3_IPV6_EX      = XDP_RSS_L3_IPV6 | XDP_RSS_L3_DYNHDR,
+
+       XDP_RSS_TYPE_L4_ANY          = XDP_RSS_L4,
+       XDP_RSS_TYPE_L4_IPV4_TCP     = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_TCP,
+       XDP_RSS_TYPE_L4_IPV4_UDP     = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_UDP,
+       XDP_RSS_TYPE_L4_IPV4_SCTP    = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_SCTP,
+       XDP_RSS_TYPE_L4_IPV4_IPSEC   = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC,
+
+       XDP_RSS_TYPE_L4_IPV6_TCP     = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_TCP,
+       XDP_RSS_TYPE_L4_IPV6_UDP     = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_UDP,
+       XDP_RSS_TYPE_L4_IPV6_SCTP    = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_SCTP,
+       XDP_RSS_TYPE_L4_IPV6_IPSEC   = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC,
+
+       XDP_RSS_TYPE_L4_IPV6_TCP_EX  = XDP_RSS_TYPE_L4_IPV6_TCP  | XDP_RSS_L3_DYNHDR,
+       XDP_RSS_TYPE_L4_IPV6_UDP_EX  = XDP_RSS_TYPE_L4_IPV6_UDP  | XDP_RSS_L3_DYNHDR,
+       XDP_RSS_TYPE_L4_IPV6_SCTP_EX = XDP_RSS_TYPE_L4_IPV6_SCTP | XDP_RSS_L3_DYNHDR,
+};
+
 #ifdef CONFIG_NET
 u32 bpf_xdp_metadata_kfunc_id(int id);
 bool bpf_dev_bound_kfunc_id(u32 btf_id);
index 1322d34..99cbc59 100644 (file)
@@ -512,7 +512,7 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
        TP_STRUCT__entry(
                __field(dev_t,  dev)
                __field(ino_t,  ino)
-               __field(nid_t,  nid[3])
+               __array(nid_t,  nid, 3)
                __field(int,    depth)
                __field(int,    err)
        ),
index 90b2fb0..012fa0d 100644 (file)
@@ -768,7 +768,7 @@ TRACE_EVENT_RCU(rcu_torture_read,
        TP_ARGS(rcutorturename, rhp, secs, c_old, c),
 
        TP_STRUCT__entry(
-               __field(char, rcutorturename[RCUTORTURENAME_LEN])
+               __array(char, rcutorturename, RCUTORTURENAME_LEN)
                __field(struct rcu_head *, rhp)
                __field(unsigned long, secs)
                __field(unsigned long, c_old)
index ac5c24d..e30a13b 100644 (file)
@@ -9,17 +9,30 @@
 #undef __entry
 #define __entry entry
 
+/*
+ * Fields should never declare an array: i.e. __field(int, arr[5])
+ * If they do, it will cause issues in parsing and possibly corrupt the
+ * events. To prevent that from happening, test the sizeof() a fictitious
+ * type called "struct _test_no_array_##item" which will fail if "item"
+ * contains array elements (like "arr[5]").
+ *
+ * If you hit this, use __array(int, arr, 5) instead.
+ */
 #undef __field
-#define __field(type, item)
+#define __field(type, item)                                    \
+       { (void)sizeof(struct _test_no_array_##item *); }
 
 #undef __field_ext
-#define __field_ext(type, item, filter_type)
+#define __field_ext(type, item, filter_type)                   \
+       { (void)sizeof(struct _test_no_array_##item *); }
 
 #undef __field_struct
-#define __field_struct(type, item)
+#define __field_struct(type, item)                             \
+       { (void)sizeof(struct _test_no_array_##item *); }
 
 #undef __field_struct_ext
-#define __field_struct_ext(type, item, filter_type)
+#define __field_struct_ext(type, item, filter_type)            \
+       { (void)sizeof(struct _test_no_array_##item *); }
 
 #undef __array
 #define __array(type, item, len)
index 5af2a03..3744e4d 100644 (file)
@@ -140,11 +140,11 @@ struct virtio_blk_config {
 
        /* Zoned block device characteristics (if VIRTIO_BLK_F_ZONED) */
        struct virtio_blk_zoned_characteristics {
-               __le32 zone_sectors;
-               __le32 max_open_zones;
-               __le32 max_active_zones;
-               __le32 max_append_sectors;
-               __le32 write_granularity;
+               __virtio32 zone_sectors;
+               __virtio32 max_open_zones;
+               __virtio32 max_active_zones;
+               __virtio32 max_append_sectors;
+               __virtio32 write_granularity;
                __u8 model;
                __u8 unused2[3];
        } zoned;
@@ -241,11 +241,11 @@ struct virtio_blk_outhdr {
  */
 struct virtio_blk_zone_descriptor {
        /* Zone capacity */
-       __le64 z_cap;
+       __virtio64 z_cap;
        /* The starting sector of the zone */
-       __le64 z_start;
+       __virtio64 z_start;
        /* Zone write pointer position in sectors */
-       __le64 z_wp;
+       __virtio64 z_wp;
        /* Zone type */
        __u8 z_type;
        /* Zone state */
@@ -254,7 +254,7 @@ struct virtio_blk_zone_descriptor {
 };
 
 struct virtio_blk_zone_report {
-       __le64 nr_zones;
+       __virtio64 nr_zones;
        __u8 reserved[56];
        struct virtio_blk_zone_descriptor zones[];
 };
index 25aab8e..431c3af 100644 (file)
@@ -979,7 +979,6 @@ struct ufs_hba {
        struct completion *uic_async_done;
 
        enum ufshcd_state ufshcd_state;
-       bool logical_unit_scan_finished;
        u32 eh_flags;
        u32 intr_mask;
        u16 ee_ctrl_mask;
index f6c112e..e7a01c2 100644 (file)
@@ -60,15 +60,8 @@ static void __init error(char *x)
                message = x;
 }
 
-static void panic_show_mem(const char *fmt, ...)
-{
-       va_list args;
-
-       show_mem(0, NULL);
-       va_start(args, fmt);
-       panic(fmt, args);
-       va_end(args);
-}
+#define panic_show_mem(fmt, ...) \
+       ({ show_mem(0, NULL); panic(fmt, ##__VA_ARGS__); })
 
 /* link hash */
 
index 729793a..c2cde88 100644 (file)
@@ -27,6 +27,7 @@ static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *c
                struct hlist_node *node = cache->list.first;
 
                hlist_del(node);
+               cache->nr_cached--;
                return container_of(node, struct io_cache_entry, node);
        }
 
index 722624b..4a865f0 100644 (file)
@@ -998,7 +998,7 @@ static void __io_req_complete_post(struct io_kiocb *req)
 
 void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 {
-       if (req->ctx->task_complete && (issue_flags & IO_URING_F_IOWQ)) {
+       if (req->ctx->task_complete && req->ctx->submitter_task != current) {
                req->io_task_work.func = io_req_task_complete;
                io_req_task_work_add(req);
        } else if (!(issue_flags & IO_URING_F_UNLOCKED) ||
@@ -2789,8 +2789,8 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
        io_eventfd_unregister(ctx);
        io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
        io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
-       mutex_unlock(&ctx->uring_lock);
        io_destroy_buffers(ctx);
+       mutex_unlock(&ctx->uring_lock);
        if (ctx->sq_creds)
                put_cred(ctx->sq_creds);
        if (ctx->submitter_task)
index 3002dc8..a90c820 100644 (file)
@@ -228,17 +228,18 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
                return i;
        }
 
-       /* the head kbuf is the list itself */
+       /* protects io_buffers_cache */
+       lockdep_assert_held(&ctx->uring_lock);
+
        while (!list_empty(&bl->buf_list)) {
                struct io_buffer *nxt;
 
                nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
-               list_del(&nxt->list);
+               list_move(&nxt->list, &ctx->io_buffers_cache);
                if (++i == nbufs)
                        return i;
                cond_resched();
        }
-       i++;
 
        return i;
 }
index 795facb..55306e8 100644 (file)
@@ -726,6 +726,7 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
        apoll = io_req_alloc_apoll(req, issue_flags);
        if (!apoll)
                return IO_APOLL_ABORTED;
+       req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
        req->flags |= REQ_F_POLLED;
        ipt.pt._qproc = io_async_queue_proc;
 
index 2b87436..f27f497 100644 (file)
@@ -144,15 +144,13 @@ static inline void io_req_set_rsrc_node(struct io_kiocb *req,
                                        unsigned int issue_flags)
 {
        if (!req->rsrc_node) {
-               req->rsrc_node = ctx->rsrc_node;
+               io_ring_submit_lock(ctx, issue_flags);
 
-               if (!(issue_flags & IO_URING_F_UNLOCKED)) {
-                       lockdep_assert_held(&ctx->uring_lock);
+               lockdep_assert_held(&ctx->uring_lock);
 
-                       io_charge_rsrc_node(ctx);
-               } else {
-                       percpu_ref_get(&req->rsrc_node->refs);
-               }
+               req->rsrc_node = ctx->rsrc_node;
+               io_charge_rsrc_node(ctx);
+               io_ring_submit_unlock(ctx, issue_flags);
        }
 }
 
index 636f1c6..505d86b 100644 (file)
@@ -1513,7 +1513,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
        spin_unlock_irq(&callback_lock);
 
        if (adding || deleting)
-               update_tasks_cpumask(parent, tmp->new_cpus);
+               update_tasks_cpumask(parent, tmp->addmask);
 
        /*
         * Set or clear CS_SCHED_LOAD_BALANCE when partcmd_update, if necessary.
@@ -1770,10 +1770,13 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
        /*
         * Use the cpumasks in trialcs for tmpmasks when they are pointers
         * to allocated cpumasks.
+        *
+        * Note that update_parent_subparts_cpumask() uses only addmask &
+        * delmask, but not new_cpus.
         */
        tmp.addmask  = trialcs->subparts_cpus;
        tmp.delmask  = trialcs->effective_cpus;
-       tmp.new_cpus = trialcs->cpus_allowed;
+       tmp.new_cpus = NULL;
 #endif
 
        retval = validate_change(cs, trialcs);
@@ -1838,6 +1841,11 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
        }
        spin_unlock_irq(&callback_lock);
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+       /* Now trialcs->cpus_allowed is available */
+       tmp.new_cpus = trialcs->cpus_allowed;
+#endif
+
        /* effective_cpus will be updated here */
        update_cpumasks_hier(cs, &tmp, false);
 
@@ -2445,6 +2453,20 @@ static int fmeter_getrate(struct fmeter *fmp)
 
 static struct cpuset *cpuset_attach_old_cs;
 
+/*
+ * Check to see if a cpuset can accept a new task
+ * For v1, cpus_allowed and mems_allowed can't be empty.
+ * For v2, effective_cpus can't be empty.
+ * Note that in v1, effective_cpus = cpus_allowed.
+ */
+static int cpuset_can_attach_check(struct cpuset *cs)
+{
+       if (cpumask_empty(cs->effective_cpus) ||
+          (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
+               return -ENOSPC;
+       return 0;
+}
+
 /* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */
 static int cpuset_can_attach(struct cgroup_taskset *tset)
 {
@@ -2459,16 +2481,9 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
 
        percpu_down_write(&cpuset_rwsem);
 
-       /* allow moving tasks into an empty cpuset if on default hierarchy */
-       ret = -ENOSPC;
-       if (!is_in_v2_mode() &&
-           (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
-               goto out_unlock;
-
-       /*
-        * Task cannot be moved to a cpuset with empty effective cpus.
-        */
-       if (cpumask_empty(cs->effective_cpus))
+       /* Check to see if task is allowed in the cpuset */
+       ret = cpuset_can_attach_check(cs);
+       if (ret)
                goto out_unlock;
 
        cgroup_taskset_for_each(task, css, tset) {
@@ -2485,7 +2500,6 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
         * changes which zero cpus/mems_allowed.
         */
        cs->attach_in_progress++;
-       ret = 0;
 out_unlock:
        percpu_up_write(&cpuset_rwsem);
        return ret;
@@ -2494,25 +2508,47 @@ out_unlock:
 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
 {
        struct cgroup_subsys_state *css;
+       struct cpuset *cs;
 
        cgroup_taskset_first(tset, &css);
+       cs = css_cs(css);
 
        percpu_down_write(&cpuset_rwsem);
-       css_cs(css)->attach_in_progress--;
+       cs->attach_in_progress--;
+       if (!cs->attach_in_progress)
+               wake_up(&cpuset_attach_wq);
        percpu_up_write(&cpuset_rwsem);
 }
 
 /*
- * Protected by cpuset_rwsem.  cpus_attach is used only by cpuset_attach()
+ * Protected by cpuset_rwsem. cpus_attach is used only by cpuset_attach_task()
  * but we can't allocate it dynamically there.  Define it global and
  * allocate from cpuset_init().
  */
 static cpumask_var_t cpus_attach;
+static nodemask_t cpuset_attach_nodemask_to;
+
+static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
+{
+       percpu_rwsem_assert_held(&cpuset_rwsem);
+
+       if (cs != &top_cpuset)
+               guarantee_online_cpus(task, cpus_attach);
+       else
+               cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
+                              cs->subparts_cpus);
+       /*
+        * can_attach beforehand should guarantee that this doesn't
+        * fail.  TODO: have a better way to handle failure here
+        */
+       WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
+
+       cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
+       cpuset_update_task_spread_flags(cs, task);
+}
 
 static void cpuset_attach(struct cgroup_taskset *tset)
 {
-       /* static buf protected by cpuset_rwsem */
-       static nodemask_t cpuset_attach_nodemask_to;
        struct task_struct *task;
        struct task_struct *leader;
        struct cgroup_subsys_state *css;
@@ -2543,20 +2579,8 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 
        guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
 
-       cgroup_taskset_for_each(task, css, tset) {
-               if (cs != &top_cpuset)
-                       guarantee_online_cpus(task, cpus_attach);
-               else
-                       cpumask_copy(cpus_attach, task_cpu_possible_mask(task));
-               /*
-                * can_attach beforehand should guarantee that this doesn't
-                * fail.  TODO: have a better way to handle failure here
-                */
-               WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
-
-               cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
-               cpuset_update_task_spread_flags(cs, task);
-       }
+       cgroup_taskset_for_each(task, css, tset)
+               cpuset_attach_task(cs, task);
 
        /*
         * Change mm for all threadgroup leaders. This is expensive and may
@@ -3248,17 +3272,101 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
 }
 
 /*
+ * In case the child is cloned into a cpuset different from its parent,
+ * additional checks are done to see if the move is allowed.
+ */
+static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
+{
+       struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
+       bool same_cs;
+       int ret;
+
+       rcu_read_lock();
+       same_cs = (cs == task_cs(current));
+       rcu_read_unlock();
+
+       if (same_cs)
+               return 0;
+
+       lockdep_assert_held(&cgroup_mutex);
+       percpu_down_write(&cpuset_rwsem);
+
+       /* Check to see if task is allowed in the cpuset */
+       ret = cpuset_can_attach_check(cs);
+       if (ret)
+               goto out_unlock;
+
+       ret = task_can_attach(task, cs->effective_cpus);
+       if (ret)
+               goto out_unlock;
+
+       ret = security_task_setscheduler(task);
+       if (ret)
+               goto out_unlock;
+
+       /*
+        * Mark attach is in progress.  This makes validate_change() fail
+        * changes which zero cpus/mems_allowed.
+        */
+       cs->attach_in_progress++;
+out_unlock:
+       percpu_up_write(&cpuset_rwsem);
+       return ret;
+}
+
+static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
+{
+       struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
+       bool same_cs;
+
+       rcu_read_lock();
+       same_cs = (cs == task_cs(current));
+       rcu_read_unlock();
+
+       if (same_cs)
+               return;
+
+       percpu_down_write(&cpuset_rwsem);
+       cs->attach_in_progress--;
+       if (!cs->attach_in_progress)
+               wake_up(&cpuset_attach_wq);
+       percpu_up_write(&cpuset_rwsem);
+}
+
+/*
  * Make sure the new task conform to the current state of its parent,
  * which could have been changed by cpuset just after it inherits the
  * state from the parent and before it sits on the cgroup's task list.
  */
 static void cpuset_fork(struct task_struct *task)
 {
-       if (task_css_is_root(task, cpuset_cgrp_id))
+       struct cpuset *cs;
+       bool same_cs;
+
+       rcu_read_lock();
+       cs = task_cs(task);
+       same_cs = (cs == task_cs(current));
+       rcu_read_unlock();
+
+       if (same_cs) {
+               if (cs == &top_cpuset)
+                       return;
+
+               set_cpus_allowed_ptr(task, current->cpus_ptr);
+               task->mems_allowed = current->mems_allowed;
                return;
+       }
+
+       /* CLONE_INTO_CGROUP */
+       percpu_down_write(&cpuset_rwsem);
+       guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
+       cpuset_attach_task(cs, task);
+
+       cs->attach_in_progress--;
+       if (!cs->attach_in_progress)
+               wake_up(&cpuset_attach_wq);
 
-       set_cpus_allowed_ptr(task, current->cpus_ptr);
-       task->mems_allowed = current->mems_allowed;
+       percpu_up_write(&cpuset_rwsem);
 }
 
 struct cgroup_subsys cpuset_cgrp_subsys = {
@@ -3271,6 +3379,8 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
        .attach         = cpuset_attach,
        .post_attach    = cpuset_post_attach,
        .bind           = cpuset_bind,
+       .can_fork       = cpuset_can_fork,
+       .cancel_fork    = cpuset_cancel_fork,
        .fork           = cpuset_fork,
        .legacy_cftypes = legacy_files,
        .dfl_cftypes    = dfl_files,
index 1b6b218..9364732 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/freezer.h>
 #include <linux/seq_file.h>
 #include <linux/mutex.h>
+#include <linux/cpu.h>
 
 /*
  * A cgroup is freezing if any FREEZING flags are set.  FREEZING_SELF is
@@ -350,7 +351,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
 
        if (freeze) {
                if (!(freezer->state & CGROUP_FREEZING))
-                       static_branch_inc(&freezer_active);
+                       static_branch_inc_cpuslocked(&freezer_active);
                freezer->state |= state;
                freeze_cgroup(freezer);
        } else {
@@ -361,7 +362,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
                if (!(freezer->state & CGROUP_FREEZING)) {
                        freezer->state &= ~CGROUP_FROZEN;
                        if (was_freezing)
-                               static_branch_dec(&freezer_active);
+                               static_branch_dec_cpuslocked(&freezer_active);
                        unfreeze_cgroup(freezer);
                }
        }
@@ -379,6 +380,7 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
 {
        struct cgroup_subsys_state *pos;
 
+       cpus_read_lock();
        /*
         * Update all its descendants in pre-order traversal.  Each
         * descendant will try to inherit its parent's FREEZING state as
@@ -407,6 +409,7 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
        }
        rcu_read_unlock();
        mutex_unlock(&freezer_mutex);
+       cpus_read_unlock();
 }
 
 static ssize_t freezer_write(struct kernfs_open_file *of,
index 831f1f4..0a2b496 100644 (file)
@@ -457,9 +457,7 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
        struct task_cputime *cputime = &bstat->cputime;
        int i;
 
-       cputime->stime = 0;
-       cputime->utime = 0;
-       cputime->sum_exec_runtime = 0;
+       memset(bstat, 0, sizeof(*bstat));
        for_each_possible_cpu(i) {
                struct kernel_cpustat kcpustat;
                u64 *cpustat = kcpustat.cpustat;
index 03e3251..dac42a2 100644 (file)
@@ -623,10 +623,10 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index,
                phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
        unsigned long max_slots = get_max_slots(boundary_mask);
        unsigned int iotlb_align_mask =
-               dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
+               dma_get_min_align_mask(dev) | alloc_align_mask;
        unsigned int nslots = nr_slots(alloc_size), stride;
-       unsigned int index, wrap, count = 0, i;
        unsigned int offset = swiotlb_align_offset(dev, orig_addr);
+       unsigned int index, slots_checked, count = 0, i;
        unsigned long flags;
        unsigned int slot_base;
        unsigned int slot_index;
@@ -635,29 +635,34 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index,
        BUG_ON(area_index >= mem->nareas);
 
        /*
+        * For allocations of PAGE_SIZE or larger only look for page aligned
+        * allocations.
+        */
+       if (alloc_size >= PAGE_SIZE)
+               iotlb_align_mask |= ~PAGE_MASK;
+       iotlb_align_mask &= ~(IO_TLB_SIZE - 1);
+
+       /*
         * For mappings with an alignment requirement don't bother looping to
-        * unaligned slots once we found an aligned one.  For allocations of
-        * PAGE_SIZE or larger only look for page aligned allocations.
+        * unaligned slots once we found an aligned one.
         */
        stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
-       if (alloc_size >= PAGE_SIZE)
-               stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
-       stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);
 
        spin_lock_irqsave(&area->lock, flags);
        if (unlikely(nslots > mem->area_nslabs - area->used))
                goto not_found;
 
        slot_base = area_index * mem->area_nslabs;
-       index = wrap = wrap_area_index(mem, ALIGN(area->index, stride));
+       index = area->index;
 
-       do {
+       for (slots_checked = 0; slots_checked < mem->area_nslabs; ) {
                slot_index = slot_base + index;
 
                if (orig_addr &&
                    (slot_addr(tbl_dma_addr, slot_index) &
                     iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
                        index = wrap_area_index(mem, index + 1);
+                       slots_checked++;
                        continue;
                }
 
@@ -673,7 +678,8 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index,
                                goto found;
                }
                index = wrap_area_index(mem, index + stride);
-       } while (index != wrap);
+               slots_checked += stride;
+       }
 
 not_found:
        spin_unlock_irqrestore(&area->lock, flags);
@@ -693,10 +699,7 @@ found:
        /*
         * Update the indices to avoid searching in the next round.
         */
-       if (index + nslots < mem->area_nslabs)
-               area->index = index + nslots;
-       else
-               area->index = 0;
+       area->index = wrap_area_index(mem, index + nslots);
        area->used += nslots;
        spin_unlock_irqrestore(&area->lock, flags);
        return slot_index;
index fb3e436..435815d 100644 (file)
@@ -12173,7 +12173,7 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
        /*
         * If its not a per-cpu rb, it must be the same task.
         */
-       if (output_event->cpu == -1 && output_event->ctx != event->ctx)
+       if (output_event->cpu == -1 && output_event->hw.target != event->hw.target)
                goto out;
 
        /*
@@ -12893,12 +12893,14 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
        __perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->pinned_groups, &events);
        __perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->flexible_groups, &events);
 
-       /*
-        * Wait for the events to quiesce before re-instating them.
-        */
-       synchronize_rcu();
+       if (!list_empty(&events)) {
+               /*
+                * Wait for the events to quiesce before re-instating them.
+                */
+               synchronize_rcu();
 
-       __perf_pmu_install(dst_ctx, dst_cpu, pmu, &events);
+               __perf_pmu_install(dst_ctx, dst_cpu, pmu, &events);
+       }
 
        mutex_unlock(&dst_ctx->mutex);
        mutex_unlock(&src_ctx->mutex);
index 8e880c0..7b95ee9 100644 (file)
@@ -3024,6 +3024,18 @@ need_offload_krc(struct kfree_rcu_cpu *krcp)
        return !!READ_ONCE(krcp->head);
 }
 
+static bool
+need_wait_for_krwp_work(struct kfree_rcu_cpu_work *krwp)
+{
+       int i;
+
+       for (i = 0; i < FREE_N_CHANNELS; i++)
+               if (!list_empty(&krwp->bulk_head_free[i]))
+                       return true;
+
+       return !!krwp->head_free;
+}
+
 static int krc_count(struct kfree_rcu_cpu *krcp)
 {
        int sum = atomic_read(&krcp->head_count);
@@ -3107,15 +3119,14 @@ static void kfree_rcu_monitor(struct work_struct *work)
        for (i = 0; i < KFREE_N_BATCHES; i++) {
                struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
 
-               // Try to detach bulk_head or head and attach it over any
-               // available corresponding free channel. It can be that
-               // a previous RCU batch is in progress, it means that
-               // immediately to queue another one is not possible so
-               // in that case the monitor work is rearmed.
-               if ((!list_empty(&krcp->bulk_head[0]) && list_empty(&krwp->bulk_head_free[0])) ||
-                       (!list_empty(&krcp->bulk_head[1]) && list_empty(&krwp->bulk_head_free[1])) ||
-                               (READ_ONCE(krcp->head) && !krwp->head_free)) {
+               // Try to detach bulk_head or head and attach it, only when
+               // all channels are free.  Any channel is not free means at krwp
+               // there is on-going rcu work to handle krwp's free business.
+               if (need_wait_for_krwp_work(krwp))
+                       continue;
 
+               // kvfree_rcu_drain_ready() might handle this krcp, if so give up.
+               if (need_offload_krc(krcp)) {
                        // Channel 1 corresponds to the SLAB-pointer bulk path.
                        // Channel 2 corresponds to vmalloc-pointer bulk path.
                        for (j = 0; j < FREE_N_CHANNELS; j++) {
index 6986ea3..5f6587d 100644 (file)
@@ -10238,6 +10238,16 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 
                sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
                                sds->total_capacity;
+
+               /*
+                * If the local group is more loaded than the average system
+                * load, don't try to pull any tasks.
+                */
+               if (local->avg_load >= sds->avg_load) {
+                       env->imbalance = 0;
+                       return;
+               }
+
        }
 
        /*
index 0feea14..c67bcc8 100644 (file)
@@ -5667,12 +5667,15 @@ int modify_ftrace_direct(unsigned long ip,
                ret = 0;
        }
 
-       if (unlikely(ret && new_direct)) {
-               direct->count++;
-               list_del_rcu(&new_direct->next);
-               synchronize_rcu_tasks();
-               kfree(new_direct);
-               ftrace_direct_func_count--;
+       if (ret) {
+               direct->addr = old_addr;
+               if (unlikely(new_direct)) {
+                       direct->count++;
+                       list_del_rcu(&new_direct->next);
+                       synchronize_rcu_tasks();
+                       kfree(new_direct);
+                       ftrace_direct_func_count--;
+               }
        }
 
  out_unlock:
index c6f47b6..76a2d91 100644 (file)
@@ -3098,6 +3098,10 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
                if (RB_WARN_ON(cpu_buffer,
                               rb_is_reader_page(cpu_buffer->tail_page)))
                        return;
+               /*
+                * No need for a memory barrier here, as the update
+                * of the tail_page did it for this page.
+                */
                local_set(&cpu_buffer->commit_page->page->commit,
                          rb_page_write(cpu_buffer->commit_page));
                rb_inc_page(&cpu_buffer->commit_page);
@@ -3107,6 +3111,8 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
        while (rb_commit_index(cpu_buffer) !=
               rb_page_write(cpu_buffer->commit_page)) {
 
+               /* Make sure the readers see the content of what is committed. */
+               smp_wmb();
                local_set(&cpu_buffer->commit_page->page->commit,
                          rb_page_write(cpu_buffer->commit_page));
                RB_WARN_ON(cpu_buffer,
@@ -4684,7 +4690,12 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 
        /*
         * Make sure we see any padding after the write update
-        * (see rb_reset_tail())
+        * (see rb_reset_tail()).
+        *
+        * In addition, a writer may be writing on the reader page
+        * if the page has not been fully filled, so the read barrier
+        * is also needed to make sure we see the content of what is
+        * committed by the writer (see rb_set_commit_to_write()).
         */
        smp_rmb();
 
index 937e967..36a6037 100644 (file)
@@ -1149,22 +1149,22 @@ static void tracing_snapshot_instance_cond(struct trace_array *tr,
        unsigned long flags;
 
        if (in_nmi()) {
-               internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
-               internal_trace_puts("*** snapshot is being ignored        ***\n");
+               trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
+               trace_array_puts(tr, "*** snapshot is being ignored        ***\n");
                return;
        }
 
        if (!tr->allocated_snapshot) {
-               internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
-               internal_trace_puts("*** stopping trace here!   ***\n");
-               tracing_off();
+               trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
+               trace_array_puts(tr, "*** stopping trace here!   ***\n");
+               tracer_tracing_off(tr);
                return;
        }
 
        /* Note, snapshot can not be used when the tracer uses it */
        if (tracer->use_max_tr) {
-               internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
-               internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
+               trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
+               trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
                return;
        }
 
@@ -9516,6 +9516,7 @@ static int __remove_instance(struct trace_array *tr)
        tracefs_remove(tr->dir);
        free_percpu(tr->last_func_repeats);
        free_trace_buffers(tr);
+       clear_tracing_err_log(tr);
 
        for (i = 0; i < tr->nr_topts; i++) {
                kfree(tr->topts[i].topts);
@@ -10393,19 +10394,20 @@ out:
 
 void __init ftrace_boot_snapshot(void)
 {
+#ifdef CONFIG_TRACER_MAX_TRACE
        struct trace_array *tr;
 
-       if (snapshot_at_boot) {
-               tracing_snapshot();
-               internal_trace_puts("** Boot snapshot taken **\n");
-       }
+       if (!snapshot_at_boot)
+               return;
 
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
-               if (tr == &global_trace)
+               if (!tr->allocated_snapshot)
                        continue;
-               trace_array_puts(tr, "** Boot snapshot taken **\n");
+
                tracing_snapshot_instance(tr);
+               trace_array_puts(tr, "** Boot snapshot taken **\n");
        }
+#endif
 }
 
 void __init early_trace_init(void)
index 46d0abb..d6a70af 100644 (file)
@@ -44,14 +44,21 @@ enum { ERRORS };
 
 static const char *err_text[] = { ERRORS };
 
+static DEFINE_MUTEX(lastcmd_mutex);
 static char *last_cmd;
 
 static int errpos(const char *str)
 {
+       int ret = 0;
+
+       mutex_lock(&lastcmd_mutex);
        if (!str || !last_cmd)
-               return 0;
+               goto out;
 
-       return err_pos(last_cmd, str);
+       ret = err_pos(last_cmd, str);
+ out:
+       mutex_unlock(&lastcmd_mutex);
+       return ret;
 }
 
 static void last_cmd_set(const char *str)
@@ -59,18 +66,22 @@ static void last_cmd_set(const char *str)
        if (!str)
                return;
 
+       mutex_lock(&lastcmd_mutex);
        kfree(last_cmd);
-
        last_cmd = kstrdup(str, GFP_KERNEL);
+       mutex_unlock(&lastcmd_mutex);
 }
 
 static void synth_err(u8 err_type, u16 err_pos)
 {
+       mutex_lock(&lastcmd_mutex);
        if (!last_cmd)
-               return;
+               goto out;
 
        tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
                        err_type, err_pos);
+ out:
+       mutex_unlock(&lastcmd_mutex);
 }
 
 static int create_synth_event(const char *raw_command);
index 9176bb7..4496975 100644 (file)
@@ -1296,7 +1296,7 @@ static void notify_new_max_latency(u64 latency)
        rcu_read_lock();
        list_for_each_entry_rcu(inst, &osnoise_instances, list) {
                tr = inst->tr;
-               if (tr->max_latency < latency) {
+               if (tracer_tracing_is_on(tr) && tr->max_latency < latency) {
                        tr->max_latency = latency;
                        latency_fsnotify(tr);
                }
@@ -1738,6 +1738,8 @@ static int timerlat_main(void *data)
 
                trace_timerlat_sample(&s);
 
+               notify_new_max_latency(diff);
+
                timerlat_dump_stack(time_to_us(diff));
 
                tlat->tracing_thread = false;
index c64050e..1fffe2b 100644 (file)
@@ -280,6 +280,10 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
        write_unlock(&xen_9pfs_lock);
 
        for (i = 0; i < priv->num_rings; i++) {
+               struct xen_9pfs_dataring *ring = &priv->rings[i];
+
+               cancel_work_sync(&ring->work);
+
                if (!priv->rings[i].intf)
                        break;
                if (priv->rings[i].irq > 0)
index 17b946f..8455ba1 100644 (file)
@@ -68,7 +68,7 @@ static const struct sco_param esco_param_msbc[] = {
 };
 
 /* This function requires the caller holds hdev->lock */
-static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
+static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
 {
        struct hci_conn_params *params;
        struct hci_dev *hdev = conn->hdev;
@@ -88,9 +88,28 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
 
        params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
                                           bdaddr_type);
-       if (!params || !params->explicit_connect)
+       if (!params)
                return;
 
+       if (params->conn) {
+               hci_conn_drop(params->conn);
+               hci_conn_put(params->conn);
+               params->conn = NULL;
+       }
+
+       if (!params->explicit_connect)
+               return;
+
+       /* If the status indicates successful cancellation of
+        * the attempt (i.e. Unknown Connection Id) there's no point of
+        * notifying failure since we'll go back to keep trying to
+        * connect. The only exception is explicit connect requests
+        * where a timeout + cancel does indicate an actual failure.
+        */
+       if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
+               mgmt_connect_failed(hdev, &conn->dst, conn->type,
+                                   conn->dst_type, status);
+
        /* The connection attempt was doing scan for new RPA, and is
         * in scan phase. If params are not associated with any other
         * autoconnect action, remove them completely. If they are, just unmark
@@ -178,7 +197,7 @@ static void le_scan_cleanup(struct work_struct *work)
        rcu_read_unlock();
 
        if (c == conn) {
-               hci_connect_le_scan_cleanup(conn);
+               hci_connect_le_scan_cleanup(conn, 0x00);
                hci_conn_cleanup(conn);
        }
 
@@ -1049,6 +1068,17 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
        return conn;
 }
 
+static bool hci_conn_unlink(struct hci_conn *conn)
+{
+       if (!conn->link)
+               return false;
+
+       conn->link->link = NULL;
+       conn->link = NULL;
+
+       return true;
+}
+
 int hci_conn_del(struct hci_conn *conn)
 {
        struct hci_dev *hdev = conn->hdev;
@@ -1060,15 +1090,16 @@ int hci_conn_del(struct hci_conn *conn)
        cancel_delayed_work_sync(&conn->idle_work);
 
        if (conn->type == ACL_LINK) {
-               struct hci_conn *sco = conn->link;
-               if (sco) {
-                       sco->link = NULL;
+               struct hci_conn *link = conn->link;
+
+               if (link) {
+                       hci_conn_unlink(conn);
                        /* Due to race, SCO connection might be not established
                         * yet at this point. Delete it now, otherwise it is
                         * possible for it to be stuck and can't be deleted.
                         */
-                       if (sco->handle == HCI_CONN_HANDLE_UNSET)
-                               hci_conn_del(sco);
+                       if (link->handle == HCI_CONN_HANDLE_UNSET)
+                               hci_conn_del(link);
                }
 
                /* Unacked frames */
@@ -1084,7 +1115,7 @@ int hci_conn_del(struct hci_conn *conn)
                struct hci_conn *acl = conn->link;
 
                if (acl) {
-                       acl->link = NULL;
+                       hci_conn_unlink(conn);
                        hci_conn_drop(acl);
                }
 
@@ -1179,31 +1210,8 @@ EXPORT_SYMBOL(hci_get_route);
 static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
 {
        struct hci_dev *hdev = conn->hdev;
-       struct hci_conn_params *params;
 
-       params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
-                                          conn->dst_type);
-       if (params && params->conn) {
-               hci_conn_drop(params->conn);
-               hci_conn_put(params->conn);
-               params->conn = NULL;
-       }
-
-       /* If the status indicates successful cancellation of
-        * the attempt (i.e. Unknown Connection Id) there's no point of
-        * notifying failure since we'll go back to keep trying to
-        * connect. The only exception is explicit connect requests
-        * where a timeout + cancel does indicate an actual failure.
-        */
-       if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
-           (params && params->explicit_connect))
-               mgmt_connect_failed(hdev, &conn->dst, conn->type,
-                                   conn->dst_type, status);
-
-       /* Since we may have temporarily stopped the background scanning in
-        * favor of connection establishment, we should restart it.
-        */
-       hci_update_passive_scan(hdev);
+       hci_connect_le_scan_cleanup(conn, status);
 
        /* Enable advertising in case this was a failed connection
         * attempt as a peripheral.
@@ -1237,15 +1245,15 @@ static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
 {
        struct hci_conn *conn = data;
 
+       bt_dev_dbg(hdev, "err %d", err);
+
        hci_dev_lock(hdev);
 
        if (!err) {
-               hci_connect_le_scan_cleanup(conn);
+               hci_connect_le_scan_cleanup(conn, 0x00);
                goto done;
        }
 
-       bt_dev_err(hdev, "request failed to create LE connection: err %d", err);
-
        /* Check if connection is still pending */
        if (conn != hci_lookup_le_connect(hdev))
                goto done;
@@ -2438,6 +2446,12 @@ void hci_conn_hash_flush(struct hci_dev *hdev)
                c->state = BT_CLOSED;
 
                hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
+
+               /* Unlink before deleting otherwise it is possible that
+                * hci_conn_del removes the link which may cause the list to
+                * contain items already freed.
+                */
+               hci_conn_unlink(c);
                hci_conn_del(c);
        }
 }
@@ -2775,6 +2789,9 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
 {
        int r = 0;
 
+       if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
+               return 0;
+
        switch (conn->state) {
        case BT_CONNECTED:
        case BT_CONFIG:
index ad92a4b..e87c928 100644 (file)
@@ -2881,16 +2881,6 @@ static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
 
        conn->resp_addr_type = peer_addr_type;
        bacpy(&conn->resp_addr, peer_addr);
-
-       /* We don't want the connection attempt to stick around
-        * indefinitely since LE doesn't have a page timeout concept
-        * like BR/EDR. Set a timer for any connection that doesn't use
-        * the accept list for connecting.
-        */
-       if (filter_policy == HCI_LE_USE_PEER_ADDR)
-               queue_delayed_work(conn->hdev->workqueue,
-                                  &conn->le_conn_timeout,
-                                  conn->conn_timeout);
 }
 
 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
@@ -5902,6 +5892,12 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
        if (status)
                goto unlock;
 
+       /* Drop the connection if it has been aborted */
+       if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
+               hci_conn_drop(conn);
+               goto unlock;
+       }
+
        if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
                addr_type = BDADDR_LE_PUBLIC;
        else
@@ -6995,7 +6991,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
                bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
                bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);
 
-               hci_connect_cfm(bis, ev->status);
+               hci_iso_setup_path(bis);
        }
 
        hci_dev_unlock(hdev);
index 5a6aa16..632be12 100644 (file)
@@ -246,8 +246,9 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
 
        skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
        if (IS_ERR(skb)) {
-               bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
-                               PTR_ERR(skb));
+               if (!event)
+                       bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
+                                  PTR_ERR(skb));
                return PTR_ERR(skb);
        }
 
@@ -5126,8 +5127,11 @@ static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
        if (test_bit(HCI_CONN_SCANNING, &conn->flags))
                return 0;
 
+       if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
+               return 0;
+
        return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
-                                    6, &conn->dst, HCI_CMD_TIMEOUT);
+                                    0, NULL, HCI_CMD_TIMEOUT);
 }
 
 static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn)
@@ -6102,6 +6106,9 @@ int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
                                       conn->conn_timeout, NULL);
 
 done:
+       if (err == -ETIMEDOUT)
+               hci_le_connect_cancel_sync(hdev, conn);
+
        /* Re-enable advertising after the connection attempt is finished. */
        hci_resume_advertising_sync(hdev);
        return err;
index bed1a7b..707f229 100644 (file)
@@ -433,7 +433,7 @@ static void hidp_set_timer(struct hidp_session *session)
 static void hidp_del_timer(struct hidp_session *session)
 {
        if (session->idle_to > 0)
-               del_timer(&session->timer);
+               del_timer_sync(&session->timer);
 }
 
 static void hidp_process_report(struct hidp_session *session, int type,
index 49926f5..55a7226 100644 (file)
@@ -4652,33 +4652,27 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
 
        BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
 
-       mutex_lock(&conn->chan_lock);
-
-       chan = __l2cap_get_chan_by_scid(conn, dcid);
+       chan = l2cap_get_chan_by_scid(conn, dcid);
        if (!chan) {
-               mutex_unlock(&conn->chan_lock);
                cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
                return 0;
        }
 
-       l2cap_chan_hold(chan);
-       l2cap_chan_lock(chan);
-
        rsp.dcid = cpu_to_le16(chan->scid);
        rsp.scid = cpu_to_le16(chan->dcid);
        l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
 
        chan->ops->set_shutdown(chan);
 
+       mutex_lock(&conn->chan_lock);
        l2cap_chan_del(chan, ECONNRESET);
+       mutex_unlock(&conn->chan_lock);
 
        chan->ops->close(chan);
 
        l2cap_chan_unlock(chan);
        l2cap_chan_put(chan);
 
-       mutex_unlock(&conn->chan_lock);
-
        return 0;
 }
 
@@ -4698,33 +4692,27 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
 
        BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
 
-       mutex_lock(&conn->chan_lock);
-
-       chan = __l2cap_get_chan_by_scid(conn, scid);
+       chan = l2cap_get_chan_by_scid(conn, scid);
        if (!chan) {
                mutex_unlock(&conn->chan_lock);
                return 0;
        }
 
-       l2cap_chan_hold(chan);
-       l2cap_chan_lock(chan);
-
        if (chan->state != BT_DISCONN) {
                l2cap_chan_unlock(chan);
                l2cap_chan_put(chan);
-               mutex_unlock(&conn->chan_lock);
                return 0;
        }
 
+       mutex_lock(&conn->chan_lock);
        l2cap_chan_del(chan, 0);
+       mutex_unlock(&conn->chan_lock);
 
        chan->ops->close(chan);
 
        l2cap_chan_unlock(chan);
        l2cap_chan_put(chan);
 
-       mutex_unlock(&conn->chan_lock);
-
        return 0;
 }
 
index 1111da4..cd1a27a 100644 (file)
@@ -235,27 +235,41 @@ static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
        return err;
 }
 
-static int sco_connect(struct hci_dev *hdev, struct sock *sk)
+static int sco_connect(struct sock *sk)
 {
        struct sco_conn *conn;
        struct hci_conn *hcon;
+       struct hci_dev  *hdev;
        int err, type;
 
        BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst);
 
+       hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR);
+       if (!hdev)
+               return -EHOSTUNREACH;
+
+       hci_dev_lock(hdev);
+
        if (lmp_esco_capable(hdev) && !disable_esco)
                type = ESCO_LINK;
        else
                type = SCO_LINK;
 
        if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT &&
-           (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev)))
-               return -EOPNOTSUPP;
+           (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) {
+               err = -EOPNOTSUPP;
+               goto unlock;
+       }
 
        hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
                               sco_pi(sk)->setting, &sco_pi(sk)->codec);
-       if (IS_ERR(hcon))
-               return PTR_ERR(hcon);
+       if (IS_ERR(hcon)) {
+               err = PTR_ERR(hcon);
+               goto unlock;
+       }
+
+       hci_dev_unlock(hdev);
+       hci_dev_put(hdev);
 
        conn = sco_conn_add(hcon);
        if (!conn) {
@@ -263,13 +277,15 @@ static int sco_connect(struct hci_dev *hdev, struct sock *sk)
                return -ENOMEM;
        }
 
-       /* Update source addr of the socket */
-       bacpy(&sco_pi(sk)->src, &hcon->src);
-
        err = sco_chan_add(conn, sk, NULL);
        if (err)
                return err;
 
+       lock_sock(sk);
+
+       /* Update source addr of the socket */
+       bacpy(&sco_pi(sk)->src, &hcon->src);
+
        if (hcon->state == BT_CONNECTED) {
                sco_sock_clear_timer(sk);
                sk->sk_state = BT_CONNECTED;
@@ -278,6 +294,13 @@ static int sco_connect(struct hci_dev *hdev, struct sock *sk)
                sco_sock_set_timer(sk, sk->sk_sndtimeo);
        }
 
+       release_sock(sk);
+
+       return err;
+
+unlock:
+       hci_dev_unlock(hdev);
+       hci_dev_put(hdev);
        return err;
 }
 
@@ -565,7 +588,6 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
 {
        struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
        struct sock *sk = sock->sk;
-       struct hci_dev  *hdev;
        int err;
 
        BT_DBG("sk %p", sk);
@@ -574,37 +596,26 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
            addr->sa_family != AF_BLUETOOTH)
                return -EINVAL;
 
-       lock_sock(sk);
-       if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
-               err = -EBADFD;
-               goto done;
-       }
+       if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
+               return -EBADFD;
 
-       if (sk->sk_type != SOCK_SEQPACKET) {
+       if (sk->sk_type != SOCK_SEQPACKET)
                err = -EINVAL;
-               goto done;
-       }
-
-       hdev = hci_get_route(&sa->sco_bdaddr, &sco_pi(sk)->src, BDADDR_BREDR);
-       if (!hdev) {
-               err = -EHOSTUNREACH;
-               goto done;
-       }
-       hci_dev_lock(hdev);
 
+       lock_sock(sk);
        /* Set destination address and psm */
        bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);
+       release_sock(sk);
 
-       err = sco_connect(hdev, sk);
-       hci_dev_unlock(hdev);
-       hci_dev_put(hdev);
+       err = sco_connect(sk);
        if (err)
-               goto done;
+               return err;
+
+       lock_sock(sk);
 
        err = bt_sock_wait_state(sk, BT_CONNECTED,
                                 sock_sndtimeo(sk, flags & O_NONBLOCK));
 
-done:
        release_sock(sk);
        return err;
 }
@@ -1129,6 +1140,8 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
                        break;
                }
 
+               release_sock(sk);
+
                /* find total buffer size required to copy codec + caps */
                hci_dev_lock(hdev);
                list_for_each_entry(c, &hdev->local_codecs, list) {
@@ -1146,15 +1159,13 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
                buf_len += sizeof(struct bt_codecs);
                if (buf_len > len) {
                        hci_dev_put(hdev);
-                       err = -ENOBUFS;
-                       break;
+                       return -ENOBUFS;
                }
                ptr = optval;
 
                if (put_user(num_codecs, ptr)) {
                        hci_dev_put(hdev);
-                       err = -EFAULT;
-                       break;
+                       return -EFAULT;
                }
                ptr += sizeof(num_codecs);
 
@@ -1194,12 +1205,14 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
                        ptr += len;
                }
 
-               if (!err && put_user(buf_len, optlen))
-                       err = -EFAULT;
-
                hci_dev_unlock(hdev);
                hci_dev_put(hdev);
 
+               lock_sock(sk);
+
+               if (!err && put_user(buf_len, optlen))
+                       err = -EFAULT;
+
                break;
 
        default:
index 27706f6..a962ec2 100644 (file)
@@ -941,6 +941,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 
                        cf = op->frames + op->cfsiz * i;
                        err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
+                       if (err < 0)
+                               goto free_op;
 
                        if (op->flags & CAN_FD_FRAME) {
                                if (cf->len > 64)
@@ -950,12 +952,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                                        err = -EINVAL;
                        }
 
-                       if (err < 0) {
-                               if (op->frames != &op->sframe)
-                                       kfree(op->frames);
-                               kfree(op);
-                               return err;
-                       }
+                       if (err < 0)
+                               goto free_op;
 
                        if (msg_head->flags & TX_CP_CAN_ID) {
                                /* copy can_id into frame */
@@ -1026,6 +1024,12 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                bcm_tx_start_timer(op);
 
        return msg_head->nframes * op->cfsiz + MHSIZ;
+
+free_op:
+       if (op->frames != &op->sframe)
+               kfree(op->frames);
+       kfree(op);
+       return err;
 }
 
 /*
index 9bc3448..5761d4a 100644 (file)
@@ -119,7 +119,8 @@ enum {
        ISOTP_WAIT_FIRST_FC,
        ISOTP_WAIT_FC,
        ISOTP_WAIT_DATA,
-       ISOTP_SENDING
+       ISOTP_SENDING,
+       ISOTP_SHUTDOWN,
 };
 
 struct tpcon {
@@ -880,8 +881,8 @@ static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
                                             txtimer);
        struct sock *sk = &so->sk;
 
-       /* don't handle timeouts in IDLE state */
-       if (so->tx.state == ISOTP_IDLE)
+       /* don't handle timeouts in IDLE or SHUTDOWN state */
+       if (so->tx.state == ISOTP_IDLE || so->tx.state == ISOTP_SHUTDOWN)
                return HRTIMER_NORESTART;
 
        /* we did not get any flow control or echo frame in time */
@@ -918,7 +919,6 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 {
        struct sock *sk = sock->sk;
        struct isotp_sock *so = isotp_sk(sk);
-       u32 old_state = so->tx.state;
        struct sk_buff *skb;
        struct net_device *dev;
        struct canfd_frame *cf;
@@ -928,23 +928,24 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        int off;
        int err;
 
-       if (!so->bound)
+       if (!so->bound || so->tx.state == ISOTP_SHUTDOWN)
                return -EADDRNOTAVAIL;
 
+wait_free_buffer:
        /* we do not support multiple buffers - for now */
-       if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE ||
-           wq_has_sleeper(&so->wait)) {
-               if (msg->msg_flags & MSG_DONTWAIT) {
-                       err = -EAGAIN;
-                       goto err_out;
-               }
+       if (wq_has_sleeper(&so->wait) && (msg->msg_flags & MSG_DONTWAIT))
+               return -EAGAIN;
 
-               /* wait for complete transmission of current pdu */
-               err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
-               if (err)
-                       goto err_out;
+       /* wait for complete transmission of current pdu */
+       err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+       if (err)
+               goto err_event_drop;
 
-               so->tx.state = ISOTP_SENDING;
+       if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
+               if (so->tx.state == ISOTP_SHUTDOWN)
+                       return -EADDRNOTAVAIL;
+
+               goto wait_free_buffer;
        }
 
        if (!size || size > MAX_MSG_LENGTH) {
@@ -1074,7 +1075,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 
        if (wait_tx_done) {
                /* wait for complete transmission of current pdu */
-               wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+               err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+               if (err)
+                       goto err_event_drop;
 
                if (sk->sk_err)
                        return -sk->sk_err;
@@ -1082,13 +1085,15 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 
        return size;
 
+err_event_drop:
+       /* got signal: force tx state machine to be idle */
+       so->tx.state = ISOTP_IDLE;
+       hrtimer_cancel(&so->txfrtimer);
+       hrtimer_cancel(&so->txtimer);
 err_out_drop:
        /* drop this PDU and unlock a potential wait queue */
-       old_state = ISOTP_IDLE;
-err_out:
-       so->tx.state = old_state;
-       if (so->tx.state == ISOTP_IDLE)
-               wake_up_interruptible(&so->wait);
+       so->tx.state = ISOTP_IDLE;
+       wake_up_interruptible(&so->wait);
 
        return err;
 }
@@ -1120,7 +1125,7 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
        if (ret < 0)
                goto out_err;
 
-       sock_recv_timestamp(msg, sk, skb);
+       sock_recv_cmsgs(msg, sk, skb);
 
        if (msg->msg_name) {
                __sockaddr_check_size(ISOTP_MIN_NAMELEN);
@@ -1150,10 +1155,12 @@ static int isotp_release(struct socket *sock)
        net = sock_net(sk);
 
        /* wait for complete transmission of current pdu */
-       wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+       while (wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE) == 0 &&
+              cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SHUTDOWN) != ISOTP_IDLE)
+               ;
 
        /* force state machines to be idle also when a signal occurred */
-       so->tx.state = ISOTP_IDLE;
+       so->tx.state = ISOTP_SHUTDOWN;
        so->rx.state = ISOTP_IDLE;
 
        spin_lock(&isotp_notifier_lock);
@@ -1608,6 +1615,21 @@ static int isotp_init(struct sock *sk)
        return 0;
 }
 
+static __poll_t isotp_poll(struct file *file, struct socket *sock, poll_table *wait)
+{
+       struct sock *sk = sock->sk;
+       struct isotp_sock *so = isotp_sk(sk);
+
+       __poll_t mask = datagram_poll(file, sock, wait);
+       poll_wait(file, &so->wait, wait);
+
+       /* Check for false positives due to TX state */
+       if ((mask & EPOLLWRNORM) && (so->tx.state != ISOTP_IDLE))
+               mask &= ~(EPOLLOUT | EPOLLWRNORM);
+
+       return mask;
+}
+
 static int isotp_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
                                  unsigned long arg)
 {
@@ -1623,7 +1645,7 @@ static const struct proto_ops isotp_ops = {
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = isotp_getname,
-       .poll = datagram_poll,
+       .poll = isotp_poll,
        .ioctl = isotp_sock_no_ioctlcmd,
        .gettstamp = sock_gettstamp,
        .listen = sock_no_listen,
index fce9b9e..fe3df23 100644 (file)
@@ -604,7 +604,10 @@ sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv,
        /* reserve CAN header */
        skb_reserve(skb, offsetof(struct can_frame, data));
 
-       memcpy(skb->cb, re_skcb, sizeof(skb->cb));
+       /* skb->cb must be large enough to hold a j1939_sk_buff_cb structure */
+       BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*re_skcb));
+
+       memcpy(skb->cb, re_skcb, sizeof(*re_skcb));
        skcb = j1939_skb_to_cb(skb);
        if (swap_src_dst)
                j1939_skbcb_swap(skcb);
@@ -1124,8 +1127,6 @@ static void __j1939_session_cancel(struct j1939_session *session,
 
        if (session->sk)
                j1939_sk_send_loop_abort(session->sk, session->err);
-       else
-               j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_ABORT);
 }
 
 static void j1939_session_cancel(struct j1939_session *session,
@@ -1140,6 +1141,9 @@ static void j1939_session_cancel(struct j1939_session *session,
        }
 
        j1939_session_list_unlock(session->priv);
+
+       if (!session->sk)
+               j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_ABORT);
 }
 
 static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
@@ -1253,6 +1257,9 @@ static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
                        __j1939_session_cancel(session, J1939_XTP_ABORT_TIMEOUT);
                }
                j1939_session_list_unlock(session->priv);
+
+               if (!session->sk)
+                       j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_ABORT);
        }
 
        j1939_session_put(session);
index 2535847..1488f70 100644 (file)
@@ -3199,6 +3199,7 @@ static u16 skb_tx_hash(const struct net_device *dev,
        }
 
        if (skb_rx_queue_recorded(skb)) {
+               DEBUG_NET_WARN_ON_ONCE(qcount == 0);
                hash = skb_get_rx_queue(skb);
                if (hash >= qoffset)
                        hash -= qoffset;
@@ -10846,7 +10847,7 @@ void unregister_netdevice_many_notify(struct list_head *head,
                    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
                        skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
                                                     GFP_KERNEL, NULL, 0,
-                                                    portid, nlmsg_seq(nlh));
+                                                    portid, nlh);
 
                /*
                 *      Flush the unicast and multicast chains
index a089b70..e6a739b 100644 (file)
@@ -137,6 +137,20 @@ static void queue_process(struct work_struct *work)
        }
 }
 
+static int netif_local_xmit_active(struct net_device *dev)
+{
+       int i;
+
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+               if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
+                       return 1;
+       }
+
+       return 0;
+}
+
 static void poll_one_napi(struct napi_struct *napi)
 {
        int work;
@@ -183,7 +197,10 @@ void netpoll_poll_dev(struct net_device *dev)
        if (!ni || down_trylock(&ni->dev_lock))
                return;
 
-       if (!netif_running(dev)) {
+       /* Some drivers will take the same locks in poll and xmit,
+        * we can't poll if local CPU is already in xmit.
+        */
+       if (!netif_running(dev) || netif_local_xmit_active(dev)) {
                up(&ni->dev_lock);
                return;
        }
index 5d8eb57..6e44e92 100644 (file)
@@ -3972,16 +3972,23 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
 struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
                                       unsigned int change,
                                       u32 event, gfp_t flags, int *new_nsid,
-                                      int new_ifindex, u32 portid, u32 seq)
+                                      int new_ifindex, u32 portid,
+                                      const struct nlmsghdr *nlh)
 {
        struct net *net = dev_net(dev);
        struct sk_buff *skb;
        int err = -ENOBUFS;
+       u32 seq = 0;
 
        skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
        if (skb == NULL)
                goto errout;
 
+       if (nlmsg_report(nlh))
+               seq = nlmsg_seq(nlh);
+       else
+               portid = 0;
+
        err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
                               type, portid, seq, change, 0, 0, event,
                               new_nsid, new_ifindex, -1, flags);
@@ -4017,7 +4024,7 @@ static void rtmsg_ifinfo_event(int type, struct net_device *dev,
                return;
 
        skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
-                                    new_ifindex, portid, nlmsg_seq(nlh));
+                                    new_ifindex, portid, nlh);
        if (skb)
                rtmsg_ifinfo_send(skb, dev, flags, portid, nlh);
 }
index 1a31815..4c08797 100644 (file)
@@ -5599,18 +5599,18 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
        if (skb_cloned(to))
                return false;
 
-       /* In general, avoid mixing slab allocated and page_pool allocated
-        * pages within the same SKB. However when @to is not pp_recycle and
-        * @from is cloned, we can transition frag pages from page_pool to
-        * reference counted.
-        *
-        * On the other hand, don't allow coalescing two pp_recycle SKBs if
-        * @from is cloned, in case the SKB is using page_pool fragment
+       /* In general, avoid mixing page_pool and non-page_pool allocated
+        * pages within the same SKB. Additionally avoid dealing with clones
+        * with page_pool pages, in case the SKB is using page_pool fragment
         * references (PP_FLAG_PAGE_FRAG). Since we only take full page
         * references for cloned SKBs at the moment that would result in
         * inconsistent reference counts.
+        * In theory we could take full references if @from is cloned and
+        * !@to->pp_recycle but its tricky (due to potential race with
+        * the clone disappearing) and rare, so not worth dealing with.
         */
-       if (to->pp_recycle != (from->pp_recycle && !skb_cloned(from)))
+       if (to->pp_recycle != from->pp_recycle ||
+           (from->pp_recycle && skb_cloned(from)))
                return false;
 
        if (len <= skb_tailroom(to)) {
index 528d4b3..fb85aca 100644 (file)
@@ -734,13 +734,21 @@ __bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *tim
  * bpf_xdp_metadata_rx_hash - Read XDP frame RX hash.
  * @ctx: XDP context pointer.
  * @hash: Return value pointer.
+ * @rss_type: Return value pointer for RSS type.
+ *
+ * The RSS hash type (@rss_type) specifies what portion of packet headers NIC
+ * hardware used when calculating RSS hash value.  The RSS type can be decoded
+ * via &enum xdp_rss_hash_type either matching on individual L3/L4 bits
+ * ``XDP_RSS_L*`` or by combined traditional *RSS Hashing Types*
+ * ``XDP_RSS_TYPE_L*``.
  *
  * Return:
  * * Returns 0 on success or ``-errno`` on error.
  * * ``-EOPNOTSUPP`` : means device driver doesn't implement kfunc
  * * ``-ENODATA``    : means no RX-hash available for this frame
  */
-__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash)
+__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash,
+                                        enum xdp_rss_hash_type *rss_type)
 {
        return -EOPNOTSUPP;
 }
index cac1718..165bb2c 100644 (file)
@@ -57,6 +57,12 @@ struct dsa_standalone_event_work {
        u16 vid;
 };
 
+struct dsa_host_vlan_rx_filtering_ctx {
+       struct net_device *dev;
+       const unsigned char *addr;
+       enum dsa_standalone_event event;
+};
+
 static bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds)
 {
        return ds->ops->port_fdb_add && ds->ops->port_fdb_del &&
@@ -155,18 +161,37 @@ static int dsa_slave_schedule_standalone_work(struct net_device *dev,
        return 0;
 }
 
+static int dsa_slave_host_vlan_rx_filtering(struct net_device *vdev, int vid,
+                                           void *arg)
+{
+       struct dsa_host_vlan_rx_filtering_ctx *ctx = arg;
+
+       return dsa_slave_schedule_standalone_work(ctx->dev, ctx->event,
+                                                 ctx->addr, vid);
+}
+
 static int dsa_slave_sync_uc(struct net_device *dev,
                             const unsigned char *addr)
 {
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_port *dp = dsa_slave_to_port(dev);
+       struct dsa_host_vlan_rx_filtering_ctx ctx = {
+               .dev = dev,
+               .addr = addr,
+               .event = DSA_UC_ADD,
+       };
+       int err;
 
        dev_uc_add(master, addr);
 
        if (!dsa_switch_supports_uc_filtering(dp->ds))
                return 0;
 
-       return dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
+       err = dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
+       if (err)
+               return err;
+
+       return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx);
 }
 
 static int dsa_slave_unsync_uc(struct net_device *dev,
@@ -174,13 +199,23 @@ static int dsa_slave_unsync_uc(struct net_device *dev,
 {
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_port *dp = dsa_slave_to_port(dev);
+       struct dsa_host_vlan_rx_filtering_ctx ctx = {
+               .dev = dev,
+               .addr = addr,
+               .event = DSA_UC_DEL,
+       };
+       int err;
 
        dev_uc_del(master, addr);
 
        if (!dsa_switch_supports_uc_filtering(dp->ds))
                return 0;
 
-       return dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
+       err = dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
+       if (err)
+               return err;
+
+       return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx);
 }
 
 static int dsa_slave_sync_mc(struct net_device *dev,
@@ -188,13 +223,23 @@ static int dsa_slave_sync_mc(struct net_device *dev,
 {
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_port *dp = dsa_slave_to_port(dev);
+       struct dsa_host_vlan_rx_filtering_ctx ctx = {
+               .dev = dev,
+               .addr = addr,
+               .event = DSA_MC_ADD,
+       };
+       int err;
 
        dev_mc_add(master, addr);
 
        if (!dsa_switch_supports_mc_filtering(dp->ds))
                return 0;
 
-       return dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
+       err = dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
+       if (err)
+               return err;
+
+       return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx);
 }
 
 static int dsa_slave_unsync_mc(struct net_device *dev,
@@ -202,13 +247,23 @@ static int dsa_slave_unsync_mc(struct net_device *dev,
 {
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_port *dp = dsa_slave_to_port(dev);
+       struct dsa_host_vlan_rx_filtering_ctx ctx = {
+               .dev = dev,
+               .addr = addr,
+               .event = DSA_MC_DEL,
+       };
+       int err;
 
        dev_mc_del(master, addr);
 
        if (!dsa_switch_supports_mc_filtering(dp->ds))
                return 0;
 
-       return dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
+       err = dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
+       if (err)
+               return err;
+
+       return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx);
 }
 
 void dsa_slave_sync_ha(struct net_device *dev)
@@ -1702,6 +1757,8 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
                .flags = 0,
        };
        struct netlink_ext_ack extack = {0};
+       struct dsa_switch *ds = dp->ds;
+       struct netdev_hw_addr *ha;
        int ret;
 
        /* User port... */
@@ -1721,6 +1778,30 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
                return ret;
        }
 
+       if (!dsa_switch_supports_uc_filtering(ds) &&
+           !dsa_switch_supports_mc_filtering(ds))
+               return 0;
+
+       netif_addr_lock_bh(dev);
+
+       if (dsa_switch_supports_mc_filtering(ds)) {
+               netdev_for_each_synced_mc_addr(ha, dev) {
+                       dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD,
+                                                          ha->addr, vid);
+               }
+       }
+
+       if (dsa_switch_supports_uc_filtering(ds)) {
+               netdev_for_each_synced_uc_addr(ha, dev) {
+                       dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD,
+                                                          ha->addr, vid);
+               }
+       }
+
+       netif_addr_unlock_bh(dev);
+
+       dsa_flush_workqueue();
+
        return 0;
 }
 
@@ -1733,13 +1814,43 @@ static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
                /* This API only allows programming tagged, non-PVID VIDs */
                .flags = 0,
        };
+       struct dsa_switch *ds = dp->ds;
+       struct netdev_hw_addr *ha;
        int err;
 
        err = dsa_port_vlan_del(dp, &vlan);
        if (err)
                return err;
 
-       return dsa_port_host_vlan_del(dp, &vlan);
+       err = dsa_port_host_vlan_del(dp, &vlan);
+       if (err)
+               return err;
+
+       if (!dsa_switch_supports_uc_filtering(ds) &&
+           !dsa_switch_supports_mc_filtering(ds))
+               return 0;
+
+       netif_addr_lock_bh(dev);
+
+       if (dsa_switch_supports_mc_filtering(ds)) {
+               netdev_for_each_synced_mc_addr(ha, dev) {
+                       dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL,
+                                                          ha->addr, vid);
+               }
+       }
+
+       if (dsa_switch_supports_uc_filtering(ds)) {
+               netdev_for_each_synced_uc_addr(ha, dev) {
+                       dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL,
+                                                          ha->addr, vid);
+               }
+       }
+
+       netif_addr_unlock_bh(dev);
+
+       dsa_flush_workqueue();
+
+       return 0;
 }
 
 static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
index fab66c1..20165e0 100644 (file)
@@ -270,11 +270,12 @@ static int ethnl_update_linkmodes(struct genl_info *info, struct nlattr **tb,
                                            "lanes configuration not supported by device");
                        return -EOPNOTSUPP;
                }
-       } else if (!lsettings->autoneg) {
-               /* If autoneg is off and lanes parameter is not passed from user,
-                * set the lanes parameter to 0.
+       } else if (!lsettings->autoneg && ksettings->lanes) {
+               /* If autoneg is off and lanes parameter is not passed from user but
+                * it was defined previously then set the lanes parameter to 0.
                 */
                ksettings->lanes = 0;
+               *mod = true;
        }
 
        ret = ethnl_update_bitset(ksettings->link_modes.advertising,
index d8f4379..832e3c5 100644 (file)
@@ -2488,8 +2488,7 @@ static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
        if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
                return -EOPNOTSUPP;
 
-       if (!info->attrs[NL802154_ATTR_SEC_LEVEL] ||
-           llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
+       if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
                                 &sl) < 0)
                return -EINVAL;
 
index 8cebb47..b860776 100644 (file)
@@ -749,6 +749,11 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
                room = 576;
        room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;
        room -= sizeof(struct icmphdr);
+       /* Guard against tiny mtu. We need to include at least one
+        * IP network header for this message to make any sense.
+        */
+       if (room <= (int)sizeof(struct iphdr))
+               goto ende;
 
        icmp_param.data_len = skb_in->len - icmp_param.offset;
        if (icmp_param.data_len > room)
index 409ec2a..5178a3f 100644 (file)
@@ -1089,13 +1089,13 @@ static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos)
 }
 
 void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family)
-       __acquires(RCU)
+       __acquires(ping_table.lock)
 {
        struct ping_iter_state *state = seq->private;
        state->bucket = 0;
        state->family = family;
 
-       rcu_read_lock();
+       spin_lock(&ping_table.lock);
 
        return *pos ? ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
 }
@@ -1121,9 +1121,9 @@ void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 EXPORT_SYMBOL_GPL(ping_seq_next);
 
 void ping_seq_stop(struct seq_file *seq, void *v)
-       __releases(RCU)
+       __releases(ping_table.lock)
 {
-       rcu_read_unlock();
+       spin_unlock(&ping_table.lock);
 }
 EXPORT_SYMBOL_GPL(ping_seq_stop);
 
index 94df935..8088a50 100644 (file)
@@ -91,12 +91,12 @@ EXPORT_SYMBOL_GPL(raw_v4_hashinfo);
 int raw_hash_sk(struct sock *sk)
 {
        struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
-       struct hlist_nulls_head *hlist;
+       struct hlist_head *hlist;
 
        hlist = &h->ht[raw_hashfunc(sock_net(sk), inet_sk(sk)->inet_num)];
 
        spin_lock(&h->lock);
-       __sk_nulls_add_node_rcu(sk, hlist);
+       sk_add_node_rcu(sk, hlist);
        sock_set_flag(sk, SOCK_RCU_FREE);
        spin_unlock(&h->lock);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
@@ -110,7 +110,7 @@ void raw_unhash_sk(struct sock *sk)
        struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
 
        spin_lock(&h->lock);
-       if (__sk_nulls_del_node_init_rcu(sk))
+       if (sk_del_node_init_rcu(sk))
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
        spin_unlock(&h->lock);
 }
@@ -163,16 +163,15 @@ static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
 static int raw_v4_input(struct net *net, struct sk_buff *skb,
                        const struct iphdr *iph, int hash)
 {
-       struct hlist_nulls_head *hlist;
-       struct hlist_nulls_node *hnode;
        int sdif = inet_sdif(skb);
+       struct hlist_head *hlist;
        int dif = inet_iif(skb);
        int delivered = 0;
        struct sock *sk;
 
        hlist = &raw_v4_hashinfo.ht[hash];
        rcu_read_lock();
-       sk_nulls_for_each(sk, hnode, hlist) {
+       sk_for_each_rcu(sk, hlist) {
                if (!raw_v4_match(net, sk, iph->protocol,
                                  iph->saddr, iph->daddr, dif, sdif))
                        continue;
@@ -264,10 +263,9 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
 void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
 {
        struct net *net = dev_net(skb->dev);
-       struct hlist_nulls_head *hlist;
-       struct hlist_nulls_node *hnode;
        int dif = skb->dev->ifindex;
        int sdif = inet_sdif(skb);
+       struct hlist_head *hlist;
        const struct iphdr *iph;
        struct sock *sk;
        int hash;
@@ -276,7 +274,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
        hlist = &raw_v4_hashinfo.ht[hash];
 
        rcu_read_lock();
-       sk_nulls_for_each(sk, hnode, hlist) {
+       sk_for_each_rcu(sk, hlist) {
                iph = (const struct iphdr *)skb->data;
                if (!raw_v4_match(net, sk, iph->protocol,
                                  iph->daddr, iph->saddr, dif, sdif))
@@ -950,14 +948,13 @@ static struct sock *raw_get_first(struct seq_file *seq, int bucket)
 {
        struct raw_hashinfo *h = pde_data(file_inode(seq->file));
        struct raw_iter_state *state = raw_seq_private(seq);
-       struct hlist_nulls_head *hlist;
-       struct hlist_nulls_node *hnode;
+       struct hlist_head *hlist;
        struct sock *sk;
 
        for (state->bucket = bucket; state->bucket < RAW_HTABLE_SIZE;
                        ++state->bucket) {
                hlist = &h->ht[state->bucket];
-               sk_nulls_for_each(sk, hnode, hlist) {
+               sk_for_each(sk, hlist) {
                        if (sock_net(sk) == seq_file_net(seq))
                                return sk;
                }
@@ -970,7 +967,7 @@ static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk)
        struct raw_iter_state *state = raw_seq_private(seq);
 
        do {
-               sk = sk_nulls_next(sk);
+               sk = sk_next(sk);
        } while (sk && sock_net(sk) != seq_file_net(seq));
 
        if (!sk)
@@ -989,9 +986,12 @@ static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos)
 }
 
 void *raw_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(RCU)
+       __acquires(&h->lock)
 {
-       rcu_read_lock();
+       struct raw_hashinfo *h = pde_data(file_inode(seq->file));
+
+       spin_lock(&h->lock);
+
        return *pos ? raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
 }
 EXPORT_SYMBOL_GPL(raw_seq_start);
@@ -1010,9 +1010,11 @@ void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 EXPORT_SYMBOL_GPL(raw_seq_next);
 
 void raw_seq_stop(struct seq_file *seq, void *v)
-       __releases(RCU)
+       __releases(&h->lock)
 {
-       rcu_read_unlock();
+       struct raw_hashinfo *h = pde_data(file_inode(seq->file));
+
+       spin_unlock(&h->lock);
 }
 EXPORT_SYMBOL_GPL(raw_seq_stop);
 
index 9993218..da3591a 100644 (file)
@@ -57,8 +57,7 @@ static bool raw_lookup(struct net *net, struct sock *sk,
 static struct sock *raw_sock_get(struct net *net, const struct inet_diag_req_v2 *r)
 {
        struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
-       struct hlist_nulls_head *hlist;
-       struct hlist_nulls_node *hnode;
+       struct hlist_head *hlist;
        struct sock *sk;
        int slot;
 
@@ -68,7 +67,7 @@ static struct sock *raw_sock_get(struct net *net, const struct inet_diag_req_v2
        rcu_read_lock();
        for (slot = 0; slot < RAW_HTABLE_SIZE; slot++) {
                hlist = &hashinfo->ht[slot];
-               sk_nulls_for_each(sk, hnode, hlist) {
+               sk_for_each_rcu(sk, hlist) {
                        if (raw_lookup(net, sk, r)) {
                                /*
                                 * Grab it and keep until we fill
@@ -142,9 +141,8 @@ static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
        struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
        struct net *net = sock_net(skb->sk);
        struct inet_diag_dump_data *cb_data;
-       struct hlist_nulls_head *hlist;
-       struct hlist_nulls_node *hnode;
        int num, s_num, slot, s_slot;
+       struct hlist_head *hlist;
        struct sock *sk = NULL;
        struct nlattr *bc;
 
@@ -161,7 +159,7 @@ static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
                num = 0;
 
                hlist = &hashinfo->ht[slot];
-               sk_nulls_for_each(sk, hnode, hlist) {
+               sk_for_each_rcu(sk, hlist) {
                        struct inet_sock *inet = inet_sk(sk);
 
                        if (!net_eq(sock_net(sk), net))
index 0d0cc4e..40fe70f 100644 (file)
@@ -25,6 +25,7 @@ static int ip_local_port_range_min[] = { 1, 1 };
 static int ip_local_port_range_max[] = { 65535, 65535 };
 static int tcp_adv_win_scale_min = -31;
 static int tcp_adv_win_scale_max = 31;
+static int tcp_app_win_max = 31;
 static int tcp_min_snd_mss_min = TCP_MIN_SND_MSS;
 static int tcp_min_snd_mss_max = 65535;
 static int ip_privileged_port_min;
@@ -1198,6 +1199,8 @@ static struct ctl_table ipv4_net_table[] = {
                .maxlen         = sizeof(u8),
                .mode           = 0644,
                .proc_handler   = proc_dou8vec_minmax,
+               .extra1         = SYSCTL_ZERO,
+               .extra2         = &tcp_app_win_max,
        },
        {
                .procname       = "tcp_adv_win_scale",
index ea370af..b9d5527 100644 (file)
@@ -2780,7 +2780,7 @@ static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
 static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
 {
        while (iter->cur_sk < iter->end_sk)
-               sock_put(iter->batch[iter->cur_sk++]);
+               sock_gen_put(iter->batch[iter->cur_sk++]);
 }
 
 static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
@@ -2941,7 +2941,7 @@ static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                 * st->bucket.  See tcp_seek_last_pos().
                 */
                st->offset++;
-               sock_put(iter->batch[iter->cur_sk++]);
+               sock_gen_put(iter->batch[iter->cur_sk++]);
        }
 
        if (iter->cur_sk < iter->end_sk)
index c314fdd..95a55c6 100644 (file)
@@ -1965,8 +1965,13 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
        IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
        if (proto == IPPROTO_ICMPV6) {
                struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+               u8 icmp6_type;
 
-               ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
+               if (sk->sk_socket->type == SOCK_RAW && !inet_sk(sk)->hdrincl)
+                       icmp6_type = fl6->fl6_icmp_type;
+               else
+                       icmp6_type = icmp6_hdr(skb)->icmp6_type;
+               ICMP6MSGOUT_INC_STATS(net, idev, icmp6_type);
                ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
        }
 
index bac9ba7..a327aa4 100644 (file)
@@ -141,10 +141,9 @@ EXPORT_SYMBOL(rawv6_mh_filter_unregister);
 static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
 {
        struct net *net = dev_net(skb->dev);
-       struct hlist_nulls_head *hlist;
-       struct hlist_nulls_node *hnode;
        const struct in6_addr *saddr;
        const struct in6_addr *daddr;
+       struct hlist_head *hlist;
        struct sock *sk;
        bool delivered = false;
        __u8 hash;
@@ -155,7 +154,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
        hash = raw_hashfunc(net, nexthdr);
        hlist = &raw_v6_hashinfo.ht[hash];
        rcu_read_lock();
-       sk_nulls_for_each(sk, hnode, hlist) {
+       sk_for_each_rcu(sk, hlist) {
                int filtered;
 
                if (!raw_v6_match(net, sk, nexthdr, daddr, saddr,
@@ -333,15 +332,14 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
                u8 type, u8 code, int inner_offset, __be32 info)
 {
        struct net *net = dev_net(skb->dev);
-       struct hlist_nulls_head *hlist;
-       struct hlist_nulls_node *hnode;
+       struct hlist_head *hlist;
        struct sock *sk;
        int hash;
 
        hash = raw_hashfunc(net, nexthdr);
        hlist = &raw_v6_hashinfo.ht[hash];
        rcu_read_lock();
-       sk_nulls_for_each(sk, hnode, hlist) {
+       sk_for_each_rcu(sk, hlist) {
                /* Note: ipv6_hdr(skb) != skb->data */
                const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
 
index 9fb2f33..a675acf 100644 (file)
@@ -1395,9 +1395,11 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                        msg->msg_name = &sin;
                        msg->msg_namelen = sizeof(sin);
 do_udp_sendmsg:
-                       if (ipv6_only_sock(sk))
-                               return -ENETUNREACH;
-                       return udp_sendmsg(sk, msg, len);
+                       err = ipv6_only_sock(sk) ?
+                               -ENETUNREACH : udp_sendmsg(sk, msg, len);
+                       msg->msg_name = sin6;
+                       msg->msg_namelen = addr_len;
+                       return err;
                }
        }
 
index 4db5a55..41a74fc 100644 (file)
@@ -677,8 +677,8 @@ MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
 MODULE_DESCRIPTION("L2TP over IP");
 MODULE_VERSION("1.0");
 
-/* Use the value of SOCK_DGRAM (2) directory, because __stringify doesn't like
- * enums
+/* Use the values of SOCK_DGRAM (2) as type and IPPROTO_L2TP (115) as protocol,
+ * because __stringify doesn't like enums
  */
-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
-MODULE_ALIAS_NET_PF_PROTO(PF_INET, IPPROTO_L2TP);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 115, 2);
+MODULE_ALIAS_NET_PF_PROTO(PF_INET, 115);
index 2478aa6..5137ea1 100644 (file)
@@ -806,8 +806,8 @@ MODULE_AUTHOR("Chris Elston <celston@katalix.com>");
 MODULE_DESCRIPTION("L2TP IP encapsulation for IPv6");
 MODULE_VERSION("1.0");
 
-/* Use the value of SOCK_DGRAM (2) directory, because __stringify doesn't like
- * enums
+/* Use the values of SOCK_DGRAM (2) as type and IPPROTO_L2TP (115) as protocol,
+ * because __stringify doesn't like enums
  */
-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 2, IPPROTO_L2TP);
-MODULE_ALIAS_NET_PF_PROTO(PF_INET6, IPPROTO_L2TP);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 115, 2);
+MODULE_ALIAS_NET_PF_PROTO(PF_INET6, 115);
index e8de500..af57616 100644 (file)
@@ -2769,14 +2769,6 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta
        if (sdata->crypto_tx_tailroom_needed_cnt)
                tailroom = IEEE80211_ENCRYPT_TAILROOM;
 
-       if (!--mesh_hdr->ttl) {
-               if (multicast)
-                       goto rx_accept;
-
-               IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
-               return RX_DROP_MONITOR;
-       }
-
        if (mesh_hdr->flags & MESH_FLAGS_AE) {
                struct mesh_path *mppath;
                char *proxied_addr;
@@ -2807,6 +2799,14 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta
        if (ether_addr_equal(sdata->vif.addr, eth->h_dest))
                goto rx_accept;
 
+       if (!--mesh_hdr->ttl) {
+               if (multicast)
+                       goto rx_accept;
+
+               IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
+               return RX_DROP_MONITOR;
+       }
+
        if (!ifmsh->mshcfg.dot11MeshForwarding) {
                if (is_multicast_ether_addr(eth->h_dest))
                        goto rx_accept;
@@ -2833,6 +2833,9 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta
 
                if (skb_cow_head(fwd_skb, hdrlen - sizeof(struct ethhdr)))
                        return RX_DROP_UNUSABLE;
+
+               if (skb_linearize(fwd_skb))
+                       return RX_DROP_UNUSABLE;
        }
 
        fwd_hdr = skb_push(fwd_skb, hdrlen - sizeof(struct ethhdr));
@@ -2847,7 +2850,7 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta
                hdrlen += ETH_ALEN;
        else
                fwd_skb->protocol = htons(fwd_skb->len - hdrlen);
-       skb_set_network_header(fwd_skb, hdrlen);
+       skb_set_network_header(fwd_skb, hdrlen + 2);
 
        info = IEEE80211_SKB_CB(fwd_skb);
        memset(info, 0, sizeof(*info));
@@ -2896,7 +2899,7 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        __le16 fc = hdr->frame_control;
        struct sk_buff_head frame_list;
-       static ieee80211_rx_result res;
+       ieee80211_rx_result res;
        struct ethhdr ethhdr;
        const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
 
@@ -2930,7 +2933,7 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
                                          data_offset, true))
                return RX_DROP_UNUSABLE;
 
-       if (rx->sta && rx->sta->amsdu_mesh_control < 0) {
+       if (rx->sta->amsdu_mesh_control < 0) {
                bool valid_std = ieee80211_is_valid_amsdu(skb, true);
                bool valid_nonstd = ieee80211_is_valid_amsdu(skb, false);
 
@@ -3006,7 +3009,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
                }
        }
 
-       if (is_multicast_ether_addr(hdr->addr1))
+       if (is_multicast_ether_addr(hdr->addr1) || !rx->sta)
                return RX_DROP_UNUSABLE;
 
        if (rx->key) {
@@ -3037,7 +3040,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
        struct net_device *dev = sdata->dev;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
        __le16 fc = hdr->frame_control;
-       static ieee80211_rx_result res;
+       ieee80211_rx_result res;
        bool port_control;
        int err;
 
index 7d68dbc..941bda9 100644 (file)
@@ -1264,7 +1264,8 @@ static int __must_check __sta_info_destroy_part1(struct sta_info *sta)
        list_del_rcu(&sta->list);
        sta->removed = true;
 
-       drv_sta_pre_rcu_remove(local, sta->sdata, sta);
+       if (sta->uploaded)
+               drv_sta_pre_rcu_remove(local, sta->sdata, sta);
 
        if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
            rcu_access_pointer(sdata->u.vlan.sta) == sta)
index 3aceb3b..8c39765 100644 (file)
@@ -4906,7 +4906,7 @@ u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
                                       &eht_cap->eht_cap_elem,
                                       is_ap);
        return 2 + 1 +
-              sizeof(he_cap->he_cap_elem) + n +
+              sizeof(eht_cap->eht_cap_elem) + n +
               ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
                                      eht_cap->eht_cap_elem.phy_cap_info);
        return 0;
index d237d14..bceaab8 100644 (file)
@@ -9,11 +9,18 @@
 void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow,
                                              struct request_sock *req)
 {
-       struct sock *ssk = subflow->tcp_sock;
-       struct sock *sk = subflow->conn;
+       struct sock *sk, *ssk;
        struct sk_buff *skb;
        struct tcp_sock *tp;
 
+       /* on early fallback the subflow context is deleted by
+        * subflow_syn_recv_sock()
+        */
+       if (!subflow)
+               return;
+
+       ssk = subflow->tcp_sock;
+       sk = subflow->conn;
        tp = tcp_sk(ssk);
 
        subflow->is_mptfo = 1;
index b30cea2..355f798 100644 (file)
@@ -1192,9 +1192,8 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
         */
        if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
                if (mp_opt.data_fin && mp_opt.data_len == 1 &&
-                   mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64) &&
-                   schedule_work(&msk->work))
-                       sock_hold(subflow->conn);
+                   mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64))
+                       mptcp_schedule_work((struct sock *)msk);
 
                return true;
        }
index 60b23b2..06c5872 100644 (file)
@@ -2626,7 +2626,7 @@ static void mptcp_worker(struct work_struct *work)
 
        lock_sock(sk);
        state = sk->sk_state;
-       if (unlikely(state == TCP_CLOSE))
+       if (unlikely((1 << state) & (TCPF_CLOSE | TCPF_LISTEN)))
                goto unlock;
 
        mptcp_check_data_fin_ack(sk);
index a004136..d345888 100644 (file)
@@ -408,9 +408,8 @@ void mptcp_subflow_reset(struct sock *ssk)
 
        tcp_send_active_reset(ssk, GFP_ATOMIC);
        tcp_done(ssk);
-       if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
-           schedule_work(&mptcp_sk(sk)->work))
-               return; /* worker will put sk for us */
+       if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
+               mptcp_schedule_work(sk);
 
        sock_put(sk);
 }
@@ -1118,8 +1117,8 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
                                skb_ext_del(skb, SKB_EXT_MPTCP);
                                return MAPPING_OK;
                        } else {
-                               if (updated && schedule_work(&msk->work))
-                                       sock_hold((struct sock *)msk);
+                               if (updated)
+                                       mptcp_schedule_work((struct sock *)msk);
 
                                return MAPPING_DATA_FIN;
                        }
@@ -1222,17 +1221,12 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
 /* sched mptcp worker to remove the subflow if no more data is pending */
 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
 {
-       struct sock *sk = (struct sock *)msk;
-
        if (likely(ssk->sk_state != TCP_CLOSE))
                return;
 
        if (skb_queue_empty(&ssk->sk_receive_queue) &&
-           !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
-               sock_hold(sk);
-               if (!schedule_work(&msk->work))
-                       sock_put(sk);
-       }
+           !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+               mptcp_schedule_work((struct sock *)msk);
 }
 
 static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
index c642776..f365dfd 100644 (file)
@@ -1952,7 +1952,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
        struct scm_cookie scm;
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
-       size_t copied;
+       size_t copied, max_recvmsg_len;
        struct sk_buff *skb, *data_skb;
        int err, ret;
 
@@ -1985,9 +1985,10 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 #endif
 
        /* Record the max length of recvmsg() calls for future allocations */
-       nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
-       nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
-                                    SKB_WITH_OVERHEAD(32768));
+       max_recvmsg_len = max(READ_ONCE(nlk->max_recvmsg_len), len);
+       max_recvmsg_len = min_t(size_t, max_recvmsg_len,
+                               SKB_WITH_OVERHEAD(32768));
+       WRITE_ONCE(nlk->max_recvmsg_len, max_recvmsg_len);
 
        copied = data_skb->len;
        if (len < copied) {
@@ -2236,6 +2237,7 @@ static int netlink_dump(struct sock *sk)
        struct netlink_ext_ack extack = {};
        struct netlink_callback *cb;
        struct sk_buff *skb = NULL;
+       size_t max_recvmsg_len;
        struct module *module;
        int err = -ENOBUFS;
        int alloc_min_size;
@@ -2258,8 +2260,9 @@ static int netlink_dump(struct sock *sk)
        cb = &nlk->cb;
        alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
 
-       if (alloc_min_size < nlk->max_recvmsg_len) {
-               alloc_size = nlk->max_recvmsg_len;
+       max_recvmsg_len = READ_ONCE(nlk->max_recvmsg_len);
+       if (alloc_min_size < max_recvmsg_len) {
+               alloc_size = max_recvmsg_len;
                skb = alloc_skb(alloc_size,
                                (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
                                __GFP_NOWARN | __GFP_NORETRY);
index ca3ebfd..a8cf9a8 100644 (file)
@@ -913,7 +913,7 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
 {
        struct vport *vport = ovs_vport_rcu(dp, out_port);
 
-       if (likely(vport)) {
+       if (likely(vport && netif_carrier_ok(vport->dev))) {
                u16 mru = OVS_CB(skb)->mru;
                u32 cutlen = OVS_CB(skb)->cutlen;
 
index 5c2fb99..76f0434 100644 (file)
@@ -393,10 +393,12 @@ static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
        struct qrtr_node *node;
        unsigned long flags;
 
+       mutex_lock(&qrtr_node_lock);
        spin_lock_irqsave(&qrtr_nodes_lock, flags);
        node = radix_tree_lookup(&qrtr_nodes, nid);
        node = qrtr_node_acquire(node);
        spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
+       mutex_unlock(&qrtr_node_lock);
 
        return node;
 }
@@ -496,6 +498,11 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
        if (!size || len != ALIGN(size, 4) + hdrlen)
                goto err;
 
+       if ((cb->type == QRTR_TYPE_NEW_SERVER ||
+            cb->type == QRTR_TYPE_RESUME_TX) &&
+           size < sizeof(struct qrtr_ctrl_pkt))
+               goto err;
+
        if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
            cb->type != QRTR_TYPE_RESUME_TX)
                goto err;
@@ -508,9 +515,6 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
                /* Remote node endpoint can bridge other distant nodes */
                const struct qrtr_ctrl_pkt *pkt;
 
-               if (size < sizeof(*pkt))
-                       goto err;
-
                pkt = data + hdrlen;
                qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
        }
index 722936f..0f25a38 100644 (file)
@@ -274,7 +274,7 @@ err:
        return NULL;
 }
 
-static int server_del(struct qrtr_node *node, unsigned int port)
+static int server_del(struct qrtr_node *node, unsigned int port, bool bcast)
 {
        struct qrtr_lookup *lookup;
        struct qrtr_server *srv;
@@ -287,7 +287,7 @@ static int server_del(struct qrtr_node *node, unsigned int port)
        radix_tree_delete(&node->servers, port);
 
        /* Broadcast the removal of local servers */
-       if (srv->node == qrtr_ns.local_node)
+       if (srv->node == qrtr_ns.local_node && bcast)
                service_announce_del(&qrtr_ns.bcast_sq, srv);
 
        /* Announce the service's disappearance to observers */
@@ -373,7 +373,7 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
                }
                slot = radix_tree_iter_resume(slot, &iter);
                rcu_read_unlock();
-               server_del(node, srv->port);
+               server_del(node, srv->port, true);
                rcu_read_lock();
        }
        rcu_read_unlock();
@@ -459,10 +459,13 @@ static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
                kfree(lookup);
        }
 
-       /* Remove the server belonging to this port */
+       /* Remove the server belonging to this port but don't broadcast
+        * DEL_SERVER. Neighbours would've already removed the server belonging
+        * to this port due to the DEL_CLIENT broadcast from qrtr_port_remove().
+        */
        node = node_get(node_id);
        if (node)
-               server_del(node, port);
+               server_del(node, port, false);
 
        /* Advertise the removal of this client to all local servers */
        local_node = node_get(qrtr_ns.local_node);
@@ -567,7 +570,7 @@ static int ctrl_cmd_del_server(struct sockaddr_qrtr *from,
        if (!node)
                return -ENOENT;
 
-       return server_del(node, port);
+       return server_del(node, port, true);
 }
 
 static int ctrl_cmd_new_lookup(struct sockaddr_qrtr *from,
index b91616f..218e098 100644 (file)
@@ -1830,6 +1830,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
                err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
                if (err)
                        goto err;
+               if (unlikely(sinfo->sinfo_stream >= asoc->stream.outcnt)) {
+                       err = -EINVAL;
+                       goto err;
+               }
        }
 
        if (sctp_state(asoc, CLOSED)) {
index 94727fe..b046b11 100644 (file)
@@ -1154,7 +1154,8 @@ static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
 
 #define _sctp_walk_ifwdtsn(pos, chunk, end) \
        for (pos = chunk->subh.ifwdtsn_hdr->skip; \
-            (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)
+            (void *)pos <= (void *)chunk->subh.ifwdtsn_hdr->skip + (end) - \
+                           sizeof(struct sctp_ifwdtsn_skip); pos++)
 
 #define sctp_walk_ifwdtsn(pos, ch) \
        _sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
index c6b4a62..50c38b6 100644 (file)
@@ -3270,6 +3270,17 @@ static int __smc_create(struct net *net, struct socket *sock, int protocol,
                        sk_common_release(sk);
                        goto out;
                }
+
+               /* smc_clcsock_release() does not wait smc->clcsock->sk's
+                * destruction;  its sk_state might not be TCP_CLOSE after
+                * smc->sk is close()d, and TCP timers can be fired later,
+                * which need net ref.
+                */
+               sk = smc->clcsock->sk;
+               __netns_tracker_free(net, &sk->ns_tracker, false);
+               sk->sk_net_refcnt = 1;
+               get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
+               sock_inuse_add(net, 1);
        } else {
                smc->clcsock = clcsock;
        }
index ce0541e..95ca783 100644 (file)
@@ -73,7 +73,6 @@ static void checksum_case(struct kunit *test)
 {
        const struct gss_krb5_test_param *param = test->param_value;
        struct xdr_buf buf = {
-               .head[0].iov_base       = param->plaintext->data,
                .head[0].iov_len        = param->plaintext->len,
                .len                    = param->plaintext->len,
        };
@@ -99,6 +98,10 @@ static void checksum_case(struct kunit *test)
        err = crypto_ahash_setkey(tfm, Kc.data, Kc.len);
        KUNIT_ASSERT_EQ(test, err, 0);
 
+       buf.head[0].iov_base = kunit_kzalloc(test, buf.head[0].iov_len, GFP_KERNEL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf.head[0].iov_base);
+       memcpy(buf.head[0].iov_base, param->plaintext->data, buf.head[0].iov_len);
+
        checksum.len = gk5e->cksumlength;
        checksum.data = kunit_kzalloc(test, checksum.len, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, checksum.data);
@@ -1327,6 +1330,7 @@ static void rfc6803_encrypt_case(struct kunit *test)
        if (!gk5e)
                kunit_skip(test, "Encryption type is not available");
 
+       memset(usage_data, 0, sizeof(usage_data));
        usage.data[3] = param->constant;
 
        Ke.len = gk5e->Ke_length;
index 983c589..4246363 100644 (file)
@@ -416,14 +416,23 @@ static int unix_gid_hash(kuid_t uid)
        return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS);
 }
 
-static void unix_gid_put(struct kref *kref)
+static void unix_gid_free(struct rcu_head *rcu)
 {
-       struct cache_head *item = container_of(kref, struct cache_head, ref);
-       struct unix_gid *ug = container_of(item, struct unix_gid, h);
+       struct unix_gid *ug = container_of(rcu, struct unix_gid, rcu);
+       struct cache_head *item = &ug->h;
+
        if (test_bit(CACHE_VALID, &item->flags) &&
            !test_bit(CACHE_NEGATIVE, &item->flags))
                put_group_info(ug->gi);
-       kfree_rcu(ug, rcu);
+       kfree(ug);
+}
+
+static void unix_gid_put(struct kref *kref)
+{
+       struct cache_head *item = container_of(kref, struct cache_head, ref);
+       struct unix_gid *ug = container_of(item, struct unix_gid, h);
+
+       call_rcu(&ug->rcu, unix_gid_free);
 }
 
 static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
index adcbedc..6cacd70 100644 (file)
@@ -2158,6 +2158,7 @@ static void xs_tcp_shutdown(struct rpc_xprt *xprt)
        switch (skst) {
        case TCP_FIN_WAIT1:
        case TCP_FIN_WAIT2:
+       case TCP_LAST_ACK:
                break;
        case TCP_ESTABLISHED:
        case TCP_CLOSE_WAIT:
index 6564192..ee78b40 100644 (file)
@@ -94,6 +94,11 @@ virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
                                         info->op,
                                         info->flags);
 
+       if (info->vsk && !skb_set_owner_sk_safe(skb, sk_vsock(info->vsk))) {
+               WARN_ONCE(1, "failed to allocate skb on vsock socket with sk_refcnt == 0\n");
+               goto out;
+       }
+
        return skb;
 
 out:
@@ -363,6 +368,13 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
        u32 free_space;
 
        spin_lock_bh(&vvs->rx_lock);
+
+       if (WARN_ONCE(skb_queue_empty(&vvs->rx_queue) && vvs->rx_bytes,
+                     "rx_queue is empty, but rx_bytes is non-zero\n")) {
+               spin_unlock_bh(&vvs->rx_lock);
+               return err;
+       }
+
        while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
                skb = skb_peek(&vvs->rx_queue);
 
@@ -1068,7 +1080,7 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
                        memcpy(skb_put(last_skb, skb->len), skb->data, skb->len);
                        free_pkt = true;
                        last_hdr->flags |= hdr->flags;
-                       last_hdr->len = cpu_to_le32(last_skb->len);
+                       le32_add_cpu(&last_hdr->len, len);
                        goto out;
                }
        }
@@ -1296,6 +1308,11 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
                goto free_pkt;
        }
 
+       if (!skb_set_owner_sk_safe(skb, sk)) {
+               WARN_ONCE(1, "receiving vsock socket has sk_refcnt == 0\n");
+               goto free_pkt;
+       }
+
        vsk = vsock_sk(sk);
 
        lock_sock(sk);
index 36eb16a..95cc4d7 100644 (file)
@@ -1842,7 +1842,13 @@ static ssize_t vmci_transport_stream_enqueue(
        struct msghdr *msg,
        size_t len)
 {
-       return vmci_qpair_enquev(vmci_trans(vsk)->qpair, msg, len, 0);
+       ssize_t err;
+
+       err = vmci_qpair_enquev(vmci_trans(vsk)->qpair, msg, len, 0);
+       if (err < 0)
+               err = -ENOMEM;
+
+       return err;
 }
 
 static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk)
index 671e032..89905c0 100644 (file)
@@ -15,7 +15,6 @@
 struct vsock_loopback {
        struct workqueue_struct *workqueue;
 
-       spinlock_t pkt_list_lock; /* protects pkt_list */
        struct sk_buff_head pkt_queue;
        struct work_struct pkt_work;
 };
@@ -32,9 +31,7 @@ static int vsock_loopback_send_pkt(struct sk_buff *skb)
        struct vsock_loopback *vsock = &the_vsock_loopback;
        int len = skb->len;
 
-       spin_lock_bh(&vsock->pkt_list_lock);
        skb_queue_tail(&vsock->pkt_queue, skb);
-       spin_unlock_bh(&vsock->pkt_list_lock);
 
        queue_work(vsock->workqueue, &vsock->pkt_work);
 
@@ -113,9 +110,9 @@ static void vsock_loopback_work(struct work_struct *work)
 
        skb_queue_head_init(&pkts);
 
-       spin_lock_bh(&vsock->pkt_list_lock);
+       spin_lock_bh(&vsock->pkt_queue.lock);
        skb_queue_splice_init(&vsock->pkt_queue, &pkts);
-       spin_unlock_bh(&vsock->pkt_list_lock);
+       spin_unlock_bh(&vsock->pkt_queue.lock);
 
        while ((skb = __skb_dequeue(&pkts))) {
                virtio_transport_deliver_tap_pkt(skb);
@@ -132,7 +129,6 @@ static int __init vsock_loopback_init(void)
        if (!vsock->workqueue)
                return -ENOMEM;
 
-       spin_lock_init(&vsock->pkt_list_lock);
        skb_queue_head_init(&vsock->pkt_queue);
        INIT_WORK(&vsock->pkt_work, vsock_loopback_work);
 
@@ -156,9 +152,7 @@ static void __exit vsock_loopback_exit(void)
 
        flush_work(&vsock->pkt_work);
 
-       spin_lock_bh(&vsock->pkt_list_lock);
        virtio_vsock_skb_queue_purge(&vsock->pkt_queue);
-       spin_unlock_bh(&vsock->pkt_list_lock);
 
        destroy_workqueue(vsock->workqueue);
 }
index 61f72eb..4d90691 100644 (file)
@@ -27,21 +27,6 @@ fi ; \
 tar -I $(KGZIP) -c $(RCS_TAR_IGNORE) -f $(2).tar.gz \
        --transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3)
 
-# tarball compression
-# ---------------------------------------------------------------------------
-
-%.tar.gz: %.tar
-       $(call cmd,gzip)
-
-%.tar.bz2: %.tar
-       $(call cmd,bzip2)
-
-%.tar.xz: %.tar
-       $(call cmd,xzmisc)
-
-%.tar.zst: %.tar
-       $(call cmd,zstd)
-
 # Git
 # ---------------------------------------------------------------------------
 
@@ -57,16 +42,24 @@ check-git:
                false; \
        fi
 
+git-config-tar.gz  = -c tar.tar.gz.command="$(KGZIP)"
+git-config-tar.bz2 = -c tar.tar.bz2.command="$(KBZIP2)"
+git-config-tar.xz  = -c tar.tar.xz.command="$(XZ)"
+git-config-tar.zst = -c tar.tar.zst.command="$(ZSTD)"
+
+quiet_cmd_archive = ARCHIVE $@
+      cmd_archive = git -C $(srctree) $(git-config-tar$(suffix $@)) archive \
+                    --output=$$(realpath $@) --prefix=$(basename $@)/ $(archive-args)
+
 # Linux source tarball
 # ---------------------------------------------------------------------------
 
-quiet_cmd_archive_linux = ARCHIVE $@
-      cmd_archive_linux = \
-       git -C $(srctree) archive --output=$$(realpath $@) --prefix=$(basename $@)/ $$(cat $<)
+linux-tarballs := $(addprefix linux, .tar.gz)
 
-targets += linux.tar
-linux.tar: .tmp_HEAD FORCE
-       $(call if_changed,archive_linux)
+targets += $(linux-tarballs)
+$(linux-tarballs): archive-args = $$(cat $<)
+$(linux-tarballs): .tmp_HEAD FORCE
+       $(call if_changed,archive)
 
 # rpm-pkg
 # ---------------------------------------------------------------------------
@@ -94,7 +87,7 @@ binrpm-pkg:
                $(UTS_MACHINE)-linux -bb $(objtree)/binkernel.spec
 
 quiet_cmd_debianize = GEN     $@
-      cmd_debianize = $(srctree)/scripts/package/mkdebian
+      cmd_debianize = $(srctree)/scripts/package/mkdebian $(mkdebian-opts)
 
 debian: FORCE
        $(call cmd,debianize)
@@ -103,6 +96,7 @@ PHONY += debian-orig
 debian-orig: private source = $(shell dpkg-parsechangelog -S Source)
 debian-orig: private version = $(shell dpkg-parsechangelog -S Version | sed 's/-[^-]*$$//')
 debian-orig: private orig-name = $(source)_$(version).orig.tar.gz
+debian-orig: mkdebian-opts = --need-source
 debian-orig: linux.tar.gz debian
        $(Q)if [ "$(df  --output=target .. 2>/dev/null)" = "$(df --output=target $< 2>/dev/null)" ]; then \
                ln -f $< ../$(orig-name); \
@@ -145,10 +139,17 @@ tar-install: FORCE
        $(Q)$(MAKE) -f $(srctree)/Makefile
        +$(Q)$(srctree)/scripts/package/buildtar $@
 
+compress-tar.gz  = -I "$(KGZIP)"
+compress-tar.bz2 = -I "$(KBZIP2)"
+compress-tar.xz  = -I "$(XZ)"
+compress-tar.zst = -I "$(ZSTD)"
+
 quiet_cmd_tar = TAR     $@
-      cmd_tar = cd $<; tar cf ../$@ --owner=root --group=root --sort=name *
+      cmd_tar = cd $<; tar cf ../$@ $(compress-tar$(suffix $@)) --owner=root --group=root --sort=name *
 
-linux-$(KERNELRELEASE)-$(ARCH).tar: tar-install
+dir-tarballs := $(addprefix linux-$(KERNELRELEASE)-$(ARCH), .tar .tar.gz .tar.bz2 .tar.xz .tar.zst)
+
+$(dir-tarballs): tar-install
        $(call cmd,tar)
 
 PHONY += dir-pkg
@@ -180,16 +181,17 @@ quiet_cmd_perf_version_file = GEN     $@
 .tmp_perf/PERF-VERSION-FILE: .tmp_HEAD $(srctree)/tools/perf/util/PERF-VERSION-GEN | .tmp_perf
        $(call cmd,perf_version_file)
 
-quiet_cmd_archive_perf = ARCHIVE $@
-      cmd_archive_perf = \
-       git -C $(srctree) archive --output=$$(realpath $@) --prefix=$(basename $@)/ \
-       --add-file=$$(realpath $(word 2, $^)) \
+perf-archive-args = --add-file=$$(realpath $(word 2, $^)) \
        --add-file=$$(realpath $(word 3, $^)) \
        $$(cat $(word 2, $^))^{tree} $$(cat $<)
 
-targets += perf-$(KERNELVERSION).tar
-perf-$(KERNELVERSION).tar: tools/perf/MANIFEST .tmp_perf/HEAD .tmp_perf/PERF-VERSION-FILE FORCE
-       $(call if_changed,archive_perf)
+
+perf-tarballs := $(addprefix perf-$(KERNELVERSION), .tar .tar.gz .tar.bz2 .tar.xz .tar.zst)
+
+targets += $(perf-tarballs)
+$(perf-tarballs): archive-args = $(perf-archive-args)
+$(perf-tarballs): tools/perf/MANIFEST .tmp_perf/HEAD .tmp_perf/PERF-VERSION-FILE FORCE
+       $(call if_changed,archive)
 
 PHONY += perf-tar-src-pkg
 perf-tar-src-pkg: perf-$(KERNELVERSION).tar
index 32620de..902eb42 100755 (executable)
@@ -145,7 +145,7 @@ for ORIG_MERGE_FILE in $MERGE_LIST ; do
                NEW_VAL=$(grep -w $CFG $MERGE_FILE)
                BUILTIN_FLAG=false
                if [ "$BUILTIN" = "true" ] && [ "${NEW_VAL#CONFIG_*=}" = "m" ] && [ "${PREV_VAL#CONFIG_*=}" = "y" ]; then
-                       ${WARNOVVERIDE} Previous  value: $PREV_VAL
+                       ${WARNOVERRIDE} Previous  value: $PREV_VAL
                        ${WARNOVERRIDE} New value:       $NEW_VAL
                        ${WARNOVERRIDE} -y passed, will not demote y to m
                        ${WARNOVERRIDE}
index efff807..9466b6a 100644 (file)
@@ -1733,7 +1733,7 @@ static void extract_crcs_for_object(const char *object, struct module *mod)
                if (!isdigit(*p))
                        continue;       /* skip this line */
 
-               crc = strtol(p, &p, 0);
+               crc = strtoul(p, &p, 0);
                if (*p != '\n')
                        continue;       /* skip this line */
 
index c5ae571..7b23f52 100755 (executable)
@@ -162,6 +162,7 @@ install_linux_image_dbg () {
 
 install_kernel_headers () {
        pdir=$1
+       version=$2
 
        rm -rf $pdir
 
@@ -229,7 +230,7 @@ do
        linux-libc-dev)
                install_libc_headers debian/linux-libc-dev;;
        linux-headers-*)
-               install_kernel_headers debian/linux-headers;;
+               install_kernel_headers debian/linux-headers ${package#linux-headers-};;
        esac
 done
 
index f842ab5..8a98b7b 100755 (executable)
@@ -1,44 +1,36 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0-only
 
-diff_patch="${1}"
-untracked_patch="${2}"
-srctree=$(dirname $0)/../..
+diff_patch=$1
 
-rm -f ${diff_patch} ${untracked_patch}
+mkdir -p "$(dirname "${diff_patch}")"
 
-if ! ${srctree}/scripts/check-git; then
-       exit
-fi
-
-mkdir -p "$(dirname ${diff_patch})" "$(dirname ${untracked_patch})"
+git -C "${srctree:-.}" diff HEAD > "${diff_patch}"
 
-git -C "${srctree}" diff HEAD > "${diff_patch}"
-
-if [ ! -s "${diff_patch}" ]; then
-       rm -f "${diff_patch}"
+if [ ! -s "${diff_patch}" ] ||
+   [ -z "$(git -C "${srctree:-.}" ls-files --other --exclude-standard | head -n1)" ]; then
        exit
 fi
 
-git -C ${srctree} status --porcelain --untracked-files=all |
-while read stat path
-do
-       if [ "${stat}" = '??' ]; then
-
-               if ! diff -u /dev/null "${srctree}/${path}" > .tmp_diff &&
-                       ! head -n1 .tmp_diff | grep -q "Binary files"; then
-                       {
-                               echo "--- /dev/null"
-                               echo "+++ linux/$path"
-                               cat .tmp_diff | tail -n +3
-                       } >> ${untracked_patch}
-               fi
-       fi
-done
-
-rm -f .tmp_diff
-
-if [ ! -s "${diff_patch}" ]; then
-       rm -f "${diff_patch}"
-       exit
-fi
+# The source tarball, which is generated by 'git archive', contains everything
+# you committed in the repository. If you have local diff ('git diff HEAD'),
+# it will go into ${diff_patch}. If untracked files are remaining, the resulting
+# source package may not be correct.
+#
+# Examples:
+#  - You modified a source file to add #include "new-header.h"
+#    but forgot to add new-header.h
+#  - You modified a Makefile to add 'obj-$(CONFIG_FOO) += new-dirver.o'
+#    but you forgot to add new-driver.c
+#
+# You need to commit them, or at least stage them by 'git add'.
+#
+# This script does not take care of untracked files because doing so would
+# introduce additional complexity. Instead, print a warning message here if
+# untracked files are found.
+# If all untracked files are just garbage, you can ignore this warning.
+echo >&2 "============================ WARNING ============================"
+echo >&2 "Your working tree has diff from HEAD, and also untracked file(s)."
+echo >&2 "Please make sure you did 'git add' for all new files you need in"
+echo >&2 "the source package."
+echo >&2 "================================================================="
index e20a2b5..a4c2c22 100755 (executable)
@@ -84,7 +84,66 @@ set_debarch() {
        fi
 }
 
+# Create debian/source/ if it is a source package build
+gen_source ()
+{
+       mkdir -p debian/source
+
+       echo "3.0 (quilt)" > debian/source/format
+
+       {
+               echo "diff-ignore"
+               echo "extend-diff-ignore = .*"
+       } > debian/source/local-options
+
+       # Add .config as a patch
+       mkdir -p debian/patches
+       {
+               echo "Subject: Add .config"
+               echo "Author: ${maintainer}"
+               echo
+               echo "--- /dev/null"
+               echo "+++ linux/.config"
+               diff -u /dev/null "${KCONFIG_CONFIG}" | tail -n +3
+       } > debian/patches/config.patch
+       echo config.patch > debian/patches/series
+
+       "${srctree}/scripts/package/gen-diff-patch" debian/patches/diff.patch
+       if [ -s debian/patches/diff.patch ]; then
+               sed -i "
+                       1iSubject: Add local diff
+                       1iAuthor: ${maintainer}
+                       1i
+               " debian/patches/diff.patch
+
+               echo diff.patch >> debian/patches/series
+       else
+               rm -f debian/patches/diff.patch
+       fi
+}
+
 rm -rf debian
+mkdir debian
+
+email=${DEBEMAIL-$EMAIL}
+
+# use email string directly if it contains <email>
+if echo "${email}" | grep -q '<.*>'; then
+       maintainer=${email}
+else
+       # or construct the maintainer string
+       user=${KBUILD_BUILD_USER-$(id -nu)}
+       name=${DEBFULLNAME-${user}}
+       if [ -z "${email}" ]; then
+               buildhost=${KBUILD_BUILD_HOST-$(hostname -f 2>/dev/null || hostname)}
+               email="${user}@${buildhost}"
+       fi
+       maintainer="${name} <${email}>"
+fi
+
+if [ "$1" = --need-source ]; then
+       gen_source
+fi
 
 # Some variables and settings used throughout the script
 version=$KERNELRELEASE
@@ -104,22 +163,6 @@ fi
 debarch=
 set_debarch
 
-email=${DEBEMAIL-$EMAIL}
-
-# use email string directly if it contains <email>
-if echo $email | grep -q '<.*>'; then
-       maintainer=$email
-else
-       # or construct the maintainer string
-       user=${KBUILD_BUILD_USER-$(id -nu)}
-       name=${DEBFULLNAME-$user}
-       if [ -z "$email" ]; then
-               buildhost=${KBUILD_BUILD_HOST-$(hostname -f 2>/dev/null || hostname)}
-               email="$user@$buildhost"
-       fi
-       maintainer="$name <$email>"
-fi
-
 # Try to determine distribution
 if [ -n "$KDEB_CHANGELOG_DIST" ]; then
         distribution=$KDEB_CHANGELOG_DIST
@@ -132,34 +175,6 @@ else
         echo >&2 "Install lsb-release or set \$KDEB_CHANGELOG_DIST explicitly"
 fi
 
-mkdir -p debian/source/
-echo "3.0 (quilt)" > debian/source/format
-
-{
-       echo "diff-ignore"
-       echo "extend-diff-ignore = .*"
-} > debian/source/local-options
-
-# Add .config as a patch
-mkdir -p debian/patches
-{
-       echo "Subject: Add .config"
-       echo "Author: ${maintainer}"
-       echo
-       echo "--- /dev/null"
-       echo "+++ linux/.config"
-       diff -u /dev/null "${KCONFIG_CONFIG}" | tail -n +3
-} > debian/patches/config
-echo config > debian/patches/series
-
-$(dirname $0)/gen-diff-patch debian/patches/diff.patch debian/patches/untracked.patch
-if [ -f debian/patches/diff.patch ]; then
-       echo diff.patch >> debian/patches/series
-fi
-if [ -f debian/patches/untracked.patch ]; then
-       echo untracked.patch >> debian/patches/series
-fi
-
 echo $debarch > debian/arch
 extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev:native)"
 extra_build_depends="$extra_build_depends, $(if_enabled_echo CONFIG_SYSTEM_TRUSTED_KEYRING libssl-dev:native)"
index b7d1dc2..fc8ad3f 100755 (executable)
@@ -19,8 +19,7 @@ else
        mkdir -p rpmbuild/SOURCES
        cp linux.tar.gz rpmbuild/SOURCES
        cp "${KCONFIG_CONFIG}" rpmbuild/SOURCES/config
-       $(dirname $0)/gen-diff-patch rpmbuild/SOURCES/diff.patch rpmbuild/SOURCES/untracked.patch
-       touch rpmbuild/SOURCES/diff.patch rpmbuild/SOURCES/untracked.patch
+       "${srctree}/scripts/package/gen-diff-patch" rpmbuild/SOURCES/diff.patch
 fi
 
 if grep -q CONFIG_MODULES=y include/config/auto.conf; then
@@ -56,7 +55,6 @@ sed -e '/^DEL/d' -e 's/^\t*//' <<EOF
 $S     Source0: linux.tar.gz
 $S     Source1: config
 $S     Source2: diff.patch
-$S     Source3: untracked.patch
        Provides: $PROVIDES
 $S     BuildRequires: bc binutils bison dwarves
 $S     BuildRequires: (elfutils-libelf-devel or libelf-devel) flex
@@ -94,12 +92,7 @@ $S$M
 $S     %prep
 $S     %setup -q -n linux
 $S     cp %{SOURCE1} .config
-$S     if [ -s %{SOURCE2} ]; then
-$S             patch -p1 < %{SOURCE2}
-$S     fi
-$S     if [ -s %{SOURCE3} ]; then
-$S             patch -p1 < %{SOURCE3}
-$S     fi
+$S     patch -p1 < %{SOURCE2}
 $S
 $S     %build
 $S     $MAKE %{?_smp_mflags} KERNELRELEASE=$KERNELRELEASE KBUILD_BUILD_VERSION=%{release}
index 8b6aeb8..02fd659 100644 (file)
@@ -2155,6 +2155,8 @@ int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
                ret = substream->ops->ack(substream);
                if (ret < 0) {
                        runtime->control->appl_ptr = old_appl_ptr;
+                       if (ret == -EPIPE)
+                               __snd_pcm_xrun(substream);
                        return ret;
                }
        }
index 53e094c..dfe783d 100644 (file)
@@ -490,7 +490,7 @@ int snd_tscm_stream_start_duplex(struct snd_tscm *tscm, unsigned int rate)
                // packet is important for media clock recovery.
                err = amdtp_domain_start(&tscm->domain, tx_init_skip_cycles, true, true);
                if (err < 0)
-                       return err;
+                       goto error;
 
                if (!amdtp_domain_wait_ready(&tscm->domain, READY_TIMEOUT_MS)) {
                        err = -ETIMEDOUT;
index 65012af..f58b14b 100644 (file)
@@ -561,10 +561,13 @@ int snd_cs8427_iec958_active(struct snd_i2c_device *cs8427, int active)
        if (snd_BUG_ON(!cs8427))
                return -ENXIO;
        chip = cs8427->private_data;
-       if (active)
+       if (active) {
                memcpy(chip->playback.pcm_status,
                       chip->playback.def_status, 24);
-       chip->playback.pcm_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+               chip->playback.pcm_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+       } else {
+               chip->playback.pcm_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+       }
        snd_ctl_notify(cs8427->bus->card,
                       SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
                       &chip->playback.pcm_ctl->id);
index 48af77a..6ec394f 100644 (file)
@@ -1236,7 +1236,7 @@ static int snd_emu10k1_capture_mic_close(struct snd_pcm_substream *substream)
 {
        struct snd_emu10k1 *emu = snd_pcm_substream_chip(substream);
 
-       emu->capture_interrupt = NULL;
+       emu->capture_mic_interrupt = NULL;
        emu->pcm_capture_mic_substream = NULL;
        return 0;
 }
@@ -1344,7 +1344,7 @@ static int snd_emu10k1_capture_efx_close(struct snd_pcm_substream *substream)
 {
        struct snd_emu10k1 *emu = snd_pcm_substream_chip(substream);
 
-       emu->capture_interrupt = NULL;
+       emu->capture_efx_interrupt = NULL;
        emu->pcm_capture_efx_substream = NULL;
        return 0;
 }
@@ -1781,17 +1781,21 @@ int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device)
        struct snd_kcontrol *kctl;
        int err;
 
-       err = snd_pcm_new(emu->card, "emu10k1 efx", device, 8, 1, &pcm);
+       err = snd_pcm_new(emu->card, "emu10k1 efx", device, emu->audigy ? 0 : 8, 1, &pcm);
        if (err < 0)
                return err;
 
        pcm->private_data = emu;
 
-       snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_emu10k1_fx8010_playback_ops);
+       if (!emu->audigy)
+               snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_emu10k1_fx8010_playback_ops);
        snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_emu10k1_capture_efx_ops);
 
        pcm->info_flags = 0;
-       strcpy(pcm->name, "Multichannel Capture/PT Playback");
+       if (emu->audigy)
+               strcpy(pcm->name, "Multichannel Capture");
+       else
+               strcpy(pcm->name, "Multichannel Capture/PT Playback");
        emu->pcm_efx = pcm;
 
        /* EFX capture - record the "FXBUS2" channels, by default we connect the EXTINs 
index 75e1d00..a889ccc 100644 (file)
@@ -980,7 +980,10 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x17aa, 0x3905, "Lenovo G50-30", CXT_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
-       SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_PINCFG_LENOVO_NOTEBOOK),
+       /* NOTE: we'd need to extend the quirk for 17aa:3977 as the same
+        * PCI SSID is used on multiple Lenovo models
+        */
+       SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
@@ -1003,6 +1006,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
        { .id = CXT_FIXUP_MUTE_LED_GPIO, .name = "mute-led-gpio" },
        { .id = CXT_FIXUP_HP_ZBOOK_MUTE_LED, .name = "hp-zbook-mute-led" },
        { .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" },
+       { .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" },
        {}
 };
 
index 9ea633f..5c69803 100644 (file)
@@ -81,6 +81,7 @@ struct hdmi_spec_per_pin {
        struct delayed_work work;
        struct hdmi_pcm *pcm; /* pointer to spec->pcm_rec[n] dynamically*/
        int pcm_idx; /* which pcm is attached. -1 means no pcm is attached */
+       int prev_pcm_idx; /* previously assigned pcm index */
        int repoll_count;
        bool setup; /* the stream has been set up by prepare callback */
        bool silent_stream;
@@ -1380,9 +1381,17 @@ static void hdmi_attach_hda_pcm(struct hdmi_spec *spec,
        /* pcm already be attached to the pin */
        if (per_pin->pcm)
                return;
+       /* try the previously used slot at first */
+       idx = per_pin->prev_pcm_idx;
+       if (idx >= 0) {
+               if (!test_bit(idx, &spec->pcm_bitmap))
+                       goto found;
+               per_pin->prev_pcm_idx = -1; /* no longer valid, clear it */
+       }
        idx = hdmi_find_pcm_slot(spec, per_pin);
        if (idx == -EBUSY)
                return;
+ found:
        per_pin->pcm_idx = idx;
        per_pin->pcm = get_hdmi_pcm(spec, idx);
        set_bit(idx, &spec->pcm_bitmap);
@@ -1398,6 +1407,7 @@ static void hdmi_detach_hda_pcm(struct hdmi_spec *spec,
                return;
        idx = per_pin->pcm_idx;
        per_pin->pcm_idx = -1;
+       per_pin->prev_pcm_idx = idx; /* remember the previous index */
        per_pin->pcm = NULL;
        if (idx >= 0 && idx < spec->pcm_used)
                clear_bit(idx, &spec->pcm_bitmap);
@@ -1924,6 +1934,7 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
 
                per_pin->pcm = NULL;
                per_pin->pcm_idx = -1;
+               per_pin->prev_pcm_idx = -1;
                per_pin->pin_nid = pin_nid;
                per_pin->pin_nid_idx = spec->num_nids;
                per_pin->dev_id = i;
@@ -4593,7 +4604,7 @@ HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI",   patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI",  patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862818, "Raptorlake HDMI", patch_i915_tgl_hdmi),
-HDA_CODEC_ENTRY(0x80862819, "DG2 HDMI",        patch_i915_adlp_hdmi),
+HDA_CODEC_ENTRY(0x80862819, "DG2 HDMI",        patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI",        patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_adlp_hdmi),
index f09a1d7..3b9f077 100644 (file)
@@ -2624,6 +2624,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
        SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
        SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
+       SND_PCI_QUIRK(0x1558, 0x3702, "Clevo X370SN[VW]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
@@ -2631,6 +2632,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x65f5, "Clevo PD50PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x66a2, "Clevo PE60RNE", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
@@ -2651,6 +2653,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x96e1, "Clevo P960[ER][CDFN]-K", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x97e2, "Clevo P970RC-M", ALC1220_FIXUP_CLEVO_P950),
+       SND_PCI_QUIRK(0x1558, 0xd502, "Clevo PD50SNE", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
@@ -6957,6 +6960,8 @@ enum {
        ALC269_FIXUP_DELL_M101Z,
        ALC269_FIXUP_SKU_IGNORE,
        ALC269_FIXUP_ASUS_G73JW,
+       ALC269_FIXUP_ASUS_N7601ZM_PINS,
+       ALC269_FIXUP_ASUS_N7601ZM,
        ALC269_FIXUP_LENOVO_EAPD,
        ALC275_FIXUP_SONY_HWEQ,
        ALC275_FIXUP_SONY_DISABLE_AAMIX,
@@ -7253,6 +7258,29 @@ static const struct hda_fixup alc269_fixups[] = {
                        { }
                }
        },
+       [ALC269_FIXUP_ASUS_N7601ZM_PINS] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x03A11050 },
+                       { 0x1a, 0x03A11C30 },
+                       { 0x21, 0x03211420 },
+                       { }
+               }
+       },
+       [ALC269_FIXUP_ASUS_N7601ZM] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       {0x20, AC_VERB_SET_COEF_INDEX, 0x62},
+                       {0x20, AC_VERB_SET_PROC_COEF, 0xa007},
+                       {0x20, AC_VERB_SET_COEF_INDEX, 0x10},
+                       {0x20, AC_VERB_SET_PROC_COEF, 0x8420},
+                       {0x20, AC_VERB_SET_COEF_INDEX, 0x0f},
+                       {0x20, AC_VERB_SET_PROC_COEF, 0x7774},
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_ASUS_N7601ZM_PINS,
+       },
        [ALC269_FIXUP_LENOVO_EAPD] = {
                .type = HDA_FIXUP_VERBS,
                .v.verbs = (const struct hda_verb[]) {
@@ -9260,7 +9288,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0a62, "Dell Precision 5560", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1028, 0x0ac9, "Dell Precision 3260", ALC295_FIXUP_CHROME_BOOK),
+       SND_PCI_QUIRK(0x1028, 0x0ac9, "Dell Precision 3260", ALC283_FIXUP_CHROME_BOOK),
        SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x0b37, "Dell Inspiron 16 Plus 7620 2-in-1", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
@@ -9441,6 +9469,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8b47, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8b66, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x8b7a, "HP", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b7d, "HP", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b87, "HP", ALC236_FIXUP_HP_GPIO_LED),
@@ -9462,6 +9491,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1043, 0x12a3, "Asus N7691ZM", ALC269_FIXUP_ASUS_N7601ZM),
        SND_PCI_QUIRK(0x1043, 0x12af, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
        SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
@@ -9575,6 +9605,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x5101, "Clevo S510WU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x5157, "Clevo W517GU1", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x51a1, "Clevo NS50MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x5630, "Clevo NP50RNJS", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x70f2, "Clevo NH79EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -9609,6 +9640,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL5[03]RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -9657,6 +9689,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x22f1, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
        SND_PCI_QUIRK(0x17aa, 0x22f2, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
        SND_PCI_QUIRK(0x17aa, 0x22f3, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
+       SND_PCI_QUIRK(0x17aa, 0x2318, "Thinkpad Z13 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
+       SND_PCI_QUIRK(0x17aa, 0x2319, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
+       SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
@@ -9709,6 +9744,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+       SND_PCI_QUIRK(0x17aa, 0x9e56, "Lenovo ZhaoYang CF4620Z", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
        SND_PCI_QUIRK(0x1849, 0xa233, "Positivo Master C6300", ALC269_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
index a794a01..61258b0 100644 (file)
@@ -1707,6 +1707,7 @@ static const struct snd_pci_quirk stac925x_fixup_tbl[] = {
 };
 
 static const struct hda_pintbl ref92hd73xx_pin_configs[] = {
+       // Port A-H
        { 0x0a, 0x02214030 },
        { 0x0b, 0x02a19040 },
        { 0x0c, 0x01a19020 },
@@ -1715,9 +1716,12 @@ static const struct hda_pintbl ref92hd73xx_pin_configs[] = {
        { 0x0f, 0x01014010 },
        { 0x10, 0x01014020 },
        { 0x11, 0x01014030 },
+       // CD in
        { 0x12, 0x02319040 },
+       // Digial Mic ins
        { 0x13, 0x90a000f0 },
        { 0x14, 0x90a000f0 },
+       // Digital outs
        { 0x22, 0x01452050 },
        { 0x23, 0x01452050 },
        {}
@@ -1758,6 +1762,7 @@ static const struct hda_pintbl alienware_m17x_pin_configs[] = {
 };
 
 static const struct hda_pintbl intel_dg45id_pin_configs[] = {
+       // Analog outputs
        { 0x0a, 0x02214230 },
        { 0x0b, 0x02A19240 },
        { 0x0c, 0x01013214 },
@@ -1765,6 +1770,9 @@ static const struct hda_pintbl intel_dg45id_pin_configs[] = {
        { 0x0e, 0x01A19250 },
        { 0x0f, 0x01011212 },
        { 0x10, 0x01016211 },
+       // Digital output
+       { 0x22, 0x01451380 },
+       { 0x23, 0x40f000f0 },
        {}
 };
 
@@ -1955,6 +1963,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
                                "DFI LanParty", STAC_92HD73XX_REF),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101,
                                "DFI LanParty", STAC_92HD73XX_REF),
+       SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5001,
+                               "Intel DP45SG", STAC_92HD73XX_INTEL),
        SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5002,
                                "Intel DG45ID", STAC_92HD73XX_INTEL),
        SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5003,
index 1e198e4..82d4e0f 100644 (file)
@@ -170,7 +170,7 @@ static int snd_card_ymfpci_probe(struct pci_dev *pci,
                return -ENOENT;
        }
 
-       err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
+       err = snd_devm_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
                           sizeof(*chip), &card);
        if (err < 0)
                return err;
index c80114c..b492c32 100644 (file)
@@ -2165,7 +2165,7 @@ static int snd_ymfpci_memalloc(struct snd_ymfpci *chip)
        chip->work_base = ptr;
        chip->work_base_addr = ptr_addr;
        
-       snd_BUG_ON(ptr + chip->work_size !=
+       snd_BUG_ON(ptr + PAGE_ALIGN(chip->work_size) !=
                   chip->work_ptr->area + chip->work_ptr->bytes);
 
        snd_ymfpci_writel(chip, YDSXGR_PLAYCTRLBASE, chip->bank_base_playback_addr);
index 4a69ce7..0acdf01 100644 (file)
@@ -269,6 +269,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "8A43"),
                }
        },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+                       DMI_MATCH(DMI_BOARD_NAME, "8A22"),
+               }
+       },
        {}
 };
 
index 0068780..1c1f211 100644 (file)
@@ -2022,6 +2022,11 @@ static int da7213_i2c_probe(struct i2c_client *i2c)
        return ret;
 }
 
+static void da7213_i2c_remove(struct i2c_client *i2c)
+{
+       pm_runtime_disable(&i2c->dev);
+}
+
 static int __maybe_unused da7213_runtime_suspend(struct device *dev)
 {
        struct da7213_priv *da7213 = dev_get_drvdata(dev);
@@ -2065,6 +2070,7 @@ static struct i2c_driver da7213_i2c_driver = {
                .pm = &da7213_pm,
        },
        .probe_new      = da7213_i2c_probe,
+       .remove         = da7213_i2c_remove,
        .id_table       = da7213_i2c_id,
 };
 
index ed4f7cd..8b6b760 100644 (file)
@@ -436,23 +436,28 @@ static int hdac_hdmi_setup_audio_infoframe(struct hdac_device *hdev,
        return 0;
 }
 
-static int hdac_hdmi_set_tdm_slot(struct snd_soc_dai *dai,
-               unsigned int tx_mask, unsigned int rx_mask,
-               int slots, int slot_width)
+static int hdac_hdmi_set_stream(struct snd_soc_dai *dai,
+                               void *stream, int direction)
 {
        struct hdac_hdmi_priv *hdmi = snd_soc_dai_get_drvdata(dai);
        struct hdac_device *hdev = hdmi->hdev;
        struct hdac_hdmi_dai_port_map *dai_map;
        struct hdac_hdmi_pcm *pcm;
+       struct hdac_stream *hstream;
 
-       dev_dbg(&hdev->dev, "%s: strm_tag: %d\n", __func__, tx_mask);
+       if (!stream)
+               return -EINVAL;
+
+       hstream = (struct hdac_stream *)stream;
+
+       dev_dbg(&hdev->dev, "%s: strm_tag: %d\n", __func__, hstream->stream_tag);
 
        dai_map = &hdmi->dai_map[dai->id];
 
        pcm = hdac_hdmi_get_pcm_from_cvt(hdmi, dai_map->cvt);
 
        if (pcm)
-               pcm->stream_tag = (tx_mask << 4);
+               pcm->stream_tag = (hstream->stream_tag << 4);
 
        return 0;
 }
@@ -1544,7 +1549,7 @@ static const struct snd_soc_dai_ops hdmi_dai_ops = {
        .startup = hdac_hdmi_pcm_open,
        .shutdown = hdac_hdmi_pcm_close,
        .hw_params = hdac_hdmi_set_hw_params,
-       .set_tdm_slot = hdac_hdmi_set_tdm_slot,
+       .set_stream = hdac_hdmi_set_stream,
 };
 
 /*
index a73a7d7..faba423 100644 (file)
@@ -3670,9 +3670,9 @@ static int __maybe_unused rx_macro_runtime_suspend(struct device *dev)
        regcache_cache_only(rx->regmap, true);
        regcache_mark_dirty(rx->regmap);
 
-       clk_disable_unprepare(rx->mclk);
-       clk_disable_unprepare(rx->npl);
        clk_disable_unprepare(rx->fsgen);
+       clk_disable_unprepare(rx->npl);
+       clk_disable_unprepare(rx->mclk);
 
        return 0;
 }
index 473d3cd..589c490 100644 (file)
@@ -2098,9 +2098,9 @@ static int __maybe_unused tx_macro_runtime_suspend(struct device *dev)
        regcache_cache_only(tx->regmap, true);
        regcache_mark_dirty(tx->regmap);
 
-       clk_disable_unprepare(tx->mclk);
-       clk_disable_unprepare(tx->npl);
        clk_disable_unprepare(tx->fsgen);
+       clk_disable_unprepare(tx->npl);
+       clk_disable_unprepare(tx->mclk);
 
        return 0;
 }
index ba7480f..3f6f1bd 100644 (file)
@@ -2506,9 +2506,9 @@ static int __maybe_unused wsa_macro_runtime_suspend(struct device *dev)
        regcache_cache_only(wsa->regmap, true);
        regcache_mark_dirty(wsa->regmap);
 
-       clk_disable_unprepare(wsa->mclk);
-       clk_disable_unprepare(wsa->npl);
        clk_disable_unprepare(wsa->fsgen);
+       clk_disable_unprepare(wsa->npl);
+       clk_disable_unprepare(wsa->mclk);
 
        return 0;
 }
index 79e0039..5a12940 100644 (file)
@@ -533,6 +533,18 @@ static int byt_rt5640_aif1_hw_params(struct snd_pcm_substream *substream,
 
 /* Please keep this list alphabetically sorted */
 static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+       {       /* Acer Iconia One 7 B1-750 */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
+               },
+               .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+                                       BYT_RT5640_JD_SRC_JD1_IN4P |
+                                       BYT_RT5640_OVCD_TH_1500UA |
+                                       BYT_RT5640_OVCD_SF_0P75 |
+                                       BYT_RT5640_SSP0_AIF1 |
+                                       BYT_RT5640_MCLK_EN),
+       },
        {       /* Acer Iconia Tab 8 W1-810 */
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
index d2ed807..767fa89 100644 (file)
@@ -213,6 +213,17 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
                                        SOF_SDW_PCH_DMIC |
                                        RT711_JD1),
        },
+       {
+               /* NUC15 'Rooks County' LAPRC510 and LAPRC710 skews */
+               .callback = sof_sdw_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Intel(R) Client Systems"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LAPRC"),
+               },
+               .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+                                       SOF_SDW_PCH_DMIC |
+                                       RT711_JD2_100K),
+       },
        /* TigerLake-SDCA devices */
        {
                .callback = sof_sdw_quirk_cb,
index 28dd204..d8c8004 100644 (file)
@@ -354,6 +354,20 @@ static const struct snd_soc_acpi_link_adr adl_sdw_rt711_link0_rt1316_link3[] = {
        {}
 };
 
+static const struct snd_soc_acpi_link_adr adl_sdw_rt711_link0_rt1316_link2[] = {
+       {
+               .mask = BIT(0),
+               .num_adr = ARRAY_SIZE(rt711_sdca_0_adr),
+               .adr_d = rt711_sdca_0_adr,
+       },
+       {
+               .mask = BIT(2),
+               .num_adr = ARRAY_SIZE(rt1316_2_single_adr),
+               .adr_d = rt1316_2_single_adr,
+       },
+       {}
+};
+
 static const struct snd_soc_acpi_adr_device mx8373_2_adr[] = {
        {
                .adr = 0x000223019F837300ull,
@@ -625,6 +639,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[] = {
                .sof_tplg_filename = "sof-adl-rt711-l0-rt1316-l3.tplg",
        },
        {
+               .link_mask = 0x5, /* 2 active links required */
+               .links = adl_sdw_rt711_link0_rt1316_link2,
+               .drv_name = "sof_sdw",
+               .sof_tplg_filename = "sof-adl-rt711-l0-rt1316-l2.tplg",
+       },
+       {
                .link_mask = 0x1, /* link0 required */
                .links = adl_rvp,
                .drv_name = "sof_sdw",
index 5eb056b..7958c9d 100644 (file)
@@ -1661,10 +1661,14 @@ static void dpcm_runtime_setup_fe(struct snd_pcm_substream *substream)
        struct snd_pcm_hardware *hw = &runtime->hw;
        struct snd_soc_dai *dai;
        int stream = substream->stream;
+       u64 formats = hw->formats;
        int i;
 
        soc_pcm_hw_init(hw);
 
+       if (formats)
+               hw->formats &= formats;
+
        for_each_rtd_cpu_dais(fe, i, dai) {
                struct snd_soc_pcm_stream *cpu_stream;
 
index a623707..669b99a 100644 (file)
@@ -1805,6 +1805,14 @@ static int sof_ipc4_route_setup(struct snd_sof_dev *sdev, struct snd_sof_route *
        u32 header, extension;
        int ret;
 
+       if (!src_fw_module || !sink_fw_module) {
+               /* The NULL module will print as "(efault)" */
+               dev_err(sdev->dev, "source %s or sink %s widget weren't set up properly\n",
+                       src_fw_module->man4_module_entry.name,
+                       sink_fw_module->man4_module_entry.name);
+               return -ENODEV;
+       }
+
        sroute->src_queue_id = sof_ipc4_get_queue_id(src_widget, sink_widget,
                                                     SOF_PIN_TYPE_SOURCE);
        if (sroute->src_queue_id < 0) {
index 8ede4b9..246b56d 100644 (file)
@@ -405,6 +405,9 @@ static int sof_ipc4_tx_msg(struct snd_sof_dev *sdev, void *msg_data, size_t msg_
 static int sof_ipc4_set_get_data(struct snd_sof_dev *sdev, void *data,
                                 size_t payload_bytes, bool set)
 {
+       const struct sof_dsp_power_state target_state = {
+                       .state = SOF_DSP_PM_D0,
+       };
        size_t payload_limit = sdev->ipc->max_payload_size;
        struct sof_ipc4_msg *ipc4_msg = data;
        struct sof_ipc4_msg tx = {{ 0 }};
@@ -435,6 +438,11 @@ static int sof_ipc4_set_get_data(struct snd_sof_dev *sdev, void *data,
 
        tx.extension |= SOF_IPC4_MOD_EXT_MSG_FIRST_BLOCK(1);
 
+       /* ensure the DSP is in D0i0 before sending IPC */
+       ret = snd_sof_dsp_set_power_state(sdev, &target_state);
+       if (ret < 0)
+               return ret;
+
        /* Serialise IPC TX */
        mutex_lock(&sdev->ipc->tx_mutex);
 
index 419302e..647fa05 100644 (file)
@@ -455,8 +455,8 @@ static void push_back_to_ready_list(struct snd_usb_endpoint *ep,
  * This function is used both for implicit feedback endpoints and in low-
  * latency playback mode.
  */
-void snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
-                                      bool in_stream_lock)
+int snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
+                                     bool in_stream_lock)
 {
        bool implicit_fb = snd_usb_endpoint_implicit_feedback_sink(ep);
 
@@ -480,7 +480,7 @@ void snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
                spin_unlock_irqrestore(&ep->lock, flags);
 
                if (ctx == NULL)
-                       return;
+                       break;
 
                /* copy over the length information */
                if (implicit_fb) {
@@ -495,11 +495,14 @@ void snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
                        break;
                if (err < 0) {
                        /* push back to ready list again for -EAGAIN */
-                       if (err == -EAGAIN)
+                       if (err == -EAGAIN) {
                                push_back_to_ready_list(ep, ctx);
-                       else
+                               break;
+                       }
+
+                       if (!in_stream_lock)
                                notify_xrun(ep);
-                       return;
+                       return -EPIPE;
                }
 
                err = usb_submit_urb(ctx->urb, GFP_ATOMIC);
@@ -507,13 +510,16 @@ void snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
                        usb_audio_err(ep->chip,
                                      "Unable to submit urb #%d: %d at %s\n",
                                      ctx->index, err, __func__);
-                       notify_xrun(ep);
-                       return;
+                       if (!in_stream_lock)
+                               notify_xrun(ep);
+                       return -EPIPE;
                }
 
                set_bit(ctx->index, &ep->active_mask);
                atomic_inc(&ep->submitted_urbs);
        }
+
+       return 0;
 }
 
 /*
index 924f435..c09f68c 100644 (file)
@@ -52,7 +52,7 @@ int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
 int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep,
                                      struct snd_urb_ctx *ctx, int idx,
                                      unsigned int avail);
-void snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
-                                      bool in_stream_lock);
+int snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
+                                     bool in_stream_lock);
 
 #endif /* __USBAUDIO_ENDPOINT_H */
index 405dc0b..4b1c5ba 100644 (file)
@@ -39,8 +39,12 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
        case UAC_VERSION_1:
        default: {
                struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
-               if (format >= 64)
-                       return 0; /* invalid format */
+               if (format >= 64) {
+                       usb_audio_info(chip,
+                                      "%u:%d: invalid format type 0x%llx is detected, processed as PCM\n",
+                                      fp->iface, fp->altsetting, format);
+                       format = UAC_FORMAT_TYPE_I_PCM;
+               }
                sample_width = fmt->bBitResolution;
                sample_bytes = fmt->bSubframeSize;
                format = 1ULL << format;
index d959da7..eec5232 100644 (file)
@@ -1639,7 +1639,7 @@ static int snd_usb_pcm_playback_ack(struct snd_pcm_substream *substream)
         * outputs here
         */
        if (!ep->active_mask)
-               snd_usb_queue_pending_output_urbs(ep, true);
+               return snd_usb_queue_pending_output_urbs(ep, true);
        return 0;
 }
 
index d4e32b3..00b4ba1 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef __ASM_LOONGARCH_BITSPERLONG_H
 #define __ASM_LOONGARCH_BITSPERLONG_H
 
-#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
+#define __BITS_PER_LONG (__SIZEOF_LONG__ * 8)
 
 #include <asm-generic/bitsperlong.h>
 
index 7271a18..8251a0f 100644 (file)
@@ -167,8 +167,7 @@ void test_xdp_do_redirect(void)
 
        if (!ASSERT_EQ(query_opts.feature_flags,
                       NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
-                      NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
-                      NETDEV_XDP_ACT_NDO_XMIT_SG,
+                      NETDEV_XDP_ACT_RX_SG,
                       "veth_src query_opts.feature_flags"))
                goto out;
 
@@ -178,9 +177,34 @@ void test_xdp_do_redirect(void)
 
        if (!ASSERT_EQ(query_opts.feature_flags,
                       NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+                      NETDEV_XDP_ACT_RX_SG,
+                      "veth_dst query_opts.feature_flags"))
+               goto out;
+
+       /* Enable GRO */
+       SYS("ethtool -K veth_src gro on");
+       SYS("ethtool -K veth_dst gro on");
+
+       err = bpf_xdp_query(ifindex_src, XDP_FLAGS_DRV_MODE, &query_opts);
+       if (!ASSERT_OK(err, "veth_src bpf_xdp_query gro on"))
+               goto out;
+
+       if (!ASSERT_EQ(query_opts.feature_flags,
+                      NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
                       NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
                       NETDEV_XDP_ACT_NDO_XMIT_SG,
-                      "veth_dst query_opts.feature_flags"))
+                      "veth_src query_opts.feature_flags gro on"))
+               goto out;
+
+       err = bpf_xdp_query(ifindex_dst, XDP_FLAGS_DRV_MODE, &query_opts);
+       if (!ASSERT_OK(err, "veth_dst bpf_xdp_query gro on"))
+               goto out;
+
+       if (!ASSERT_EQ(query_opts.feature_flags,
+                      NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+                      NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+                      NETDEV_XDP_ACT_NDO_XMIT_SG,
+                      "veth_dst query_opts.feature_flags gro on"))
                goto out;
 
        memcpy(skel->rodata->expect_dst, &pkt_udp.eth.h_dest, ETH_ALEN);
index aa4beae..8c5e98d 100644 (file)
@@ -273,6 +273,8 @@ static int verify_xsk_metadata(struct xsk *xsk)
        if (!ASSERT_NEQ(meta->rx_hash, 0, "rx_hash"))
                return -1;
 
+       ASSERT_EQ(meta->rx_hash_type, 0, "rx_hash_type");
+
        xsk_ring_cons__release(&xsk->rx, 1);
        refill_rx(xsk, comp_addr);
 
index 4c55b4d..e1c7878 100644 (file)
@@ -12,10 +12,14 @@ struct {
        __type(value, __u32);
 } xsk SEC(".maps");
 
+__u64 pkts_skip = 0;
+__u64 pkts_fail = 0;
+__u64 pkts_redir = 0;
+
 extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
                                         __u64 *timestamp) __ksym;
-extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
-                                   __u32 *hash) __ksym;
+extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
+                                   enum xdp_rss_hash_type *rss_type) __ksym;
 
 SEC("xdp")
 int rx(struct xdp_md *ctx)
@@ -26,7 +30,7 @@ int rx(struct xdp_md *ctx)
        struct udphdr *udp = NULL;
        struct iphdr *iph = NULL;
        struct xdp_meta *meta;
-       int ret;
+       int err;
 
        data = (void *)(long)ctx->data;
        data_end = (void *)(long)ctx->data_end;
@@ -46,17 +50,20 @@ int rx(struct xdp_md *ctx)
                        udp = NULL;
        }
 
-       if (!udp)
+       if (!udp) {
+               __sync_add_and_fetch(&pkts_skip, 1);
                return XDP_PASS;
+       }
 
-       if (udp->dest != bpf_htons(9091))
+       /* Forwarding UDP:9091 to AF_XDP */
+       if (udp->dest != bpf_htons(9091)) {
+               __sync_add_and_fetch(&pkts_skip, 1);
                return XDP_PASS;
+       }
 
-       bpf_printk("forwarding UDP:9091 to AF_XDP");
-
-       ret = bpf_xdp_adjust_meta(ctx, -(int)sizeof(struct xdp_meta));
-       if (ret != 0) {
-               bpf_printk("bpf_xdp_adjust_meta returned %d", ret);
+       err = bpf_xdp_adjust_meta(ctx, -(int)sizeof(struct xdp_meta));
+       if (err) {
+               __sync_add_and_fetch(&pkts_fail, 1);
                return XDP_PASS;
        }
 
@@ -65,20 +72,19 @@ int rx(struct xdp_md *ctx)
        meta = data_meta;
 
        if (meta + 1 > data) {
-               bpf_printk("bpf_xdp_adjust_meta doesn't appear to work");
+               __sync_add_and_fetch(&pkts_fail, 1);
                return XDP_PASS;
        }
 
-       if (!bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp))
-               bpf_printk("populated rx_timestamp with %llu", meta->rx_timestamp);
-       else
+       err = bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp);
+       if (err)
                meta->rx_timestamp = 0; /* Used by AF_XDP as not avail signal */
 
-       if (!bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash))
-               bpf_printk("populated rx_hash with %u", meta->rx_hash);
-       else
-               meta->rx_hash = 0; /* Used by AF_XDP as not avail signal */
+       err = bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash, &meta->rx_hash_type);
+       if (err < 0)
+               meta->rx_hash_err = err; /* Used by AF_XDP as no hash signal */
 
+       __sync_add_and_fetch(&pkts_redir, 1);
        return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS);
 }
 
index 77678b0..d151d40 100644 (file)
@@ -21,8 +21,8 @@ struct {
 
 extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
                                         __u64 *timestamp) __ksym;
-extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
-                                   __u32 *hash) __ksym;
+extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
+                                   enum xdp_rss_hash_type *rss_type) __ksym;
 
 SEC("xdp")
 int rx(struct xdp_md *ctx)
@@ -56,7 +56,7 @@ int rx(struct xdp_md *ctx)
        if (timestamp == 0)
                meta->rx_timestamp = 1;
 
-       bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash);
+       bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash, &meta->rx_hash_type);
 
        return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS);
 }
index cf69d05..85f88d9 100644 (file)
@@ -5,17 +5,18 @@
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_endian.h>
 
-extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
-                                   __u32 *hash) __ksym;
+extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
+                                   enum xdp_rss_hash_type *rss_type) __ksym;
 
 int called;
 
 SEC("freplace/rx")
 int freplace_rx(struct xdp_md *ctx)
 {
+       enum xdp_rss_hash_type type = 0;
        u32 hash = 0;
        /* Call _any_ metadata function to make sure we don't crash. */
-       bpf_xdp_metadata_rx_hash(ctx, &hash);
+       bpf_xdp_metadata_rx_hash(ctx, &hash, &type);
        called++;
        return XDP_PASS;
 }
index 1c8acb6..987cf0d 100644 (file)
@@ -141,7 +141,11 @@ static void verify_xdp_metadata(void *data)
        meta = data - sizeof(*meta);
 
        printf("rx_timestamp: %llu\n", meta->rx_timestamp);
-       printf("rx_hash: %u\n", meta->rx_hash);
+       if (meta->rx_hash_err < 0)
+               printf("No rx_hash err=%d\n", meta->rx_hash_err);
+       else
+               printf("rx_hash: 0x%X with RSS type:0x%X\n",
+                      meta->rx_hash, meta->rx_hash_type);
 }
 
 static void verify_skb_metadata(int fd)
@@ -212,7 +216,9 @@ static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd)
        while (true) {
                errno = 0;
                ret = poll(fds, rxq + 1, 1000);
-               printf("poll: %d (%d)\n", ret, errno);
+               printf("poll: %d (%d) skip=%llu fail=%llu redir=%llu\n",
+                      ret, errno, bpf_obj->bss->pkts_skip,
+                      bpf_obj->bss->pkts_fail, bpf_obj->bss->pkts_redir);
                if (ret < 0)
                        break;
                if (ret == 0)
index f6780fb..0c4624d 100644 (file)
@@ -12,4 +12,8 @@
 struct xdp_meta {
        __u64 rx_timestamp;
        __u32 rx_hash;
+       union {
+               __u32 rx_hash_type;
+               __s32 rx_hash_err;
+       };
 };
index a39bb25..03f92d7 100644 (file)
@@ -8,11 +8,12 @@ TEST_PROGS := \
        dev_addr_lists.sh \
        mode-1-recovery-updelay.sh \
        mode-2-recovery-updelay.sh \
-       option_prio.sh \
+       bond_options.sh \
        bond-eth-type-change.sh
 
 TEST_FILES := \
        lag_lib.sh \
+       bond_topo_3d1c.sh \
        net_forwarding_lib.sh
 
 include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/bonding/bond_options.sh b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
new file mode 100755 (executable)
index 0000000..db29a31
--- /dev/null
@@ -0,0 +1,264 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test bonding options with mode 1,5,6
+
+ALL_TESTS="
+       prio
+       arp_validate
+"
+
+REQUIRE_MZ=no
+NUM_NETIFS=0
+lib_dir=$(dirname "$0")
+source ${lib_dir}/net_forwarding_lib.sh
+source ${lib_dir}/bond_topo_3d1c.sh
+
+skip_prio()
+{
+       local skip=1
+
+       # check if iproute support prio option
+       ip -n ${s_ns} link set eth0 type bond_slave prio 10
+       [[ $? -ne 0 ]] && skip=0
+
+       # check if kernel support prio option
+       ip -n ${s_ns} -d link show eth0 | grep -q "prio 10"
+       [[ $? -ne 0 ]] && skip=0
+
+       return $skip
+}
+
+skip_ns()
+{
+       local skip=1
+
+       # check if iproute support ns_ip6_target option
+       ip -n ${s_ns} link add bond1 type bond ns_ip6_target ${g_ip6}
+       [[ $? -ne 0 ]] && skip=0
+
+       # check if kernel support ns_ip6_target option
+       ip -n ${s_ns} -d link show bond1 | grep -q "ns_ip6_target ${g_ip6}"
+       [[ $? -ne 0 ]] && skip=0
+
+       ip -n ${s_ns} link del bond1
+
+       return $skip
+}
+
+active_slave=""
+check_active_slave()
+{
+       local target_active_slave=$1
+       active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave")
+       test "$active_slave" = "$target_active_slave"
+       check_err $? "Current active slave is $active_slave but not $target_active_slave"
+}
+
+
+# Test bonding prio option
+prio_test()
+{
+       local param="$1"
+       RET=0
+
+       # create bond
+       bond_reset "${param}"
+
+       # check bonding member prio value
+       ip -n ${s_ns} link set eth0 type bond_slave prio 0
+       ip -n ${s_ns} link set eth1 type bond_slave prio 10
+       ip -n ${s_ns} link set eth2 type bond_slave prio 11
+       cmd_jq "ip -n ${s_ns} -d -j link show eth0" \
+               ".[].linkinfo.info_slave_data | select (.prio == 0)" "-e" &> /dev/null
+       check_err $? "eth0 prio is not 0"
+       cmd_jq "ip -n ${s_ns} -d -j link show eth1" \
+               ".[].linkinfo.info_slave_data | select (.prio == 10)" "-e" &> /dev/null
+       check_err $? "eth1 prio is not 10"
+       cmd_jq "ip -n ${s_ns} -d -j link show eth2" \
+               ".[].linkinfo.info_slave_data | select (.prio == 11)" "-e" &> /dev/null
+       check_err $? "eth2 prio is not 11"
+
+       bond_check_connection "setup"
+
+       # active slave should be the primary slave
+       check_active_slave eth1
+
+       # active slave should be the higher prio slave
+       ip -n ${s_ns} link set $active_slave down
+       bond_check_connection "fail over"
+       check_active_slave eth2
+
+       # when only 1 slave is up
+       ip -n ${s_ns} link set $active_slave down
+       bond_check_connection "only 1 slave up"
+       check_active_slave eth0
+
+       # when a higher prio slave change to up
+       ip -n ${s_ns} link set eth2 up
+       bond_check_connection "higher prio slave up"
+       case $primary_reselect in
+               "0")
+                       check_active_slave "eth2"
+                       ;;
+               "1")
+                       check_active_slave "eth0"
+                       ;;
+               "2")
+                       check_active_slave "eth0"
+                       ;;
+       esac
+       local pre_active_slave=$active_slave
+
+       # when the primary slave change to up
+       ip -n ${s_ns} link set eth1 up
+       bond_check_connection "primary slave up"
+       case $primary_reselect in
+               "0")
+                       check_active_slave "eth1"
+                       ;;
+               "1")
+                       check_active_slave "$pre_active_slave"
+                       ;;
+               "2")
+                       check_active_slave "$pre_active_slave"
+                       ip -n ${s_ns} link set $active_slave down
+                       bond_check_connection "pre_active slave down"
+                       check_active_slave "eth1"
+                       ;;
+       esac
+
+       # Test changing bond slave prio
+       if [[ "$primary_reselect" == "0" ]];then
+               ip -n ${s_ns} link set eth0 type bond_slave prio 1000000
+               ip -n ${s_ns} link set eth1 type bond_slave prio 0
+               ip -n ${s_ns} link set eth2 type bond_slave prio -50
+               ip -n ${s_ns} -d link show eth0 | grep -q 'prio 1000000'
+               check_err $? "eth0 prio is not 1000000"
+               ip -n ${s_ns} -d link show eth1 | grep -q 'prio 0'
+               check_err $? "eth1 prio is not 0"
+               ip -n ${s_ns} -d link show eth2 | grep -q 'prio -50'
+               check_err $? "eth2 prio is not -50"
+               check_active_slave "eth1"
+
+               ip -n ${s_ns} link set $active_slave down
+               bond_check_connection "change slave prio"
+               check_active_slave "eth0"
+       fi
+}
+
+prio_miimon()
+{
+       local primary_reselect
+       local mode=$1
+
+       for primary_reselect in 0 1 2; do
+               prio_test "mode $mode miimon 100 primary eth1 primary_reselect $primary_reselect"
+               log_test "prio" "$mode miimon primary_reselect $primary_reselect"
+       done
+}
+
+prio_arp()
+{
+       local primary_reselect
+       local mode=$1
+
+       for primary_reselect in 0 1 2; do
+               prio_test "mode active-backup arp_interval 100 arp_ip_target ${g_ip4} primary eth1 primary_reselect $primary_reselect"
+               log_test "prio" "$mode arp_ip_target primary_reselect $primary_reselect"
+       done
+}
+
+prio_ns()
+{
+       local primary_reselect
+       local mode=$1
+
+       if skip_ns; then
+               log_test_skip "prio ns" "Current iproute or kernel doesn't support bond option 'ns_ip6_target'."
+               return 0
+       fi
+
+       for primary_reselect in 0 1 2; do
+               prio_test "mode active-backup arp_interval 100 ns_ip6_target ${g_ip6} primary eth1 primary_reselect $primary_reselect"
+               log_test "prio" "$mode ns_ip6_target primary_reselect $primary_reselect"
+       done
+}
+
+prio()
+{
+       local mode modes="active-backup balance-tlb balance-alb"
+
+       if skip_prio; then
+               log_test_skip "prio" "Current iproute or kernel doesn't support bond option 'prio'."
+               return 0
+       fi
+
+       for mode in $modes; do
+               prio_miimon $mode
+               prio_arp $mode
+               prio_ns $mode
+       done
+}
+
+arp_validate_test()
+{
+       local param="$1"
+       RET=0
+
+       # create bond
+       bond_reset "${param}"
+
+       bond_check_connection
+       [ $RET -ne 0 ] && log_test "arp_validate" "$retmsg"
+
+       # wait for a while to make sure the mii status stable
+       sleep 5
+       for i in $(seq 0 2); do
+               mii_status=$(cmd_jq "ip -n ${s_ns} -j -d link show eth$i" ".[].linkinfo.info_slave_data.mii_status")
+               if [ ${mii_status} != "UP" ]; then
+                       RET=1
+                       log_test "arp_validate" "interface eth$i mii_status $mii_status"
+               fi
+       done
+}
+
+arp_validate_arp()
+{
+       local mode=$1
+       local val
+       for val in $(seq 0 6); do
+               arp_validate_test "mode $mode arp_interval 100 arp_ip_target ${g_ip4} arp_validate $val"
+               log_test "arp_validate" "$mode arp_ip_target arp_validate $val"
+       done
+}
+
+arp_validate_ns()
+{
+       local mode=$1
+       local val
+
+       if skip_ns; then
+               log_test_skip "arp_validate ns" "Current iproute or kernel doesn't support bond option 'ns_ip6_target'."
+               return 0
+       fi
+
+       for val in $(seq 0 6); do
+               arp_validate_test "mode $mode arp_interval 100 ns_ip6_target ${g_ip6} arp_validate $val"
+               log_test "arp_validate" "$mode ns_ip6_target arp_validate $val"
+       done
+}
+
+arp_validate()
+{
+       arp_validate_arp "active-backup"
+       arp_validate_ns "active-backup"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh b/tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh
new file mode 100644 (file)
index 0000000..4045ca9
--- /dev/null
@@ -0,0 +1,143 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Topology for Bond mode 1,5,6 testing
+#
+#  +-------------------------------------+
+#  |                bond0                |
+#  |                  +                  |  Server
+#  |      eth0        | eth1   eth2      |  192.0.2.1/24
+#  |        +-------------------+        |  2001:db8::1/24
+#  |        |         |         |        |
+#  +-------------------------------------+
+#           |         |         |
+#  +-------------------------------------+
+#  |        |         |         |        |
+#  |    +---+---------+---------+---+    |  Gateway
+#  |    |            br0            |    |  192.0.2.254/24
+#  |    +-------------+-------------+    |  2001:db8::254/24
+#  |                  |                  |
+#  +-------------------------------------+
+#                     |
+#  +-------------------------------------+
+#  |                  |                  |  Client
+#  |                  +                  |  192.0.2.10/24
+#  |                eth0                 |  2001:db8::10/24
+#  +-------------------------------------+
+
+s_ns="s-$(mktemp -u XXXXXX)"
+c_ns="c-$(mktemp -u XXXXXX)"
+g_ns="g-$(mktemp -u XXXXXX)"
+s_ip4="192.0.2.1"
+c_ip4="192.0.2.10"
+g_ip4="192.0.2.254"
+s_ip6="2001:db8::1"
+c_ip6="2001:db8::10"
+g_ip6="2001:db8::254"
+
+gateway_create()
+{
+       ip netns add ${g_ns}
+       ip -n ${g_ns} link add br0 type bridge
+       ip -n ${g_ns} link set br0 up
+       ip -n ${g_ns} addr add ${g_ip4}/24 dev br0
+       ip -n ${g_ns} addr add ${g_ip6}/24 dev br0
+}
+
+gateway_destroy()
+{
+       ip -n ${g_ns} link del br0
+       ip netns del ${g_ns}
+}
+
+server_create()
+{
+       ip netns add ${s_ns}
+       ip -n ${s_ns} link add bond0 type bond mode active-backup miimon 100
+
+       for i in $(seq 0 2); do
+               ip -n ${s_ns} link add eth${i} type veth peer name s${i} netns ${g_ns}
+
+               ip -n ${g_ns} link set s${i} up
+               ip -n ${g_ns} link set s${i} master br0
+               ip -n ${s_ns} link set eth${i} master bond0
+       done
+
+       ip -n ${s_ns} link set bond0 up
+       ip -n ${s_ns} addr add ${s_ip4}/24 dev bond0
+       ip -n ${s_ns} addr add ${s_ip6}/24 dev bond0
+       sleep 2
+}
+
+# Reset bond with new mode and options
+bond_reset()
+{
+       local param="$1"
+
+       ip -n ${s_ns} link set bond0 down
+       ip -n ${s_ns} link del bond0
+
+       ip -n ${s_ns} link add bond0 type bond $param
+       for i in $(seq 0 2); do
+               ip -n ${s_ns} link set eth$i master bond0
+       done
+
+       ip -n ${s_ns} link set bond0 up
+       ip -n ${s_ns} addr add ${s_ip4}/24 dev bond0
+       ip -n ${s_ns} addr add ${s_ip6}/24 dev bond0
+       sleep 2
+}
+
+server_destroy()
+{
+       for i in $(seq 0 2); do
+               ip -n ${s_ns} link del eth${i}
+       done
+       ip netns del ${s_ns}
+}
+
+client_create()
+{
+       ip netns add ${c_ns}
+       ip -n ${c_ns} link add eth0 type veth peer name c0 netns ${g_ns}
+
+       ip -n ${g_ns} link set c0 up
+       ip -n ${g_ns} link set c0 master br0
+
+       ip -n ${c_ns} link set eth0 up
+       ip -n ${c_ns} addr add ${c_ip4}/24 dev eth0
+       ip -n ${c_ns} addr add ${c_ip6}/24 dev eth0
+}
+
+client_destroy()
+{
+       ip -n ${c_ns} link del eth0
+       ip netns del ${c_ns}
+}
+
+setup_prepare()
+{
+       gateway_create
+       server_create
+       client_create
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       client_destroy
+       server_destroy
+       gateway_destroy
+}
+
+bond_check_connection()
+{
+       local msg=${1:-"check connection"}
+
+       sleep 2
+       ip netns exec ${s_ns} ping ${c_ip4} -c5 -i 0.1 &>/dev/null
+       check_err $? "${msg}: ping failed"
+       ip netns exec ${s_ns} ping6 ${c_ip6} -c5 -i 0.1 &>/dev/null
+       check_err $? "${msg}: ping6 failed"
+}
diff --git a/tools/testing/selftests/drivers/net/bonding/option_prio.sh b/tools/testing/selftests/drivers/net/bonding/option_prio.sh
deleted file mode 100755 (executable)
index c32eebf..0000000
+++ /dev/null
@@ -1,245 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-#
-# Test bonding option prio
-#
-
-ALL_TESTS="
-       prio_arp_ip_target_test
-       prio_miimon_test
-"
-
-REQUIRE_MZ=no
-REQUIRE_JQ=no
-NUM_NETIFS=0
-lib_dir=$(dirname "$0")
-source "$lib_dir"/net_forwarding_lib.sh
-
-destroy()
-{
-       ip link del bond0 &>/dev/null
-       ip link del br0 &>/dev/null
-       ip link del veth0 &>/dev/null
-       ip link del veth1 &>/dev/null
-       ip link del veth2 &>/dev/null
-       ip netns del ns1 &>/dev/null
-       ip link del veth3 &>/dev/null
-}
-
-cleanup()
-{
-       pre_cleanup
-
-       destroy
-}
-
-skip()
-{
-        local skip=1
-       ip link add name bond0 type bond mode 1 miimon 100 &>/dev/null
-       ip link add name veth0 type veth peer name veth0_p
-       ip link set veth0 master bond0
-
-       # check if iproute support prio option
-       ip link set dev veth0 type bond_slave prio 10
-       [[ $? -ne 0 ]] && skip=0
-
-       # check if bonding support prio option
-       ip -d link show veth0 | grep -q "prio 10"
-       [[ $? -ne 0 ]] && skip=0
-
-       ip link del bond0 &>/dev/null
-       ip link del veth0
-
-       return $skip
-}
-
-active_slave=""
-check_active_slave()
-{
-       local target_active_slave=$1
-       active_slave="$(cat /sys/class/net/bond0/bonding/active_slave)"
-       test "$active_slave" = "$target_active_slave"
-       check_err $? "Current active slave is $active_slave but not $target_active_slave"
-}
-
-
-# Test bonding prio option with mode=$mode monitor=$monitor
-# and primary_reselect=$primary_reselect
-prio_test()
-{
-       RET=0
-
-       local monitor=$1
-       local mode=$2
-       local primary_reselect=$3
-
-       local bond_ip4="192.169.1.2"
-       local peer_ip4="192.169.1.1"
-       local bond_ip6="2009:0a:0b::02"
-       local peer_ip6="2009:0a:0b::01"
-
-
-       # create veths
-       ip link add name veth0 type veth peer name veth0_p
-       ip link add name veth1 type veth peer name veth1_p
-       ip link add name veth2 type veth peer name veth2_p
-
-       # create bond
-       if [[ "$monitor" == "miimon" ]];then
-               ip link add name bond0 type bond mode $mode miimon 100 primary veth1 primary_reselect $primary_reselect
-       elif [[ "$monitor" == "arp_ip_target" ]];then
-               ip link add name bond0 type bond mode $mode arp_interval 1000 arp_ip_target $peer_ip4 primary veth1 primary_reselect $primary_reselect
-       elif [[ "$monitor" == "ns_ip6_target" ]];then
-               ip link add name bond0 type bond mode $mode arp_interval 1000 ns_ip6_target $peer_ip6 primary veth1 primary_reselect $primary_reselect
-       fi
-       ip link set bond0 up
-       ip link set veth0 master bond0
-       ip link set veth1 master bond0
-       ip link set veth2 master bond0
-       # check bonding member prio value
-       ip link set dev veth0 type bond_slave prio 0
-       ip link set dev veth1 type bond_slave prio 10
-       ip link set dev veth2 type bond_slave prio 11
-       ip -d link show veth0 | grep -q 'prio 0'
-       check_err $? "veth0 prio is not 0"
-       ip -d link show veth1 | grep -q 'prio 10'
-       check_err $? "veth0 prio is not 10"
-       ip -d link show veth2 | grep -q 'prio 11'
-       check_err $? "veth0 prio is not 11"
-
-       ip link set veth0 up
-       ip link set veth1 up
-       ip link set veth2 up
-       ip link set veth0_p up
-       ip link set veth1_p up
-       ip link set veth2_p up
-
-       # prepare ping target
-       ip link add name br0 type bridge
-       ip link set br0 up
-       ip link set veth0_p master br0
-       ip link set veth1_p master br0
-       ip link set veth2_p master br0
-       ip link add name veth3 type veth peer name veth3_p
-       ip netns add ns1
-       ip link set veth3_p master br0 up
-       ip link set veth3 netns ns1 up
-       ip netns exec ns1 ip addr add $peer_ip4/24 dev veth3
-       ip netns exec ns1 ip addr add $peer_ip6/64 dev veth3
-       ip addr add $bond_ip4/24 dev bond0
-       ip addr add $bond_ip6/64 dev bond0
-       sleep 5
-
-       ping $peer_ip4 -c5 -I bond0 &>/dev/null
-       check_err $? "ping failed 1."
-       ping6 $peer_ip6 -c5 -I bond0 &>/dev/null
-       check_err $? "ping6 failed 1."
-
-       # active salve should be the primary slave
-       check_active_slave veth1
-
-       # active slave should be the higher prio slave
-       ip link set $active_slave down
-       ping $peer_ip4 -c5 -I bond0 &>/dev/null
-       check_err $? "ping failed 2."
-       check_active_slave veth2
-
-       # when only 1 slave is up
-       ip link set $active_slave down
-       ping $peer_ip4 -c5 -I bond0 &>/dev/null
-       check_err $? "ping failed 3."
-       check_active_slave veth0
-
-       # when a higher prio slave change to up
-       ip link set veth2 up
-       ping $peer_ip4 -c5 -I bond0 &>/dev/null
-       check_err $? "ping failed 4."
-       case $primary_reselect in
-               "0")
-                       check_active_slave "veth2"
-                       ;;
-               "1")
-                       check_active_slave "veth0"
-                       ;;
-               "2")
-                       check_active_slave "veth0"
-                       ;;
-       esac
-       local pre_active_slave=$active_slave
-
-       # when the primary slave change to up
-       ip link set veth1 up
-       ping $peer_ip4 -c5 -I bond0 &>/dev/null
-       check_err $? "ping failed 5."
-       case $primary_reselect in
-               "0")
-                       check_active_slave "veth1"
-                       ;;
-               "1")
-                       check_active_slave "$pre_active_slave"
-                       ;;
-               "2")
-                       check_active_slave "$pre_active_slave"
-                       ip link set $active_slave down
-                       ping $peer_ip4 -c5 -I bond0 &>/dev/null
-                       check_err $? "ping failed 6."
-                       check_active_slave "veth1"
-                       ;;
-       esac
-
-       # Test changing bond salve prio
-       if [[ "$primary_reselect" == "0" ]];then
-               ip link set dev veth0 type bond_slave prio 1000000
-               ip link set dev veth1 type bond_slave prio 0
-               ip link set dev veth2 type bond_slave prio -50
-               ip -d link show veth0 | grep -q 'prio 1000000'
-               check_err $? "veth0 prio is not 1000000"
-               ip -d link show veth1 | grep -q 'prio 0'
-               check_err $? "veth1 prio is not 0"
-               ip -d link show veth2 | grep -q 'prio -50'
-               check_err $? "veth3 prio is not -50"
-               check_active_slave "veth1"
-
-               ip link set $active_slave down
-               ping $peer_ip4 -c5 -I bond0 &>/dev/null
-               check_err $? "ping failed 7."
-               check_active_slave "veth0"
-       fi
-
-       cleanup
-
-       log_test "prio_test" "Test bonding option 'prio' with mode=$mode monitor=$monitor and primary_reselect=$primary_reselect"
-}
-
-prio_miimon_test()
-{
-       local mode
-       local primary_reselect
-
-       for mode in 1 5 6; do
-               for primary_reselect in 0 1 2; do
-                       prio_test "miimon" $mode $primary_reselect
-               done
-       done
-}
-
-prio_arp_ip_target_test()
-{
-       local primary_reselect
-
-       for primary_reselect in 0 1 2; do
-               prio_test "arp_ip_target" 1 $primary_reselect
-       done
-}
-
-if skip;then
-       log_test_skip "option_prio.sh" "Current iproute doesn't support 'prio'."
-       exit 0
-fi
-
-trap cleanup EXIT
-
-tests_run
-
-exit "$EXIT_STATUS"
index 582669c..c6a8c73 100644 (file)
@@ -18,6 +18,7 @@
 #include <grp.h>
 #include <stdbool.h>
 #include <stdarg.h>
+#include <linux/mount.h>
 
 #include "../kselftest_harness.h"
 
index cc9fd55..2529226 100644 (file)
@@ -48,3 +48,4 @@ CONFIG_BAREUDP=m
 CONFIG_IPV6_IOAM6_LWTUNNEL=y
 CONFIG_CRYPTO_SM4_GENERIC=y
 CONFIG_AMT=m
+CONFIG_IP_SCTP=m
index 48e52f9..b1eb7bc 100755 (executable)
@@ -913,6 +913,7 @@ test_listener()
                $client4_port > /dev/null 2>&1 &
        local listener_pid=$!
 
+       sleep 0.5
        verify_listener_events $client_evts $LISTENER_CREATED $AF_INET 10.0.2.2 $client4_port
 
        # ADD_ADDR from client to server machine reusing the subflow port
@@ -928,6 +929,7 @@ test_listener()
        # Delete the listener from the client ns, if one was created
        kill_wait $listener_pid
 
+       sleep 0.5
        verify_listener_events $client_evts $LISTENER_CLOSED $AF_INET 10.0.2.2 $client4_port
 }
 
index 3243c90..5d467d1 100644 (file)
@@ -62,7 +62,7 @@ class OvsDatapath(GenericNetlinkSocket):
         nla_map = (
             ("OVS_DP_ATTR_UNSPEC", "none"),
             ("OVS_DP_ATTR_NAME", "asciiz"),
-            ("OVS_DP_ATTR_UPCALL_PID", "uint32"),
+            ("OVS_DP_ATTR_UPCALL_PID", "array(uint32)"),
             ("OVS_DP_ATTR_STATS", "dpstats"),
             ("OVS_DP_ATTR_MEGAFLOW_STATS", "megaflowstats"),
             ("OVS_DP_ATTR_USER_FEATURES", "uint32"),
index 0fd0d2d..a26c562 100755 (executable)
@@ -60,6 +60,7 @@ ip link set dev $VETH up
 ip -n $NETNS link set dev $VETH up
 chk_rps "changing rps_default_mask affect newly created devices" "" $VETH 3
 chk_rps "changing rps_default_mask don't affect newly child netns[II]" $NETNS $VETH 0
+ip link del dev $VETH
 ip netns del $NETNS
 
 setup
diff --git a/tools/testing/selftests/sigaltstack/current_stack_pointer.h b/tools/testing/selftests/sigaltstack/current_stack_pointer.h
new file mode 100644 (file)
index 0000000..ea9bdf3
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if __alpha__
+register unsigned long sp asm("$30");
+#elif __arm__ || __aarch64__ || __csky__ || __m68k__ || __mips__ || __riscv
+register unsigned long sp asm("sp");
+#elif __i386__
+register unsigned long sp asm("esp");
+#elif __loongarch64
+register unsigned long sp asm("$sp");
+#elif __ppc__
+register unsigned long sp asm("r1");
+#elif __s390x__
+register unsigned long sp asm("%15");
+#elif __sh__
+register unsigned long sp asm("r15");
+#elif __x86_64__
+register unsigned long sp asm("rsp");
+#elif __XTENSA__
+register unsigned long sp asm("a1");
+#else
+#error "implement current_stack_pointer equivalent"
+#endif
index c53b070..98d37cb 100644 (file)
@@ -20,6 +20,7 @@
 #include <sys/auxv.h>
 
 #include "../kselftest.h"
+#include "current_stack_pointer.h"
 
 #ifndef SS_AUTODISARM
 #define SS_AUTODISARM  (1U << 31)
@@ -46,12 +47,6 @@ void my_usr1(int sig, siginfo_t *si, void *u)
        stack_t stk;
        struct stk_data *p;
 
-#if __s390x__
-       register unsigned long sp asm("%15");
-#else
-       register unsigned long sp asm("sp");
-#endif
-
        if (sp < (unsigned long)sstack ||
                        sp >= (unsigned long)sstack + stack_size) {
                ksft_exit_fail_msg("SP is not on sigaltstack\n");
index 3de10db..12b97c9 100644 (file)
@@ -968,6 +968,91 @@ static void test_seqpacket_inv_buf_server(const struct test_opts *opts)
        test_inv_buf_server(opts, false);
 }
 
+#define HELLO_STR "HELLO"
+#define WORLD_STR "WORLD"
+
+static void test_stream_virtio_skb_merge_client(const struct test_opts *opts)
+{
+       ssize_t res;
+       int fd;
+
+       fd = vsock_stream_connect(opts->peer_cid, 1234);
+       if (fd < 0) {
+               perror("connect");
+               exit(EXIT_FAILURE);
+       }
+
+       /* Send first skbuff. */
+       res = send(fd, HELLO_STR, strlen(HELLO_STR), 0);
+       if (res != strlen(HELLO_STR)) {
+               fprintf(stderr, "unexpected send(2) result %zi\n", res);
+               exit(EXIT_FAILURE);
+       }
+
+       control_writeln("SEND0");
+       /* Peer reads part of first skbuff. */
+       control_expectln("REPLY0");
+
+       /* Send second skbuff, it will be appended to the first. */
+       res = send(fd, WORLD_STR, strlen(WORLD_STR), 0);
+       if (res != strlen(WORLD_STR)) {
+               fprintf(stderr, "unexpected send(2) result %zi\n", res);
+               exit(EXIT_FAILURE);
+       }
+
+       control_writeln("SEND1");
+       /* Peer reads merged skbuff packet. */
+       control_expectln("REPLY1");
+
+       close(fd);
+}
+
+static void test_stream_virtio_skb_merge_server(const struct test_opts *opts)
+{
+       unsigned char buf[64];
+       ssize_t res;
+       int fd;
+
+       fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+       if (fd < 0) {
+               perror("accept");
+               exit(EXIT_FAILURE);
+       }
+
+       control_expectln("SEND0");
+
+       /* Read skbuff partially. */
+       res = recv(fd, buf, 2, 0);
+       if (res != 2) {
+               fprintf(stderr, "expected recv(2) returns 2 bytes, got %zi\n", res);
+               exit(EXIT_FAILURE);
+       }
+
+       control_writeln("REPLY0");
+       control_expectln("SEND1");
+
+       res = recv(fd, buf + 2, sizeof(buf) - 2, 0);
+       if (res != 8) {
+               fprintf(stderr, "expected recv(2) returns 8 bytes, got %zi\n", res);
+               exit(EXIT_FAILURE);
+       }
+
+       res = recv(fd, buf, sizeof(buf) - 8 - 2, MSG_DONTWAIT);
+       if (res != -1) {
+               fprintf(stderr, "expected recv(2) failure, got %zi\n", res);
+               exit(EXIT_FAILURE);
+       }
+
+       if (memcmp(buf, HELLO_STR WORLD_STR, strlen(HELLO_STR WORLD_STR))) {
+               fprintf(stderr, "pattern mismatch\n");
+               exit(EXIT_FAILURE);
+       }
+
+       control_writeln("REPLY1");
+
+       close(fd);
+}
+
 static struct test_case test_cases[] = {
        {
                .name = "SOCK_STREAM connection reset",
@@ -1038,6 +1123,11 @@ static struct test_case test_cases[] = {
                .run_client = test_seqpacket_inv_buf_client,
                .run_server = test_seqpacket_inv_buf_server,
        },
+       {
+               .name = "SOCK_STREAM virtio skb merge",
+               .run_client = test_stream_virtio_skb_merge_client,
+               .run_server = test_stream_virtio_skb_merge_server,
+       },
        {},
 };
 
index b64845b..4fb9368 100644 (file)
@@ -61,7 +61,7 @@ and
       id=channel0,name=agent-ctl-path\
  ##data path##
      -chardev pipe,id=charchannel1,path=/tmp/virtio-trace/trace-path-cpu0\
-     -device virtserialport,bus=virtio-serial0.0,nr=2,chardev=charchannel0,\
+     -device virtserialport,bus=virtio-serial0.0,nr=2,chardev=charchannel1,\
       id=channel1,name=trace-path-cpu0\
       ...
 
index ee01e40..6123053 100644 (file)
@@ -353,6 +353,12 @@ static int cpio_mkfile(const char *name, const char *location,
                buf.st_mtime = 0xffffffff;
        }
 
+       if (buf.st_mtime < 0) {
+               fprintf(stderr, "%s: Timestamp negative, clipping.\n",
+                       location);
+               buf.st_mtime = 0;
+       }
+
        if (buf.st_size > 0xffffffff) {
                fprintf(stderr, "%s: Size exceeds maximum cpio file size\n",
                        location);
@@ -602,10 +608,10 @@ int main (int argc, char *argv[])
        /*
         * Timestamps after 2106-02-07 06:28:15 UTC have an ascii hex time_t
         * representation that exceeds 8 chars and breaks the cpio header
-        * specification.
+        * specification. Negative timestamps similarly exceed 8 chars.
         */
-       if (default_mtime > 0xffffffff) {
-               fprintf(stderr, "ERROR: Timestamp too large for cpio format\n");
+       if (default_mtime > 0xffffffff || default_mtime < 0) {
+               fprintf(stderr, "ERROR: Timestamp out of range for cpio format\n");
                exit(1);
        }
 
index 2a3ed40..b0af834 100644 (file)
@@ -55,6 +55,15 @@ irqfd_inject(struct work_struct *work)
                            irqfd->gsi, 1, false);
 }
 
+static void irqfd_resampler_notify(struct kvm_kernel_irqfd_resampler *resampler)
+{
+       struct kvm_kernel_irqfd *irqfd;
+
+       list_for_each_entry_srcu(irqfd, &resampler->list, resampler_link,
+                                srcu_read_lock_held(&resampler->kvm->irq_srcu))
+               eventfd_signal(irqfd->resamplefd, 1);
+}
+
 /*
  * Since resampler irqfds share an IRQ source ID, we de-assert once
  * then notify all of the resampler irqfds using this GSI.  We can't
@@ -65,7 +74,6 @@ irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
 {
        struct kvm_kernel_irqfd_resampler *resampler;
        struct kvm *kvm;
-       struct kvm_kernel_irqfd *irqfd;
        int idx;
 
        resampler = container_of(kian,
@@ -76,11 +84,7 @@ irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
                    resampler->notifier.gsi, 0, false);
 
        idx = srcu_read_lock(&kvm->irq_srcu);
-
-       list_for_each_entry_srcu(irqfd, &resampler->list, resampler_link,
-           srcu_read_lock_held(&kvm->irq_srcu))
-               eventfd_signal(irqfd->resamplefd, 1);
-
+       irqfd_resampler_notify(resampler);
        srcu_read_unlock(&kvm->irq_srcu, idx);
 }
 
@@ -96,8 +100,12 @@ irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
        synchronize_srcu(&kvm->irq_srcu);
 
        if (list_empty(&resampler->list)) {
-               list_del(&resampler->link);
+               list_del_rcu(&resampler->link);
                kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
+               /*
+                * synchronize_srcu(&kvm->irq_srcu) already called
+                * in kvm_unregister_irq_ack_notifier().
+                */
                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                            resampler->notifier.gsi, 0, false);
                kfree(resampler);
@@ -369,7 +377,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
                        resampler->notifier.irq_acked = irqfd_resampler_ack;
                        INIT_LIST_HEAD(&resampler->link);
 
-                       list_add(&resampler->link, &kvm->irqfds.resampler_list);
+                       list_add_rcu(&resampler->link, &kvm->irqfds.resampler_list);
                        kvm_register_irq_ack_notifier(kvm,
                                                      &resampler->notifier);
                        irqfd->resampler = resampler;
@@ -644,6 +652,31 @@ void kvm_irq_routing_update(struct kvm *kvm)
        spin_unlock_irq(&kvm->irqfds.lock);
 }
 
+bool kvm_notify_irqfd_resampler(struct kvm *kvm,
+                               unsigned int irqchip,
+                               unsigned int pin)
+{
+       struct kvm_kernel_irqfd_resampler *resampler;
+       int gsi, idx;
+
+       idx = srcu_read_lock(&kvm->irq_srcu);
+       gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
+       if (gsi != -1) {
+               list_for_each_entry_srcu(resampler,
+                                        &kvm->irqfds.resampler_list, link,
+                                        srcu_read_lock_held(&kvm->irq_srcu)) {
+                       if (resampler->notifier.gsi == gsi) {
+                               irqfd_resampler_notify(resampler);
+                               srcu_read_unlock(&kvm->irq_srcu, idx);
+                               return true;
+                       }
+               }
+       }
+       srcu_read_unlock(&kvm->irq_srcu, idx);
+
+       return false;
+}
+
 /*
  * create a host-wide workqueue for issuing deferred shutdown requests
  * aggregated from all vm* instances. We need our own isolated
index d255964..b1679d0 100644 (file)
@@ -4479,7 +4479,6 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
 #endif
 #ifdef CONFIG_HAVE_KVM_IRQFD
        case KVM_CAP_IRQFD:
-       case KVM_CAP_IRQFD_RESAMPLE:
 #endif
        case KVM_CAP_IOEVENTFD_ANY_LENGTH:
        case KVM_CAP_CHECK_EXTENSION_VM: