Merge tag 'qcom-drivers-for-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git...
author     Arnd Bergmann <arnd@arndb.de>
Wed, 18 Aug 2021 13:33:01 +0000 (15:33 +0200)
committer  Arnd Bergmann <arnd@arndb.de>
Wed, 18 Aug 2021 13:33:02 +0000 (15:33 +0200)
Qualcomm driver updates for v5.15

This fixes the "shared memory state machine" (SMSM) interrupt logic to
avoid missing state transitions that happen while the interrupts are
masked.
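
The second patch in the series exposes this through the kernel's
irqchip-state API. A minimal sketch of such a callback is below (the
entry structure and its remote_state field are illustrative names, not
the driver's actual ones); it lets the IRQ core query whether the level
behind a masked SMSM bit is currently asserted, so a transition that
occurred while masked is not lost:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/irq.h>

/* Illustrative container for one remote processor's state word. */
struct smsm_entry_demo {
	void __iomem *remote_state;
};

static int smsm_demo_get_irqchip_state(struct irq_data *d,
				       enum irqchip_irq_state which,
				       bool *state)
{
	struct smsm_entry_demo *entry = irq_data_get_irq_chip_data(d);

	/* Only the "pending" state can be reported for an SMSM bit. */
	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	/* Report the current level of the remote state bit, even while
	 * the interrupt is masked. */
	*state = !!(readl(entry->remote_state) & BIT(d->hwirq));
	return 0;
}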

SM6115 support is added to smd-rpm and rpmpd.

The Qualcomm SCM firmware driver can once again be compiled and loaded
as a kernel module.

An out-of-bounds access involving the cooling devices of the AOSS
driver is corrected. The binding is converted to YAML, and a generic
compatible is introduced to reduce driver churn.

The GENI wrapper gains a helper function, used by the I2C and SPI
drivers, for switching the serial engine hardware over to the wrapper's
DMA engine.
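
As an illustration, here is a hedged sketch of how a consumer driver
might use that helper; geni_se_select_mode() gains the GENI_GPI_DMA
mode in this series, while the wrapper function and exact header path
below are assumptions:

#include <linux/qcom-geni-se.h>

/* Hand the serial engine over to the wrapper's GPI DMA engine instead
 * of programmed I/O or the SE's own FIFO/DMA modes. */
static void demo_enable_gpi_dma(struct geni_se *se)
{
	geni_se_select_mode(se, GENI_GPI_DMA);
}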

Lastly, it contains a number of cleanups and smaller fixes for rpmhpd,
socinfo, CPR, mdt_loader and the GENI DT binding.

* tag 'qcom-drivers-for-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux:
  soc: qcom: smsm: Fix missed interrupts if state changes while masked
  soc: qcom: smsm: Implement support for get_irqchip_state
  soc: qcom: mdt_loader: be more informative on errors
  dt-bindings: qcom: geni-se: document iommus
  soc: qcom: smd-rpm: Add SM6115 compatible
  soc: qcom: geni: Add support for gpi dma
  soc: qcom: geni: move GENI_IF_DISABLE_RO to common header
  PM: AVS: qcom-cpr: Use nvmem_cell_read_variable_le_u32()
  drivers: soc: qcom: rpmpd: Add SM6115 RPM Power Domains
  dt-bindings: power: rpmpd: Add SM6115 to rpmpd binding
  dt-bindings: soc: qcom: smd-rpm: Add SM6115 compatible
  soc: qcom: aoss: Fix the out of bound usage of cooling_devs
  firmware: qcom_scm: Allow qcom_scm driver to be loadable as a permanent module
  soc: qcom: socinfo: Don't print anything if nothing found
  soc: qcom: rpmhpd: Use corner in power_off
  soc: qcom: aoss: Add generic compatible
  dt-bindings: soc: qcom: aoss: Convert to YAML
  dt-bindings: soc: qcom: aoss: Add SC8180X and generic compatible
  firmware: qcom_scm: remove a duplicative condition
  firmware: qcom_scm: Mark string array const

Link: https://lore.kernel.org/r/20210816214840.581244-1-bjorn.andersson@linaro.org
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
1008 files changed:
Documentation/ABI/testing/sysfs-ptp
Documentation/arm64/tagged-address-abi.rst
Documentation/dev-tools/kunit/running_tips.rst
Documentation/devicetree/bindings/ata/intel,ixp4xx-compact-flash.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/bus/intel,ixp4xx-expansion-bus-controller.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/clock/brcm,iproc-clocks.yaml
Documentation/devicetree/bindings/display/renesas,du.yaml
Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
Documentation/devicetree/bindings/firmware/arm,scmi.yaml
Documentation/devicetree/bindings/hwmon/adt7475.yaml
Documentation/devicetree/bindings/iommu/arm,smmu.yaml
Documentation/devicetree/bindings/iommu/rockchip,iommu.yaml
Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml
Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml
Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
Documentation/devicetree/bindings/net/gpmc-eth.txt
Documentation/devicetree/bindings/net/imx-dwmac.txt [deleted file]
Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/net/smsc,lan9115.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/net/smsc911x.txt [deleted file]
Documentation/devicetree/bindings/net/snps,dwmac.yaml
Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml
Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml
Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
Documentation/devicetree/bindings/rtc/faraday,ftrtc010.yaml
Documentation/devicetree/bindings/soc/ti/ti,pruss.yaml
Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
Documentation/devicetree/bindings/spi/spi-controller.yaml
Documentation/devicetree/bindings/usb/nxp,isp1760.yaml
Documentation/driver-api/early-userspace/early_userspace_support.rst
Documentation/features/core/thread-info-in-task/arch-support.txt [new file with mode: 0644]
Documentation/features/time/arch-tick-broadcast/arch-support.txt
Documentation/filesystems/ramfs-rootfs-initramfs.rst
Documentation/networking/af_xdp.rst
Documentation/networking/ethtool-netlink.rst
Documentation/networking/ip-sysctl.rst
Documentation/networking/nf_conntrack-sysctl.rst
Documentation/networking/tipc.rst
Documentation/trace/histogram.rst
Documentation/translations/zh_CN/process/2.Process.rst
Documentation/virt/kvm/api.rst
LICENSES/dual/CC-BY-4.0
MAINTAINERS
Makefile
arch/alpha/Kconfig
arch/alpha/boot/bootp.c
arch/alpha/boot/bootpz.c
arch/alpha/boot/misc.c
arch/alpha/configs/defconfig
arch/alpha/include/asm/compiler.h
arch/alpha/include/asm/syscall.h
arch/alpha/kernel/osf_sys.c
arch/alpha/kernel/perf_event.c
arch/alpha/kernel/process.c
arch/alpha/kernel/setup.c
arch/alpha/kernel/smp.c
arch/alpha/kernel/sys_nautilus.c
arch/alpha/kernel/traps.c
arch/alpha/math-emu/math.c
arch/arm/Kconfig
arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts
arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
arch/arm/boot/dts/imx6q.dtsi
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
arch/arm/boot/dts/versatile-ab.dts
arch/arm/boot/dts/versatile-pb.dts
arch/arm/configs/integrator_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/realview_defconfig
arch/arm/configs/shmobile_defconfig
arch/arm/configs/u8500_defconfig
arch/arm/configs/versatile_defconfig
arch/arm/configs/vexpress_defconfig
arch/arm/mach-davinci/Kconfig
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-rpc/riscpc.c
arch/arm/mach-tegra/pm.c
arch/arm/mach-tegra/pm.h
arch/arm/mach-tegra/tegra.c
arch/arm/net/bpf_jit_32.c
arch/arm64/Kconfig
arch/arm64/boot/dts/freescale/imx8mp.dtsi
arch/arm64/boot/dts/nvidia/tegra194.dtsi
arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
arch/arm64/boot/dts/qcom/ipq8074.dtsi
arch/arm64/boot/dts/qcom/msm8996.dtsi
arch/arm64/boot/dts/qcom/msm8998.dtsi
arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
arch/arm64/boot/dts/qcom/qcs404.dtsi
arch/arm64/boot/dts/qcom/sc7180.dtsi
arch/arm64/boot/dts/qcom/sdm845.dtsi
arch/arm64/boot/dts/qcom/sm8150.dtsi
arch/arm64/boot/dts/renesas/r9a07g044.dtsi
arch/arm64/include/asm/cache.h
arch/arm64/include/asm/smp_plat.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry-common.c
arch/arm64/kernel/mte.c
arch/arm64/kernel/smccc-call.S
arch/arm64/kvm/mmu.c
arch/arm64/lib/copy_from_user.S
arch/arm64/lib/copy_in_user.S
arch/arm64/lib/copy_to_user.S
arch/arm64/lib/strlen.S
arch/arm64/mm/mmu.c
arch/arm64/net/bpf_jit_comp.c
arch/h8300/Kconfig.cpu
arch/ia64/Kconfig
arch/m68k/Kconfig
arch/m68k/Kconfig.machine
arch/m68k/coldfire/m525x.c
arch/mips/Kconfig
arch/mips/include/asm/fpu.h
arch/mips/mm/tlbex.c
arch/mips/net/ebpf_jit.c
arch/nds32/mm/mmap.c
arch/parisc/Kconfig
arch/powerpc/Kconfig
arch/powerpc/kernel/vdso64/Makefile
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_nested.c
arch/powerpc/kvm/book3s_hv_p9_entry.c
arch/powerpc/kvm/book3s_rtas.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/mm/nohash/8xx.c
arch/powerpc/net/bpf_jit_comp32.c
arch/powerpc/net/bpf_jit_comp64.c
arch/powerpc/platforms/pasemi/idle.c
arch/powerpc/platforms/powermac/smp.c
arch/powerpc/platforms/pseries/setup.c
arch/riscv/include/asm/efi.h
arch/riscv/kernel/stacktrace.c
arch/riscv/lib/uaccess.S
arch/riscv/mm/init.c
arch/riscv/net/bpf_jit_comp32.c
arch/riscv/net/bpf_jit_comp64.c
arch/s390/boot/text_dma.S
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/asm/ftrace.h
arch/s390/include/asm/kvm_host.h
arch/s390/kernel/ftrace.c
arch/s390/kernel/mcount.S
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/uprobes.c
arch/s390/kernel/vdso32/Makefile
arch/s390/kvm/diag.c
arch/s390/kvm/kvm-s390.c
arch/s390/net/bpf_jit_comp.c
arch/sh/Kconfig
arch/sparc/Kconfig
arch/sparc/net/bpf_jit_comp_64.c
arch/x86/Kconfig
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/jump_label.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/ioapic.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging.h [new file with mode: 0644]
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/mmu/spte.h
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/svm/svm_onhyperv.h
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/mm/pgtable.c
arch/x86/net/bpf_jit_comp.c
arch/x86/net/bpf_jit_comp32.c
arch/xtensa/Kconfig
block/blk-iocost.c
block/blk-mq-sched.c
block/genhd.c
drivers/acpi/Kconfig
drivers/acpi/dptf/dptf_pch_fivr.c
drivers/acpi/resource.c
drivers/acpi/utils.c
drivers/acpi/x86/s2idle.c
drivers/ata/libata-sff.c
drivers/ata/pata_ixp4xx_cf.c
drivers/base/auxiliary.c
drivers/base/core.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/paride/pd.c
drivers/block/rbd.c
drivers/block/xen-blkfront.c
drivers/bus/Kconfig
drivers/bus/Makefile
drivers/bus/intel-ixp4xx-eb.c [new file with mode: 0644]
drivers/bus/mhi/core/main.c
drivers/bus/mhi/pci_generic.c
drivers/bus/ti-sysc.c
drivers/char/powernv-op-panel.c
drivers/clk/renesas/r9a07g044-cpg.c
drivers/clk/renesas/renesas-rzg2l-cpg.c
drivers/clk/renesas/renesas-rzg2l-cpg.h
drivers/clocksource/timer-ixp4xx.c
drivers/cpufreq/longhaul.c
drivers/dma-buf/sync_file.c
drivers/dma/imx-sdma.c
drivers/dma/ipu/ipu_idmac.c
drivers/dma/mpc512x_dma.c
drivers/dma/ti/k3-udma.c
drivers/edac/Kconfig
drivers/firmware/Kconfig
drivers/firmware/arm_ffa/bus.c
drivers/firmware/arm_ffa/driver.c
drivers/firmware/arm_scmi/Kconfig [new file with mode: 0644]
drivers/firmware/arm_scmi/Makefile
drivers/firmware/arm_scmi/bus.c
drivers/firmware/arm_scmi/common.h
drivers/firmware/arm_scmi/driver.c
drivers/firmware/arm_scmi/mailbox.c
drivers/firmware/arm_scmi/msg.c [new file with mode: 0644]
drivers/firmware/arm_scmi/notify.c
drivers/firmware/arm_scmi/sensors.c
drivers/firmware/arm_scmi/smc.c
drivers/firmware/arm_scmi/virtio.c [new file with mode: 0644]
drivers/firmware/efi/dev-path-parser.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/firmware/efi/mokvar-table.c
drivers/firmware/efi/tpm.c
drivers/firmware/tegra/bpmp-debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h [deleted file]
drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h [deleted file]
drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
drivers/gpu/drm/amd/pm/inc/smu_types.h
drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h [deleted file]
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c [deleted file]
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/i915/display/intel_bios.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/intel_device_info.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
drivers/gpu/drm/msm/dp/dp_catalog.c
drivers/gpu/drm/msm/dp/dp_ctrl.c
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/panel/panel-novatek-nt35510.c
drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/qxl/qxl_ttm.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_device.c
drivers/gpu/drm/ttm/ttm_module.c
drivers/gpu/drm/ttm/ttm_range_manager.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
drivers/hid/Kconfig
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
drivers/hid/hid-apple.c
drivers/hid/hid-asus.c
drivers/hid/hid-ft260.c
drivers/hid/intel-ish-hid/ishtp-hid-client.c
drivers/hid/intel-ish-hid/ishtp-hid.h
drivers/hid/intel-ish-hid/ishtp/bus.c
drivers/hid/usbhid/Kconfig
drivers/hid/wacom_wac.c
drivers/hv/channel_mgmt.c
drivers/i2c/busses/i2c-mpc.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/bnxt_re/qplib_res.c
drivers/infiniband/hw/bnxt_re/qplib_res.h
drivers/infiniband/hw/irdma/ctrl.c
drivers/infiniband/hw/irdma/hw.c
drivers/infiniband/hw/irdma/main.c
drivers/infiniband/hw/irdma/type.h
drivers/infiniband/hw/irdma/uk.c
drivers/infiniband/hw/irdma/verbs.c
drivers/infiniband/sw/rxe/rxe_mr.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
drivers/iommu/arm/arm-smmu/qcom_iommu.c
drivers/iommu/intel/iommu.c
drivers/iommu/rockchip-iommu.c
drivers/media/pci/intel/ipu3/cio2-bridge.c
drivers/media/pci/ngene/ngene-core.c
drivers/media/pci/ngene/ngene.h
drivers/memory/omap-gpmc.c
drivers/memory/tegra/tegra186.c
drivers/misc/eeprom/at24.c
drivers/mmc/core/block.c
drivers/mmc/core/host.c
drivers/mmc/host/jz4740_mmc.c
drivers/mtd/chips/cfi_util.c
drivers/net/bonding/bond_main.c
drivers/net/caif/Kconfig
drivers/net/caif/Makefile
drivers/net/caif/caif_hsi.c [deleted file]
drivers/net/can/spi/hi311x.c
drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
drivers/net/can/usb/ems_usb.c
drivers/net/can/usb/esd_usb2.c
drivers/net/can/usb/mcba_usb.c
drivers/net/can/usb/peak_usb/pcan_usb.c
drivers/net/can/usb/usb_8dev.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/dsa/mt7530.c
drivers/net/dsa/mt7530.h
drivers/net/dsa/mv88e6xxx/Kconfig
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/serdes.c
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
drivers/net/ethernet/dec/tulip/winbond-840.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
drivers/net/ethernet/freescale/fman/mac.c
drivers/net/ethernet/google/gve/gve_main.c
drivers/net/ethernet/google/gve/gve_rx_dqo.c
drivers/net/ethernet/hisilicon/hip04_eth.c
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igc/igc.h
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/ipsec.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/octeontx2/af/Makefile
drivers/net/ethernet/marvell/octeontx2/af/cgx.c
drivers/net/ethernet/marvell/octeontx2/af/cgx.h
drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
drivers/net/ethernet/marvell/octeontx2/af/npc.h
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c [new file with mode: 0644]
drivers/net/ethernet/marvell/octeontx2/nic/Makefile
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c [new file with mode: 0644]
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/microchip/sparx5/Kconfig
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/mscc/ocelot_net.c
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/pensando/ionic/ionic_lif.h
drivers/net/ethernet/pensando/ionic/ionic_phc.c
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
drivers/net/ethernet/qlogic/qede/qede_filter.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qualcomm/emac/emac.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/renesas/ravb.h
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/sfc/efx_channels.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/ti/tlan.c
drivers/net/ethernet/xscale/ptp_ixp46x.c
drivers/net/fddi/defza.c
drivers/net/netdevsim/ipsec.c
drivers/net/phy/broadcom.c
drivers/net/phy/marvell10g.c
drivers/net/usb/asix_devices.c
drivers/net/usb/hso.c
drivers/net/usb/r8152.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/wan/hdlc_cisco.c
drivers/net/wan/hdlc_fr.c
drivers/net/wan/hdlc_ppp.c
drivers/net/wan/hdlc_raw.c
drivers/net/wan/hdlc_raw_eth.c
drivers/net/wan/hdlc_x25.c
drivers/net/wireless/mediatek/mt76/mt7921/main.c
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
drivers/net/wireless/virt_wifi.c
drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
drivers/net/wwan/iosm/iosm_ipc_uevent.c
drivers/net/wwan/iosm/iosm_ipc_wwan.c
drivers/net/wwan/wwan_core.c
drivers/nfc/nfcsim.c
drivers/nfc/s3fwrn5/firmware.c
drivers/nvme/host/core.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/tcp.c
drivers/nvme/host/trace.h
drivers/pci/proc.c
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/gigabyte-wmi.c
drivers/platform/x86/intel-hid.c
drivers/platform/x86/think-lmi.c
drivers/platform/x86/think-lmi.h
drivers/platform/x86/wireless-hotkey.c
drivers/power/supply/ab8500_fg.c
drivers/power/supply/abx500_chargalg.c
drivers/ptp/Makefile
drivers/ptp/ptp_clock.c
drivers/ptp/ptp_private.h
drivers/ptp/ptp_sysfs.c
drivers/ptp/ptp_vclock.c [new file with mode: 0644]
drivers/pwm/pwm-berlin.c
drivers/pwm/pwm-ep93xx.c
drivers/pwm/pwm-spear.c
drivers/pwm/pwm-sprd.c
drivers/pwm/pwm-tiecap.c
drivers/regulator/bd9576-regulator.c
drivers/regulator/hi6421-regulator.c
drivers/regulator/hi6421v600-regulator.c
drivers/regulator/mtk-dvfsrc-regulator.c
drivers/regulator/rtmv20-regulator.c
drivers/s390/char/tape_char.c
drivers/s390/net/ctcm_fsms.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/scsi/zfcp_sysfs.c
drivers/scsi/arm/acornscsi.c
drivers/scsi/arm/fas216.c
drivers/scsi/hosts.c
drivers/scsi/libsas/sas_discover.c
drivers/scsi/mpi3mr/mpi3mr_fw.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/pm8001/pm8001_ctl.c
drivers/scsi/pm8001/pm8001_hwi.c
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/pm8001/pm8001_sas.c
drivers/scsi/pm8001/pm80xx_hwi.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/ufs/ufshcd.h
drivers/soc/mediatek/mt8173-pm-domains.h
drivers/soc/mediatek/mt8183-mmsys.h
drivers/soc/mediatek/mt8365-mmsys.h [new file with mode: 0644]
drivers/soc/mediatek/mtk-mmsys.c
drivers/soc/mediatek/mtk-mmsys.h
drivers/soc/mediatek/mtk-pm-domains.h
drivers/soc/renesas/Kconfig
drivers/soc/renesas/r8a779a0-sysc.c
drivers/soc/renesas/rcar-sysc.c
drivers/soc/renesas/renesas-soc.c
drivers/soc/tegra/fuse/fuse-tegra.c
drivers/soc/tegra/fuse/fuse-tegra20.c
drivers/soc/tegra/fuse/fuse-tegra30.c
drivers/soc/tegra/fuse/fuse.h
drivers/soc/tegra/pmc.c
drivers/soc/tegra/powergate-bpmp.c
drivers/soc/ti/pruss.c
drivers/soc/ti/smartreflex.c
drivers/spi/spi-atmel.c
drivers/spi/spi-bcm2835.c
drivers/spi/spi-cadence-quadspi.c
drivers/spi/spi-cadence.c
drivers/spi/spi-imx.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi-stm32.c
drivers/target/target_core_sbc.c
drivers/target/target_core_transport.c
drivers/usb/class/cdc-wdm.c
drivers/usb/core/devio.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/core_intr.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc2/params.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/ep0.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/u_serial.c
drivers/usb/gadget/udc/fsl_qe_udc.c
drivers/usb/gadget/udc/tegra-xudc.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/max3421-hcd.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-pci-renesas.c
drivers/usb/host/xhci-pci.c
drivers/usb/phy/phy.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/option.c
drivers/usb/storage/unusual_uas.h
drivers/usb/typec/stusb160x.c
drivers/usb/typec/tipd/core.c
drivers/video/fbdev/core/fbmem.c
drivers/video/fbdev/xilinxfb.c
drivers/watchdog/Kconfig
drivers/watchdog/ixp4xx_wdt.c
fs/Kconfig.binfmt
fs/Makefile
fs/afs/cmservice.c
fs/afs/dir.c
fs/afs/write.c
fs/binfmt_em86.c [deleted file]
fs/block_dev.c
fs/btrfs/backref.c
fs/btrfs/backref.h
fs/btrfs/block-group.c
fs/btrfs/block-group.h
fs/btrfs/compression.c
fs/btrfs/ctree.c
fs/btrfs/delayed-ref.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/inode.c
fs/btrfs/ordered-data.c
fs/btrfs/ordered-data.h
fs/btrfs/qgroup.c
fs/btrfs/qgroup.h
fs/btrfs/tests/qgroup-tests.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/btrfs/zoned.c
fs/ceph/mds_client.c
fs/cifs/cifs_dfs_ref.c
fs/cifs/cifsglob.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/dfs_cache.c
fs/cifs/dfs_cache.h
fs/cifs/dns_resolve.c
fs/cifs/dns_resolve.h
fs/cifs/file.c
fs/cifs/fs_context.c
fs/cifs/misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.h
fs/configfs/file.c
fs/ext2/dir.c
fs/ext2/ext2.h
fs/ext2/namei.c
fs/fcntl.c
fs/fs-writeback.c
fs/fs_context.c
fs/hfs/bfind.c
fs/hfs/bnode.c
fs/hfs/btree.h
fs/hfs/super.c
fs/hugetlbfs/inode.c
fs/internal.h
fs/io-wq.c
fs/io_uring.c
fs/iomap/buffered-io.c
fs/iomap/seek.c
fs/ocfs2/file.c
fs/pipe.c
fs/reiserfs/stree.c
fs/reiserfs/super.c
fs/seq_file.c
fs/userfaultfd.c
fs/vboxsf/dir.c
fs/vboxsf/file.c
fs/vboxsf/vfsmod.h
fs/xfs/libxfs/xfs_ag.c
fs/xfs/libxfs/xfs_attr.c
fs/xfs/libxfs/xfs_ialloc.c
fs/xfs/libxfs/xfs_ialloc.h
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/libxfs/xfs_log_format.h
fs/xfs/libxfs/xfs_trans_inode.c
fs/xfs/scrub/inode.c
fs/xfs/xfs_buf_item_recover.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode_item_recover.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_priv.h
fs/xfs/xfs_rtalloc.c
fs/xfs/xfs_trace.h
fs/zonefs/super.c
include/acpi/acpi_bus.h
include/drm/drm_ioctl.h
include/dt-bindings/clock/r9a07g044-cpg.h
include/linux/blkdev.h
include/linux/bpf.h
include/linux/bpf_types.h
include/linux/bpf_verifier.h
include/linux/ethtool.h
include/linux/filter.h
include/linux/fs_context.h
include/linux/highmem.h
include/linux/intel-ish-client-if.h
include/linux/kasan.h
include/linux/marvell_phy.h
include/linux/memblock.h
include/linux/mfd/rt5033-private.h
include/linux/migrate.h
include/linux/mm.h
include/linux/omap-gpmc.h
include/linux/pgtable.h
include/linux/platform_data/pata_ixp4xx_cf.h
include/linux/power/smartreflex.h
include/linux/ptp_clock_kernel.h
include/linux/rmap.h
include/linux/scmi_protocol.h
include/linux/scpi_protocol.h
include/linux/skmsg.h
include/linux/stmmac.h
include/math-emu/op-common.h
include/net/bonding.h
include/net/busy_poll.h
include/net/caif/caif_hsi.h [deleted file]
include/net/dst_metadata.h
include/net/ip6_route.h
include/net/llc_pdu.h
include/net/mptcp.h
include/net/netfilter/nf_conntrack_core.h
include/net/netns/conntrack.h
include/net/sctp/constants.h
include/net/sctp/structs.h
include/net/sock.h
include/net/tcp.h
include/soc/tegra/mc.h
include/soc/tegra/pm.h
include/sound/soc.h
include/trace/events/afs.h
include/trace/events/net.h
include/trace/events/qdisc.h
include/uapi/linux/ethtool_netlink.h
include/uapi/linux/idxd.h
include/uapi/linux/net_tstamp.h
include/uapi/linux/netfilter/nfnetlink_log.h
include/uapi/linux/netfilter/nfnetlink_queue.h
include/uapi/linux/virtio_ids.h
include/uapi/linux/virtio_scmi.h [new file with mode: 0644]
include/uapi/rdma/irdma-abi.h
init/Kconfig
kernel/bpf/core.c
kernel/bpf/devmap.c
kernel/bpf/disasm.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup-v1.c
kernel/debug/gdbstub.c
kernel/dma/ops_helpers.c
kernel/rcu/refscale.c
kernel/rcu/tasks.h
kernel/rcu/tree_stall.h
kernel/scftorture.c
kernel/smpboot.c
kernel/time/posix-cpu-timers.c
kernel/time/timer.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_events_synth.c
kernel/trace/trace_synth.h
kernel/tracepoint.c
kernel/workqueue.c
lib/Kconfig
lib/Kconfig.debug
lib/test_hmm.c
mm/backing-dev.c
mm/hugetlb.c
mm/kasan/kasan.h
mm/kfence/core.c
mm/kfence/kfence_test.c
mm/memblock.c
mm/memcontrol.c
mm/memory.c
mm/migrate.c
mm/mmap_lock.c
mm/page_alloc.c
mm/rmap.c
mm/secretmem.c
mm/slab.h
mm/slub.c
mm/util.c
net/802/garp.c
net/802/mrp.c
net/bpf/test_run.c
net/bridge/br_fdb.c
net/bridge/br_if.c
net/bridge/br_multicast.c
net/caif/caif_socket.c
net/can/j1939/transport.c
net/can/raw.c
net/core/dev.c
net/core/devlink.c
net/core/flow_dissector.c
net/core/skbuff.c
net/core/skmsg.c
net/core/sock.c
net/decnet/af_decnet.c
net/dsa/slave.c
net/dsa/switch.c
net/dsa/tag_ksz.c
net/ethtool/Makefile
net/ethtool/common.c
net/ethtool/netlink.c
net/ethtool/netlink.h
net/ethtool/phc_vclocks.c [new file with mode: 0644]
net/ipv4/fib_frontend.c
net/ipv4/inet_diag.c
net/ipv4/ip_tunnel.c
net/ipv4/ipmr.c
net/ipv4/raw_diag.c
net/ipv4/tcp.c
net/ipv4/tcp_bpf.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv4/udp_bpf.c
net/ipv4/udp_diag.c
net/ipv4/udp_offload.c
net/ipv6/ip6_output.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/xfrm6_output.c
net/iucv/iucv.c
net/llc/af_llc.c
net/llc/llc_s_ac.c
net/mac80211/cfg.c
net/mac80211/ieee80211_i.h
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/tx.c
net/mptcp/mib.c
net/mptcp/mib.h
net/mptcp/mptcp_diag.c
net/mptcp/options.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/sockopt.c
net/mptcp/subflow.c
net/mptcp/syncookies.c
net/ncsi/Kconfig
net/ncsi/internal.h
net/ncsi/ncsi-manage.c
net/ncsi/ncsi-rsp.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_proto_gre.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_hook.c
net/netfilter/nft_last.c
net/netfilter/nft_nat.c
net/netlink/af_netlink.c
net/netrom/nr_timer.c
net/openvswitch/flow_table.c
net/qrtr/qrtr.c
net/sched/act_ct.c
net/sched/act_skbmod.c
net/sched/cls_api.c
net/sched/cls_tcindex.c
net/sched/sch_taprio.c
net/sctp/auth.c
net/sctp/diag.c
net/sctp/input.c
net/sctp/ipv6.c
net/sctp/output.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/sctp/transport.c
net/socket.c
net/tipc/crypto.c
net/tipc/socket.c
net/unix/af_unix.c
net/unix/diag.c
net/wireless/nl80211.c
net/wireless/scan.c
samples/bpf/Makefile
samples/bpf/xdpsock_user.c
scripts/Makefile.build
scripts/setlocalversion
scripts/spdxcheck.py
sound/core/pcm_native.c
sound/hda/intel-dsp-config.c
sound/isa/sb/sb16_csp.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/acp-da7219-max98357a.c
sound/soc/codecs/Kconfig
sound/soc/codecs/rt5631.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/tlv320aic31xx.c
sound/soc/codecs/tlv320aic31xx.h
sound/soc/codecs/tlv320aic32x4.c
sound/soc/codecs/wcd938x.c
sound/soc/codecs/wm_adsp.c
sound/soc/intel/boards/sof_sdw_max98373.c
sound/soc/mediatek/mt8183/mt8183-dai-adda.c
sound/soc/soc-pcm.c
sound/soc/sof/intel/pci-tgl.c
sound/soc/tegra/tegra_pcm.c
sound/soc/ti/j721e-evm.c
sound/usb/mixer.c
sound/usb/quirks.c
tools/arch/arm64/include/uapi/asm/unistd.h
tools/bpf/Makefile
tools/bpf/bpftool/common.c
tools/bpf/bpftool/jit_disasm.c
tools/bpf/runqslower/runqslower.bpf.c
tools/include/linux/kconfig.h
tools/include/uapi/asm-generic/unistd.h
tools/lib/bpf/libbpf.c
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
tools/perf/builtin-inject.c
tools/perf/builtin-report.c
tools/perf/builtin-sched.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-trace.c
tools/perf/tests/bpf.c
tools/perf/tests/event_update.c
tools/perf/tests/evsel-roundtrip-name.c
tools/perf/tests/maps.c
tools/perf/tests/parse-events.c
tools/perf/tests/perf-time-to-tsc.c
tools/perf/tests/topology.c
tools/perf/util/cs-etm.c
tools/perf/util/data.c
tools/perf/util/dso.c
tools/perf/util/dwarf-aux.c
tools/perf/util/dwarf-aux.h
tools/perf/util/env.c
tools/perf/util/lzma.c
tools/perf/util/pfm.c
tools/perf/util/pmu.c
tools/perf/util/probe-event.c
tools/perf/util/probe-event.h
tools/perf/util/probe-file.c
tools/perf/util/probe-finder.c
tools/perf/util/probe-finder.h
tools/perf/util/session.c
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/stat-display.c
tools/testing/kunit/kunit.py
tools/testing/kunit/kunit_kernel.py
tools/testing/kunit/kunit_parser.py
tools/testing/kunit/kunit_tool_test.py
tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log [moved from tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log with 100% similarity]
tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/tailcalls.c
tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
tools/testing/selftests/bpf/verifier/value_ptr_arith.c
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/aarch64/get-reg-list.c
tools/testing/selftests/kvm/access_tracking_perf_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/dirty_log_perf_test.c
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/lib/aarch64/processor.c
tools/testing/selftests/kvm/lib/guest_modes.c
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/set_memory_region_test.c
tools/testing/selftests/kvm/steal_time.c
tools/testing/selftests/kvm/x86_64/hyperv_features.c
tools/testing/selftests/kvm/x86_64/mmu_role_test.c
tools/testing/selftests/kvm/x86_64/smm_test.c
tools/testing/selftests/memory-hotplug/mem-on-off-test.sh
tools/testing/selftests/net/icmp_redirect.sh
tools/testing/selftests/net/mptcp/mptcp_join.sh
tools/testing/selftests/net/nettest.c
tools/testing/selftests/net/pmtu.sh
tools/testing/selftests/net/timestamping.c
tools/testing/selftests/netfilter/Makefile
tools/testing/selftests/netfilter/conntrack_tcp_unreplied.sh [new file with mode: 0755]
tools/testing/selftests/vm/userfaultfd.c
virt/kvm/coalesced_mmio.c
virt/kvm/kvm_main.c

index 2363ad8..d378f57 100644
@@ -33,6 +33,13 @@ Description:
                frequency adjustment value (a positive integer) in
                parts per billion.
 
+What:          /sys/class/ptp/ptpN/max_vclocks
+Date:          May 2021
+Contact:       Yangbo Lu <yangbo.lu@nxp.com>
+Description:
+               This file contains the maximum number of ptp vclocks.
+               Write integer to re-configure it.
+
 What:          /sys/class/ptp/ptpN/n_alarms
 Date:          September 2010
 Contact:       Richard Cochran <richardcochran@gmail.com>
@@ -61,6 +68,19 @@ Description:
                This file contains the number of programmable pins
                offered by the PTP hardware clock.
 
+What:          /sys/class/ptp/ptpN/n_vclocks
+Date:          May 2021
+Contact:       Yangbo Lu <yangbo.lu@nxp.com>
+Description:
+               This file contains the number of virtual PTP clocks in
+               use.  By default, the value is 0 meaning that only the
+               physical clock is in use.  Setting the value creates
+               the corresponding number of virtual clocks and causes
+               the physical clock to become free running.  Setting the
+               value back to 0 deletes the virtual clocks and
+               switches the physical clock back to normal, adjustable
+               operation.
+
 What:          /sys/class/ptp/ptpN/pins
 Date:          March 2014
 Contact:       Richard Cochran <richardcochran@gmail.com>
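
The n_vclocks attribute documented above is a write-to-configure
interface. A hedged userspace sketch follows (that the physical clock
is ptp0 is an assumption; adjust the path as needed):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumption: the physical clock is ptp0. */
	int fd = open("/sys/class/ptp/ptp0/n_vclocks", O_WRONLY);

	if (fd < 0) {
		perror("open n_vclocks");
		return 1;
	}
	/* Create two virtual clocks; per the description above, the
	 * physical clock runs free until this is written back to 0. */
	if (write(fd, "2", 1) != 1)
		perror("write n_vclocks");
	close(fd);
	return 0;
}
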
index 459e6b6..0c9120e 100644
@@ -45,14 +45,24 @@ how the user addresses are used by the kernel:
 
 1. User addresses not accessed by the kernel but used for address space
    management (e.g. ``mprotect()``, ``madvise()``). The use of valid
-   tagged pointers in this context is allowed with the exception of
-   ``brk()``, ``mmap()`` and the ``new_address`` argument to
-   ``mremap()`` as these have the potential to alias with existing
-   user addresses.
-
-   NOTE: This behaviour changed in v5.6 and so some earlier kernels may
-   incorrectly accept valid tagged pointers for the ``brk()``,
-   ``mmap()`` and ``mremap()`` system calls.
+   tagged pointers in this context is allowed with these exceptions:
+
+   - ``brk()``, ``mmap()`` and the ``new_address`` argument to
+     ``mremap()`` as these have the potential to alias with existing
+      user addresses.
+
+     NOTE: This behaviour changed in v5.6 and so some earlier kernels may
+     incorrectly accept valid tagged pointers for the ``brk()``,
+     ``mmap()`` and ``mremap()`` system calls.
+
+   - The ``range.start``, ``start`` and ``dst`` arguments to the
+     ``UFFDIO_*`` ``ioctl()``s used on a file descriptor obtained from
+     ``userfaultfd()``, as fault addresses subsequently obtained by reading
+     the file descriptor will be untagged, which may otherwise confuse
+     tag-unaware programs.
+
+     NOTE: This behaviour changed in v5.14 and so some earlier kernels may
+     incorrectly accept valid tagged pointers for this system call.
 
 2. User addresses accessed by the kernel (e.g. ``write()``). This ABI
    relaxation is disabled by default and the application thread needs to
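
To make rule 1 and its new exceptions concrete, here is a hedged
userspace sketch; the tag value and the mapping are illustrative, and
the behaviour notes restate the text above:

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* The same address carrying an arm64 top-byte tag of 0x2a. */
	void *tagged = (void *)((uintptr_t)p | (0x2aULL << 56));

	/* Rule 1: address-space management calls accept the tag... */
	madvise(tagged, len, MADV_DONTNEED);
	/* ...but per the exceptions above, passing "tagged" as mmap()'s
	 * hint or mremap()'s new_address is rejected on v5.6 and later,
	 * and as a UFFDIO_* range start on v5.14 and later. */
	return munmap(p, len);
}
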
index 7d99386..d1626d5 100644
@@ -86,19 +86,7 @@ Generating code coverage reports under UML
 .. note::
        TODO(brendanhiggins@google.com): There are various issues with UML and
        versions of gcc 7 and up. You're likely to run into missing ``.gcda``
-       files or compile errors. We know one `faulty GCC commit
-       <https://github.com/gcc-mirror/gcc/commit/8c9434c2f9358b8b8bad2c1990edf10a21645f9d>`_
-       but not how we'd go about getting this fixed. The compile errors still
-       need some investigation.
-
-.. note::
-       TODO(brendanhiggins@google.com): for recent versions of Linux
-       (5.10-5.12, maybe earlier), there's a bug with gcov counters not being
-       flushed in UML. This translates to very low (<1%) reported coverage. This is
-       related to the above issue and can be worked around by replacing the
-       one call to ``uml_abort()`` (it's in ``os_dump_core()``) with a plain
-       ``exit()``.
-
+       files or compile errors.
 
 This is different from the "normal" way of getting coverage information that is
 documented in Documentation/dev-tools/gcov.rst.
diff --git a/Documentation/devicetree/bindings/ata/intel,ixp4xx-compact-flash.yaml b/Documentation/devicetree/bindings/ata/intel,ixp4xx-compact-flash.yaml
new file mode 100644
index 0000000..52e1860
--- /dev/null
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/intel,ixp4xx-compact-flash.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel IXP4xx CompactFlash Card Controller
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+  The IXP4xx network processors have a CompactFlash interface that presents
+  a CompactFlash card to the system as a true IDE (parallel ATA) device. The
+  device is always connected to the expansion bus of the IXP4xx SoCs using one
+  or two chip select areas and address translating logic on the board. The
+  node must be placed inside a chip select node on the IXP4xx expansion bus.
+
+properties:
+  compatible:
+    const: intel,ixp4xx-compact-flash
+
+  reg:
+    items:
+      - description: Command interface registers
+      - description: Control interface registers
+
+  interrupts:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+allOf:
+  - $ref: pata-common.yaml#
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    bus@c4000000 {
+      compatible = "intel,ixp43x-expansion-bus-controller", "syscon";
+      reg = <0xc4000000 0x1000>;
+      native-endian;
+      #address-cells = <2>;
+      #size-cells = <1>;
+      ranges = <0 0x0 0x50000000 0x01000000>, <1 0x0 0x51000000 0x01000000>;
+      dma-ranges = <0 0x0 0x50000000 0x01000000>, <1 0x0 0x51000000 0x01000000>;
+      ide@1,0 {
+        compatible = "intel,ixp4xx-compact-flash";
+        reg = <1 0x00000000 0x1000>, <1 0x00040000 0x1000>;
+        interrupt-parent = <&gpio0>;
+        interrupts = <12 IRQ_TYPE_EDGE_RISING>;
+      };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/bus/intel,ixp4xx-expansion-bus-controller.yaml b/Documentation/devicetree/bindings/bus/intel,ixp4xx-expansion-bus-controller.yaml
new file mode 100644
index 0000000..5fb4e7b
--- /dev/null
@@ -0,0 +1,168 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/bus/intel,ixp4xx-expansion-bus-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel IXP4xx Expansion Bus Controller
+
+description: |
+  The IXP4xx expansion bus controller handles access to devices on the
+  memory-mapped expansion bus on the Intel IXP4xx family of system on chips,
+  including IXP42x, IXP43x, IXP45x and IXP46x.
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+properties:
+  $nodename:
+    pattern: '^bus@[0-9a-f]+$'
+
+  compatible:
+    items:
+      - enum:
+          - intel,ixp42x-expansion-bus-controller
+          - intel,ixp43x-expansion-bus-controller
+          - intel,ixp45x-expansion-bus-controller
+          - intel,ixp46x-expansion-bus-controller
+      - const: syscon
+
+  reg:
+    description: Control registers for the expansion bus, these are not
+      inside the memory range handled by the expansion bus.
+    maxItems: 1
+
+  native-endian:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description: The IXP4xx has a peculiar MMIO access scheme, as it changes
+      the access pattern for words (swizzling) on the bus depending on whether
+      the SoC is running in big-endian or little-endian mode. Thus the
+      registers must always be accessed using native endianness.
+
+  "#address-cells":
+    description: |
+      The first cell is the chip select number.
+      The second cell is the address offset within the bank.
+    const: 2
+
+  "#size-cells":
+    const: 1
+
+  ranges: true
+  dma-ranges: true
+
+patternProperties:
+  "^.*@[0-7],[0-9a-f]+$":
+    description: Devices attached to chip selects are represented as
+      subnodes.
+    type: object
+
+    properties:
+      intel,ixp4xx-eb-t1:
+        description: Address timing, extend address phase with n cycles.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        maximum: 3
+
+      intel,ixp4xx-eb-t2:
+        description: Setup chip select timing, extend setup phase with n cycles.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        maximum: 3
+
+      intel,ixp4xx-eb-t3:
+        description: Strobe timing, extend strobe phase with n cycles.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        maximum: 15
+
+      intel,ixp4xx-eb-t4:
+        description: Hold timing, extend hold phase with n cycles.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        maximum: 3
+
+      intel,ixp4xx-eb-t5:
+        description: Recovery timing, extend recovery phase with n cycles.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        maximum: 15
+
+      intel,ixp4xx-eb-cycle-type:
+        description: The type of cycles to use on the expansion bus for this
+          chip select. 0 = Intel cycles, 1 = Motorola cycles, 2 = HPI cycles.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        enum: [0, 1, 2]
+
+      intel,ixp4xx-eb-byte-access-on-halfword:
+        description: Allow byte read access on half word devices.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        enum: [0, 1]
+
+      intel,ixp4xx-eb-hpi-hrdy-pol-high:
+        description: Set HPI HRDY polarity to active high when using HPI.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        enum: [0, 1]
+
+      intel,ixp4xx-eb-mux-address-and-data:
+        description: Multiplex address and data on the data bus.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        enum: [0, 1]
+
+      intel,ixp4xx-eb-ahb-split-transfers:
+        description: Enable AHB split transfers.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        enum: [0, 1]
+
+      intel,ixp4xx-eb-write-enable:
+        description: Enable write cycles.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        enum: [0, 1]
+
+      intel,ixp4xx-eb-byte-access:
+        description: Expansion bus uses only 8 bits. The default is to use
+          16 bits.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        enum: [0, 1]
+
+required:
+  - compatible
+  - reg
+  - native-endian
+  - "#address-cells"
+  - "#size-cells"
+  - ranges
+  - dma-ranges
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    bus@50000000 {
+        compatible = "intel,ixp42x-expansion-bus-controller", "syscon";
+        reg = <0xc4000000 0x28>;
+        native-endian;
+        #address-cells = <2>;
+        #size-cells = <1>;
+        ranges = <0 0x0 0x50000000 0x01000000>,
+                 <1 0x0 0x51000000 0x01000000>;
+        dma-ranges = <0 0x0 0x50000000 0x01000000>,
+                     <1 0x0 0x51000000 0x01000000>;
+        flash@0,0 {
+            compatible = "intel,ixp4xx-flash", "cfi-flash";
+            bank-width = <2>;
+            reg = <0 0x00000000 0x1000000>;
+            intel,ixp4xx-eb-t3 = <3>;
+            intel,ixp4xx-eb-cycle-type = <0>;
+            intel,ixp4xx-eb-byte-access-on-halfword = <1>;
+            intel,ixp4xx-eb-write-enable = <1>;
+            intel,ixp4xx-eb-byte-access = <0>;
+        };
+        serial@1,0 {
+            compatible = "exar,xr16l2551", "ns8250";
+            reg = <1 0x00000000 0x10>;
+            interrupt-parent = <&gpio0>;
+            interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+            clock-frequency = <1843200>;
+            intel,ixp4xx-eb-t3 = <3>;
+            intel,ixp4xx-eb-cycle-type = <1>;
+            intel,ixp4xx-eb-write-enable = <1>;
+            intel,ixp4xx-eb-byte-access = <1>;
+        };
+    };
index 8dc7b40..1174c9a 100644
@@ -50,7 +50,6 @@ properties:
 
   reg:
     minItems: 1
-    maxItems: 3
     items:
       - description: base register
       - description: power register
index c9e9740..12c316f 100644
@@ -9,6 +9,7 @@ Required properties:
       "fsl,imx53-sdma"
       "fsl,imx6q-sdma"
       "fsl,imx7d-sdma"
+      "fsl,imx6ul-sdma"
       "fsl,imx8mq-sdma"
       "fsl,imx8mm-sdma"
       "fsl,imx8mn-sdma"
index cebf6ff..5c4c678 100644
@@ -34,6 +34,10 @@ properties:
       - description: SCMI compliant firmware with ARM SMC/HVC transport
         items:
           - const: arm,scmi-smc
+      - description: SCMI compliant firmware with SCMI Virtio transport.
+                     The virtio transport only supports a single device.
+        items:
+          - const: arm,scmi-virtio
 
   interrupts:
     description:
@@ -172,6 +176,7 @@ patternProperties:
       Each sub-node represents a protocol supported. If the platform
       supports a dedicated communication channel for a particular protocol,
       then the corresponding transport properties must be present.
+      The virtio transport does not support a dedicated communication channel.
 
     properties:
       reg:
@@ -195,7 +200,6 @@ patternProperties:
 
 required:
   - compatible
-  - shmem
 
 if:
   properties:
@@ -209,6 +213,7 @@ then:
 
   required:
     - mboxes
+    - shmem
 
 else:
   if:
@@ -219,6 +224,7 @@ else:
   then:
     required:
       - arm,smc-id
+      - shmem
 
 examples:
   - |
index ad0ec9f..7d9c083 100644
@@ -39,17 +39,7 @@ properties:
   reg:
     maxItems: 1
 
-patternProperties:
-  "^adi,bypass-attenuator-in[0-4]$":
-    description: |
-      Configures bypassing the individual voltage input attenuator. If
-      set to 1 the attenuator is bypassed if set to 0 the attenuator is
-      not bypassed. If the property is absent then the attenuator
-      retains it's configuration from the bios/bootloader.
-    $ref: /schemas/types.yaml#/definitions/uint32
-    enum: [0, 1]
-
-  "^adi,pwm-active-state$":
+  adi,pwm-active-state:
     description: |
       Integer array, represents the active state of the pwm outputs If set to 0
       the pwm uses a logic low output for 100% duty cycle. If set to 1 the pwm
@@ -61,6 +51,16 @@ patternProperties:
       enum: [0, 1]
       default: 1
 
+patternProperties:
+  "^adi,bypass-attenuator-in[0-4]$":
+    description: |
+      Configures bypassing the individual voltage input attenuator. If
+      set to 1 the attenuator is bypassed if set to 0 the attenuator is
+      not bypassed. If the property is absent then the attenuator
+      retains it's configuration from the bios/bootloader.
+    $ref: /schemas/types.yaml#/definitions/uint32
+    enum: [0, 1]
+
 required:
   - compatible
   - reg
index 1181b59..03f2b2d 100644
@@ -52,16 +52,14 @@ properties:
         items:
           - const: marvell,ap806-smmu-500
           - const: arm,mmu-500
-      - description: NVIDIA SoCs that program two ARM MMU-500s identically
-        items:
       - description: NVIDIA SoCs that require memory controller interaction
           and may program multiple ARM MMU-500s identically with the memory
           controller interleaving translations between multiple instances
           for improved performance.
         items:
           - enum:
-              - const: nvidia,tegra194-smmu
-              - const: nvidia,tegra186-smmu
+              - nvidia,tegra194-smmu
+              - nvidia,tegra186-smmu
           - const: nvidia,smmu-500
       - items:
           - const: arm,mmu-500
index d2e28a9..ba9124f 100644
@@ -28,14 +28,12 @@ properties:
       - description: configuration registers for MMU instance 0
       - description: configuration registers for MMU instance 1
     minItems: 1
-    maxItems: 2
 
   interrupts:
     items:
       - description: interruption for MMU instance 0
       - description: interruption for MMU instance 1
     minItems: 1
-    maxItems: 2
 
   clocks:
     items:
index 7a63c85..01c9acf 100644
@@ -57,7 +57,6 @@ properties:
 
   ranges:
     minItems: 1
-    maxItems: 3
     description: |
       Memory bus areas for interacting with the devices. Reflects
       the memory layout with four integer values following:
index e5f1a33..dd5a649 100644
@@ -84,7 +84,6 @@ properties:
 
   interrupts:
     minItems: 1
-    maxItems: 3
     items:
       - description: NAND CTLRDY interrupt
       - description: FLASH_DMA_DONE if flash DMA is available
@@ -92,7 +91,6 @@ properties:
 
   interrupt-names:
     minItems: 1
-    maxItems: 3
     items:
       - const: nand_ctlrdy
       - const: flash_dma_done
@@ -148,8 +146,6 @@ allOf:
     then:
       properties:
         reg-names:
-          minItems: 2
-          maxItems: 2
           items:
             - const: nand
             - const: nand-int-base
@@ -161,8 +157,6 @@ allOf:
     then:
       properties:
         reg-names:
-          minItems: 3
-          maxItems: 3
           items:
             - const: nand
             - const: nand-int-base
@@ -175,8 +169,6 @@ allOf:
     then:
       properties:
         reg-names:
-          minItems: 3
-          maxItems: 3
           items:
             - const: nand
             - const: iproc-idm
index 0b8a05d..f978f87 100644
@@ -67,8 +67,8 @@ properties:
           reg:
             oneOf:
               - enum:
-                - 0
-                - 1
+                  - 0
+                  - 1
 
         required:
           - compatible
index f7da3d7..3282106 100644
@@ -13,7 +13,7 @@ Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
 
 For the properties relevant to the ethernet controller connected to the GPMC
 refer to the binding documentation of the device. For example, the documentation
-for the SMSC 911x is Documentation/devicetree/bindings/net/smsc911x.txt
+for the SMSC 911x is Documentation/devicetree/bindings/net/smsc,lan9115.yaml
 
 Child nodes need to specify the GPMC bus address width using the "bank-width"
 property but is possible that an ethernet controller also has a property to
diff --git a/Documentation/devicetree/bindings/net/imx-dwmac.txt b/Documentation/devicetree/bindings/net/imx-dwmac.txt
deleted file mode 100644
index 921d522..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-IMX8 glue layer controller, NXP imx8 families support Synopsys MAC 5.10a IP.
-
-This file documents platform glue layer for IMX.
-Please see stmmac.txt for the other unchanged properties.
-
-The device node has following properties.
-
-Required properties:
-- compatible:  Should be "nxp,imx8mp-dwmac-eqos" to select glue layer
-              and "snps,dwmac-5.10a" to select IP version.
-- clocks: Must contain a phandle for each entry in clock-names.
-- clock-names: Should be "stmmaceth" for the host clock.
-              Should be "pclk" for the MAC apb clock.
-              Should be "ptp_ref" for the MAC timer clock.
-              Should be "tx" for the MAC RGMII TX clock:
-              Should be "mem" for EQOS MEM clock.
-               - "mem" clock is required for imx8dxl platform.
-               - "mem" clock is not required for imx8mp platform.
-- interrupt-names: Should contain a list of interrupt names corresponding to
-                  the interrupts in the interrupts property, if available.
-                  Should be "macirq" for the main MAC IRQ
-                  Should be "eth_wake_irq" for the IT which wake up system
-- intf_mode: Should be phandle/offset pair. The phandle to the syscon node which
-            encompases the GPR register, and the offset of the GPR register.
-               - required for imx8mp platform.
-               - is optional for imx8dxl platform.
-
-Optional properties:
-- intf_mode: is optional for imx8dxl platform.
-- snps,rmii_refclk_ext: to select RMII reference clock from external.
-
-Example:
-       eqos: ethernet@30bf0000 {
-               compatible = "nxp,imx8mp-dwmac-eqos", "snps,dwmac-5.10a";
-               reg = <0x30bf0000 0x10000>;
-               interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
-                            <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
-               interrupt-names = "eth_wake_irq", "macirq";
-               clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>,
-                        <&clk IMX8MP_CLK_QOS_ENET_ROOT>,
-                        <&clk IMX8MP_CLK_ENET_QOS_TIMER>,
-                        <&clk IMX8MP_CLK_ENET_QOS>;
-               clock-names = "stmmaceth", "pclk", "ptp_ref", "tx";
-               assigned-clocks = <&clk IMX8MP_CLK_ENET_AXI>,
-                                 <&clk IMX8MP_CLK_ENET_QOS_TIMER>,
-                                 <&clk IMX8MP_CLK_ENET_QOS>;
-               assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_266M>,
-                                        <&clk IMX8MP_SYS_PLL2_100M>,
-                                        <&clk IMX8MP_SYS_PLL2_125M>;
-               assigned-clock-rates = <0>, <100000000>, <125000000>;
-               nvmem-cells = <&eth_mac0>;
-               nvmem-cell-names = "mac-address";
-               nvmem_macaddr_swap;
-               intf_mode = <&gpr 0x4>;
-               status = "disabled";
-       };
diff --git a/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml b/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
new file mode 100644 (file)
index 0000000..5629b2e
--- /dev/null
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/nxp,dwmac-imx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP i.MX8 DWMAC glue layer Device Tree Bindings
+
+maintainers:
+  - Joakim Zhang <qiangqing.zhang@nxp.com>
+
+# We need a select here so we don't match all nodes with 'snps,dwmac'
+select:
+  properties:
+    compatible:
+      contains:
+        enum:
+          - nxp,imx8mp-dwmac-eqos
+          - nxp,imx8dxl-dwmac-eqos
+  required:
+    - compatible
+
+allOf:
+  - $ref: "snps,dwmac.yaml#"
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+          - enum:
+              - nxp,imx8mp-dwmac-eqos
+              - nxp,imx8dxl-dwmac-eqos
+          - const: snps,dwmac-5.10a
+
+  clocks:
+    minItems: 3
+    maxItems: 5
+    items:
+      - description: MAC host clock
+      - description: MAC apb clock
+      - description: MAC timer clock
+      - description: MAC RGMII TX clock
+      - description: EQOS MEM clock
+
+  clock-names:
+    minItems: 3
+    maxItems: 5
+    contains:
+      enum:
+        - stmmaceth
+        - pclk
+        - ptp_ref
+        - tx
+        - mem
+
+  intf_mode:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    description:
+      Should be a phandle/offset pair: the phandle to the syscon node that
+      encompasses the GPR register, and the offset of the GPR register.
+
+  snps,rmii_refclk_ext:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description:
+      Selects the RMII reference clock from an external source.
+
+required:
+  - compatible
+  - clocks
+  - clock-names
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/clock/imx8mp-clock.h>
+
+    eqos: ethernet@30bf0000 {
+            compatible = "nxp,imx8mp-dwmac-eqos", "snps,dwmac-5.10a";
+            reg = <0x30bf0000 0x10000>;
+            interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+                         <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+            interrupt-names = "macirq", "eth_wake_irq";
+            clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>,
+                     <&clk IMX8MP_CLK_QOS_ENET_ROOT>,
+                     <&clk IMX8MP_CLK_ENET_QOS_TIMER>,
+                     <&clk IMX8MP_CLK_ENET_QOS>;
+            clock-names = "stmmaceth", "pclk", "ptp_ref", "tx";
+            phy-mode = "rgmii";
+            status = "disabled";
+    };
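
Note the example above omits intf_mode even though the property is required on
i.MX8MP; a hedged fragment carrying over the <&gpr 0x4> pair from the deleted
txt example (the &gpr syscon label is board-specific):

    &eqos {
            intf_mode = <&gpr 0x4>;  /* syscon phandle, GPR register offset */
    };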
diff --git a/Documentation/devicetree/bindings/net/smsc,lan9115.yaml b/Documentation/devicetree/bindings/net/smsc,lan9115.yaml
new file mode 100644 (file)
index 0000000..f86667c
--- /dev/null
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/smsc,lan9115.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Smart Mixed-Signal Connectivity (SMSC) LAN911x/912x Controller
+
+maintainers:
+  - Shawn Guo <shawnguo@kernel.org>
+
+allOf:
+  - $ref: ethernet-controller.yaml#
+
+properties:
+  compatible:
+    oneOf:
+      - const: smsc,lan9115
+      - items:
+          - enum:
+              - smsc,lan89218
+              - smsc,lan9117
+              - smsc,lan9118
+              - smsc,lan9220
+              - smsc,lan9221
+          - const: smsc,lan9115
+
+  reg:
+    maxItems: 1
+
+  reg-shift: true
+
+  reg-io-width:
+    enum: [ 2, 4 ]
+    default: 2
+
+  interrupts:
+    minItems: 1
+    items:
+      - description:
+          LAN interrupt line
+      - description:
+          Optional PME (power management event) interrupt that is able to wake
+          up the host system with a 50ms pulse on network activity
+
+  clocks:
+    maxItems: 1
+
+  phy-mode: true
+
+  smsc,irq-active-high:
+    type: boolean
+    description: Indicates the IRQ polarity is active-high
+
+  smsc,irq-push-pull:
+    type: boolean
+    description: Indicates the IRQ type is push-pull
+
+  smsc,force-internal-phy:
+    type: boolean
+    description: Forces SMSC LAN controller to use internal PHY
+
+  smsc,force-external-phy:
+    type: boolean
+    description: Forces SMSC LAN controller to use external PHY
+
+  smsc,save-mac-address:
+    type: boolean
+    description:
+      Indicates that MAC address needs to be saved before resetting the
+      controller
+
+  reset-gpios:
+    maxItems: 1
+    description:
+      A GPIO line connected to the RESET (active low) signal of the device.
+      On many systems this is wired high so the device comes out of reset at
+      power-on; if the line is instead under program control, this optional
+      GPIO can be used to bring the device out of reset.
+
+  vdd33a-supply:
+    description: 3.3V analog power supply
+
+  vddvario-supply:
+    description: IO logic power supply
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+# There are lots of bus-specific properties ("qcom,*", "samsung,*", "fsl,*",
+# "gpmc,*", ...) to be found, that actually depend on the compatible value of
+# the parent node.
+additionalProperties: true
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    ethernet@f4000000 {
+            compatible = "smsc,lan9220", "smsc,lan9115";
+            reg = <0xf4000000 0x2000000>;
+            phy-mode = "mii";
+            interrupt-parent = <&gpio1>;
+            interrupts = <31>, <32>;
+            reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
+            reg-io-width = <4>;
+            smsc,irq-push-pull;
+    };
diff --git a/Documentation/devicetree/bindings/net/smsc911x.txt b/Documentation/devicetree/bindings/net/smsc911x.txt
deleted file mode 100644 (file)
index acfafc8..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-* Smart Mixed-Signal Connectivity (SMSC) LAN911x/912x Controller
-
-Required properties:
-- compatible : Should be "smsc,lan<model>", "smsc,lan9115"
-- reg : Address and length of the io space for SMSC LAN
-- interrupts : one or two interrupt specifiers
-  - The first interrupt is the SMSC LAN interrupt line
-  - The second interrupt (if present) is the PME (power
-    management event) interrupt that is able to wake up the host
-     system with a 50ms pulse on network activity
-- phy-mode : See ethernet.txt file in the same directory
-
-Optional properties:
-- reg-shift : Specify the quantity to shift the register offsets by
-- reg-io-width : Specify the size (in bytes) of the IO accesses that
-  should be performed on the device.  Valid value for SMSC LAN is
-  2 or 4.  If it's omitted or invalid, the size would be 2.
-- smsc,irq-active-high : Indicates the IRQ polarity is active-high
-- smsc,irq-push-pull : Indicates the IRQ type is push-pull
-- smsc,force-internal-phy : Forces SMSC LAN controller to use
-  internal PHY
-- smsc,force-external-phy : Forces SMSC LAN controller to use
-  external PHY
-- smsc,save-mac-address : Indicates that mac address needs to be saved
-  before resetting the controller
-- reset-gpios : a GPIO line connected to the RESET (active low) signal
-  of the device. On many systems this is wired high so the device goes
-  out of reset at power-on, but if it is under program control, this
-  optional GPIO can wake up in response to it.
-- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies
-
-Examples:
-
-lan9220@f4000000 {
-       compatible = "smsc,lan9220", "smsc,lan9115";
-       reg = <0xf4000000 0x2000000>;
-       phy-mode = "mii";
-       interrupt-parent = <&gpio1>;
-       interrupts = <31>, <32>;
-       reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
-       reg-io-width = <4>;
-       smsc,irq-push-pull;
-};
index d765259..42689b7 100644 (file)
@@ -28,6 +28,7 @@ select:
           - snps,dwmac-4.00
           - snps,dwmac-4.10a
           - snps,dwmac-4.20a
+          - snps,dwmac-5.10a
           - snps,dwxgmac
           - snps,dwxgmac-2.10
 
@@ -82,6 +83,7 @@ properties:
         - snps,dwmac-4.00
         - snps,dwmac-4.10a
         - snps,dwmac-4.20a
+        - snps,dwmac-5.10a
         - snps,dwxgmac
         - snps,dwxgmac-2.10
 
@@ -375,6 +377,7 @@ allOf:
               - snps,dwmac-4.00
               - snps,dwmac-4.10a
               - snps,dwmac-4.20a
+              - snps,dwmac-5.10a
               - snps,dwxgmac
               - snps,dwxgmac-2.10
               - st,spear600-gmac
index 5272b6f..dcd6390 100644 (file)
@@ -77,6 +77,34 @@ properties:
       Type-C spec states minimum CC pin debounce of 100 ms and maximum
       of 200 ms. However, some solutions might need more than 200 ms.
 
+  refclk-dig:
+    type: object
+    description: |
+      WIZ node should have subnode for refclk_dig to select the reference
+      clock source for the reference clock used in the PHY and PMA digital
+      logic.
+    properties:
+      clocks:
+        minItems: 2
+        maxItems: 4
+        description: Phandle to two (Torrent) or four (Sierra) clock nodes representing
+          the inputs to refclk_dig
+
+      "#clock-cells":
+        const: 0
+
+      assigned-clocks:
+        maxItems: 1
+
+      assigned-clock-parents:
+        maxItems: 1
+
+    required:
+      - clocks
+      - "#clock-cells"
+      - assigned-clocks
+      - assigned-clock-parents
+
 patternProperties:
   "^pll[0|1]-refclk$":
     type: object
@@ -121,34 +149,6 @@ patternProperties:
       - clocks
       - "#clock-cells"
 
-  "^refclk-dig$":
-    type: object
-    description: |
-      WIZ node should have subnode for refclk_dig to select the reference
-      clock source for the reference clock used in the PHY and PMA digital
-      logic.
-    properties:
-      clocks:
-        minItems: 2
-        maxItems: 4
-        description: Phandle to two (Torrent) or four (Sierra) clock nodes representing
-          the inputs to refclk_dig
-
-      "#clock-cells":
-        const: 0
-
-      assigned-clocks:
-        maxItems: 1
-
-      assigned-clock-parents:
-        maxItems: 1
-
-    required:
-      - clocks
-      - "#clock-cells"
-      - assigned-clocks
-      - assigned-clock-parents
-
   "^serdes@[0-9a-f]+$":
     type: object
     description: |
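
As a hedged sketch of what the relocated refclk-dig schema expects, assuming a
two-input (Torrent) WIZ; the clock phandles and the wiz0_refclk_dig label are
placeholders, not taken from a real dts:

    refclk-dig {
            clocks = <&k3_clks 292 11>, <&ext_refclk>;  /* two inputs: Torrent */
            #clock-cells = <0>;
            assigned-clocks = <&wiz0_refclk_dig>;
            assigned-clock-parents = <&k3_clks 292 11>;
    };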
index 8850c01..9b131c6 100644 (file)
@@ -57,12 +57,14 @@ properties:
     maxItems: 1
 
   power-domains:
+    deprecated: true
     description:
       Power domain to use for enable control. This binding is only
       available if the compatible is chosen to regulator-fixed-domain.
     maxItems: 1
 
   required-opps:
+    deprecated: true
     description:
       Performance state to use for enable control. This binding is only
       available if the compatible is chosen to regulator-fixed-domain. The
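
For clarity, a hedged sketch of the now-deprecated enable-control form that
these two properties describe; the &pd_reg power-domain provider and the
&opp_level1 OPP node are assumptions:

    sd_vdd: regulator-sd-vdd {
            compatible = "regulator-fixed-domain";
            regulator-name = "sd_vdd";
            power-domains = <&pd_reg>;
            required-opps = <&opp_level1>;
    };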
index 12b8963..c2e8c54 100644 (file)
@@ -36,12 +36,12 @@ properties:
          switching frequency must be one of the following corresponding values:
           1.1MHz, 1.65MHz, 2.2MHz, 2.75MHz
 
-    patternProperties:
-      "^ldo[1-4]$":
+      ldortc:
         type: object
         $ref: regulator.yaml#
 
-      "^ldortc$":
+    patternProperties:
+      "^ldo[1-4]$":
         type: object
         $ref: regulator.yaml#
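
The swap matters because ldortc is a single fixed-name node while ldo1 to ldo4
match a pattern; a hedged sketch of the resulting layout (the device address
and regulator names are placeholders):

    pmic@8 {
            compatible = "nxp,pf8100";
            reg = <0x08>;

            regulators {
                    ldo1 {
                            regulator-name = "vldo1";
                    };

                    ldortc {
                            regulator-name = "vldortc";
                    };
            };
    };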
 
index 8761437..aabf50f 100644 (file)
@@ -83,7 +83,8 @@ properties:
 
         unevaluatedProperties: false
 
-      "^vsnvs$":
+    properties:
+      vsnvs:
         type: object
         $ref: regulator.yaml#
         description:
index 657c13b..056d42d 100644 (file)
@@ -30,7 +30,6 @@ properties:
     maxItems: 1
 
   clocks:
-    minItems: 2
     items:
       - description: PCLK clocks
       - description: EXTCLK clocks. Faraday calls it CLK1HZ and says the clock
index 9790617..9d128b9 100644 (file)
@@ -68,6 +68,7 @@ properties:
       - ti,k2g-pruss     # for 66AK2G SoC family
       - ti,am654-icssg   # for K3 AM65x SoC family
       - ti,j721e-icssg   # for K3 J721E SoC family
+      - ti,am642-icssg   # for K3 AM64x SoC family
 
   reg:
     maxItems: 1
@@ -84,6 +85,8 @@ properties:
   dma-ranges:
     maxItems: 1
 
+  dma-coherent: true
+
   power-domains:
     description: |
       This property is as per sci-pm-domain.txt.
@@ -231,8 +234,8 @@ patternProperties:
     description: |
       Industrial Ethernet Peripheral to manage/generate Industrial Ethernet
       functions such as time stamping. Each PRUSS has either 1 IEP (on AM335x,
-      AM437x, AM57xx & 66AK2G SoCs) or 2 IEPs (on K3 AM65x & J721E SoCs ). IEP
-      is used for creating PTP clocks and generating PPS signals.
+      AM437x, AM57xx & 66AK2G SoCs) or 2 IEPs (on K3 AM65x, J721E & AM64x SoCs).
+      IEP is used for creating PTP clocks and generating PPS signals.
 
     type: object
 
@@ -323,17 +326,29 @@ additionalProperties: false
 # - interrupt-controller
 # - pru
 
-if:
-  properties:
-    compatible:
-      contains:
-        enum:
-          - ti,k2g-pruss
-          - ti,am654-icssg
-          - ti,j721e-icssg
-then:
-  required:
-    - power-domains
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - ti,k2g-pruss
+              - ti,am654-icssg
+              - ti,j721e-icssg
+              - ti,am642-icssg
+    then:
+      required:
+        - power-domains
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - ti,k2g-pruss
+    then:
+      required:
+        - dma-coherent
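
Putting the two conditions together, a hedged sketch of an AM64x ICSSG node
the schema would accept; the unit address, region size and power-domain
specifier are placeholders (dma-coherent is only required for ti,k2g-pruss):

    icssg0: icssg@30000000 {
            compatible = "ti,am642-icssg";
            reg = <0x30000000 0x80000>;
            #address-cells = <1>;
            #size-cells = <1>;
            ranges;
            power-domains = <&k3_pds 81 TI_SCI_PD_EXCLUSIVE>;
    };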
 
 examples:
   - |
index ee936d1..c2930d6 100644 (file)
@@ -114,7 +114,7 @@ properties:
 
   ports:
     $ref: /schemas/graph.yaml#/properties/ports
-    properties:
+    patternProperties:
       port(@[0-9a-f]+)?:
         $ref: audio-graph-port.yaml#
         unevaluatedProperties: false
index faef4f6..8246891 100644 (file)
@@ -79,22 +79,7 @@ properties:
     description:
       The SPI controller acts as a slave, instead of a master.
 
-allOf:
-  - if:
-      not:
-        required:
-          - spi-slave
-    then:
-      properties:
-        "#address-cells":
-          const: 1
-    else:
-      properties:
-        "#address-cells":
-          const: 0
-
-patternProperties:
-  "^slave$":
+  slave:
     type: object
 
     properties:
@@ -105,6 +90,7 @@ patternProperties:
     required:
       - compatible
 
+patternProperties:
   "^.*@[0-9a-f]+$":
     type: object
 
@@ -180,6 +166,20 @@ patternProperties:
       - compatible
       - reg
 
+allOf:
+  - if:
+      not:
+        required:
+          - spi-slave
+    then:
+      properties:
+        "#address-cells":
+          const: 1
+    else:
+      properties:
+        "#address-cells":
+          const: 0
+
 additionalProperties: true
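
In slave mode the controller node therefore carries spi-slave, sets
"#address-cells" to 0 and holds a single fixed-name slave child; a hedged
sketch with placeholder compatibles:

    spi {
            compatible = "vendor,example-spi";           /* placeholder */
            #address-cells = <0>;
            #size-cells = <0>;
            spi-slave;

            slave {
                    compatible = "vendor,example-protocol";  /* placeholder */
            };
    };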
 
 examples:
index a88f99a..f238848 100644 (file)
@@ -25,14 +25,12 @@ properties:
 
   interrupts:
     minItems: 1
-    maxItems: 2
     items:
       - description: Host controller interrupt
       - description: Device controller interrupt in isp1761
 
   interrupt-names:
     minItems: 1
-    maxItems: 2
     items:
       - const: host
       - const: peripheral
index 8a58c61..61bdeac 100644 (file)
@@ -69,17 +69,17 @@ early userspace image can be built by an unprivileged user.
 
 As a technical note, when directories and files are specified, the
 entire CONFIG_INITRAMFS_SOURCE is passed to
-usr/gen_initramfs_list.sh.  This means that CONFIG_INITRAMFS_SOURCE
+usr/gen_initramfs.sh.  This means that CONFIG_INITRAMFS_SOURCE
 can really be interpreted as any legal argument to
-gen_initramfs_list.sh.  If a directory is specified as an argument then
+gen_initramfs.sh.  If a directory is specified as an argument then
 the contents are scanned, uid/gid translation is performed, and
 usr/gen_init_cpio file directives are output.  If a directory is
-specified as an argument to usr/gen_initramfs_list.sh then the
+specified as an argument to usr/gen_initramfs.sh then the
 contents of the file are simply copied to the output.  All of the output
 directives from directory scanning and file contents copying are
 processed by usr/gen_init_cpio.
 
-See also 'usr/gen_initramfs_list.sh -h'.
+See also 'usr/gen_initramfs.sh -h'.
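
For reference, a minimal sketch of the directive format such a configuration
file uses (the dir/nod/file syntax consumed by usr/gen_init_cpio; the paths
are placeholders):

    dir /dev 0755 0 0
    nod /dev/console 0600 0 0 c 5 1
    dir /root 0700 0 0
    file /init my_initramfs/init.sh 0755 0 0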
 
 Where's this all leading?
 =========================
diff --git a/Documentation/features/core/thread-info-in-task/arch-support.txt b/Documentation/features/core/thread-info-in-task/arch-support.txt
new file mode 100644 (file)
index 0000000..9f0259b
--- /dev/null
@@ -0,0 +1,32 @@
+#
+# Feature name:          thread-info-in-task
+#         Kconfig:       THREAD_INFO_IN_TASK
+#         description:   arch makes use of the core kernel facility to embed thread_info in task_struct
+#
+    -----------------------
+    |         arch |status|
+    -----------------------
+    |       alpha: | TODO |
+    |         arc: | TODO |
+    |         arm: | TODO |
+    |       arm64: |  ok  |
+    |        csky: | TODO |
+    |       h8300: | TODO |
+    |     hexagon: | TODO |
+    |        ia64: | TODO |
+    |        m68k: | TODO |
+    |  microblaze: | TODO |
+    |        mips: | TODO |
+    |       nds32: |  ok  |
+    |       nios2: | TODO |
+    |    openrisc: | TODO |
+    |      parisc: | TODO |
+    |     powerpc: |  ok  |
+    |       riscv: |  ok  |
+    |        s390: |  ok  |
+    |          sh: | TODO |
+    |       sparc: | TODO |
+    |          um: | TODO |
+    |         x86: |  ok  |
+    |      xtensa: | TODO |
+    -----------------------
index 8639fe8..8dcaab0 100644 (file)
@@ -22,7 +22,7 @@
     |    openrisc: | TODO |
     |      parisc: | TODO |
     |     powerpc: |  ok  |
-    |       riscv: | TODO |
+    |       riscv: |  ok  |
     |        s390: | TODO |
     |          sh: |  ok  |
     |       sparc: | TODO |
index 4598b0d..1649606 100644 (file)
@@ -170,7 +170,7 @@ Documentation/driver-api/early-userspace/early_userspace_support.rst for more de
 The kernel does not depend on external cpio tools.  If you specify a
 directory instead of a configuration file, the kernel's build infrastructure
 creates a configuration file from that directory (usr/Makefile calls
-usr/gen_initramfs_list.sh), and proceeds to package up that directory
+usr/gen_initramfs.sh), and proceeds to package up that directory
 using the config file (by feeding it to usr/gen_init_cpio, which is created
 from usr/gen_init_cpio.c).  The kernel's build-time cpio creation code is
 entirely self-contained, and the kernel's boot-time extractor is also
index 4257688..60b217b 100644 (file)
@@ -243,8 +243,8 @@ Configuration Flags and Socket Options
 These are the various configuration flags that can be used to control
 and monitor the behavior of AF_XDP sockets.
 
-XDP_COPY and XDP_ZERO_COPY bind flags
--------------------------------------
+XDP_COPY and XDP_ZEROCOPY bind flags
+------------------------------------
 
 When you bind to a socket, the kernel will first try to use zero-copy
 mode. If zero-copy is not supported, it will fall back on using copy
@@ -252,7 +252,7 @@ mode, i.e. copying all packets out to user space. But if you would
 like to force a certain mode, you can use the following flags. If you
 pass the XDP_COPY flag to the bind call, the kernel will force the
 socket into copy mode. If it cannot use copy mode, the bind call will
-fail with an error. Conversely, the XDP_ZERO_COPY flag will force the
+fail with an error. Conversely, the XDP_ZEROCOPY flag will force the
 socket into zero-copy mode or fail.
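
As a hedged illustration (not a complete AF_XDP setup; a real socket also
needs a UMEM and rings registered before bind), forcing zero-copy mode in C:

    #include <linux/if_xdp.h>
    #include <sys/socket.h>

    int xsk_bind_zerocopy(int ifindex, int queue_id)
    {
            struct sockaddr_xdp sxdp = {
                    .sxdp_family = AF_XDP,
                    .sxdp_ifindex = ifindex,
                    .sxdp_queue_id = queue_id,
                    .sxdp_flags = XDP_ZEROCOPY,  /* fail rather than fall back */
            };
            int fd = socket(AF_XDP, SOCK_RAW, 0);

            if (fd < 0 || bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)))
                    return -1;  /* e.g. the driver cannot do zero-copy */
            return fd;
    }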
 
 XDP_SHARED_UMEM bind flag
index 6ea91e4..c86628e 100644 (file)
@@ -212,6 +212,7 @@ Userspace to kernel:
   ``ETHTOOL_MSG_FEC_SET``               set FEC settings
   ``ETHTOOL_MSG_MODULE_EEPROM_GET``     read SFP module EEPROM
   ``ETHTOOL_MSG_STATS_GET``             get standard statistics
+  ``ETHTOOL_MSG_PHC_VCLOCKS_GET``       get PHC virtual clocks info
   ===================================== ================================
 
 Kernel to userspace:
@@ -250,6 +251,7 @@ Kernel to userspace:
   ``ETHTOOL_MSG_FEC_NTF``                  FEC settings
   ``ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY``  read SFP module EEPROM
   ``ETHTOOL_MSG_STATS_GET_REPLY``          standard statistics
+  ``ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY``    PHC virtual clocks info
   ======================================== =================================
 
 ``GET`` requests are sent by userspace applications to retrieve device
@@ -1477,6 +1479,25 @@ Low and high bounds are inclusive, for example:
  etherStatsPkts512to1023Octets 512  1023
  ============================= ==== ====
 
+PHC_VCLOCKS_GET
+===============
+
+Query device PHC virtual clocks information.
+
+Request contents:
+
+  ====================================  ======  ==========================
+  ``ETHTOOL_A_PHC_VCLOCKS_HEADER``      nested  request header
+  ====================================  ======  ==========================
+
+Kernel response contents:
+
+  ====================================  ======  ==========================
+  ``ETHTOOL_A_PHC_VCLOCKS_HEADER``      nested  reply header
+  ``ETHTOOL_A_PHC_VCLOCKS_NUM``         u32     PHC virtual clocks number
+  ``ETHTOOL_A_PHC_VCLOCKS_INDEX``       s32     PHC index array
+  ====================================  ======  ==========================
+
 Request translation
 ===================
 
@@ -1575,4 +1596,5 @@ are netlink only.
   n/a                                 ``ETHTOOL_MSG_CABLE_TEST_ACT``
   n/a                                 ``ETHTOOL_MSG_CABLE_TEST_TDR_ACT``
   n/a                                 ``ETHTOOL_MSG_TUNNEL_INFO_GET``
+  n/a                                 ``ETHTOOL_MSG_PHC_VCLOCKS_GET``
   =================================== =====================================
index b3fa522..316c7df 100644 (file)
@@ -826,7 +826,7 @@ tcp_fastopen_blackhole_timeout_sec - INTEGER
        initial value when the blackhole issue goes away.
        0 to disable the blackhole detection.
 
-       By default, it is set to 1hr.
+       By default, it is set to 0 (feature is disabled).
 
 tcp_fastopen_key - list of comma separated 32-digit hexadecimal INTEGERs
        The list consists of a primary key and an optional backup key. The
index 0467b30..d31ed6c 100644 (file)
@@ -110,6 +110,12 @@ nf_conntrack_tcp_be_liberal - BOOLEAN
        Be conservative in what you do, be liberal in what you accept from others.
        If it's non-zero, we mark only out of window RST segments as INVALID.
 
+nf_conntrack_tcp_ignore_invalid_rst - BOOLEAN
+       - 0 - disabled (default)
+       - 1 - enabled
+
+       If it's 1, we don't mark out of window RST segments as INVALID.
+
 nf_conntrack_tcp_loose - BOOLEAN
        - 0 - disabled
        - not 0 - enabled (default)
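
A hedged usage example for the new toggle (a runtime sysctl; the setting lasts
until reboot unless persisted in sysctl.conf):

    sysctl -w net.netfilter.nf_conntrack_tcp_ignore_invalid_rst=1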
index 76775f2..ab63d29 100644 (file)
 Linux Kernel TIPC
 =================
 
-TIPC (Transparent Inter Process Communication) is a protocol that is
-specially designed for intra-cluster communication.
+Introduction
+============
 
-For more information about TIPC, see http://tipc.sourceforge.net.
+TIPC (Transparent Inter Process Communication) is a protocol that is specially
+designed for intra-cluster communication. It can be configured to transmit
+messages either on UDP or directly across Ethernet. Message delivery is
+sequence guaranteed, loss free and flow controlled. Latency times are shorter
+than with any other known protocol, while maximal throughput is comparable to
+that of TCP.
+
+TIPC Features
+-------------
+
+- Cluster wide IPC service
+
+  Have you ever wished you had the convenience of Unix Domain Sockets even when
+  transmitting data between cluster nodes? Where you yourself determine the
+  addresses you want to bind to and use? Where you don't have to perform DNS
+  lookups and worry about IP addresses? Where you don't have to start timers
+  to monitor the continuous existence of peer sockets? And yet without the
+  downsides of that socket type, such as the risk of lingering inodes?
+
+  Welcome to the Transparent Inter Process Communication service, TIPC in short,
+  which gives you all of this, and a lot more.
+
+- Service Addressing
+
+  A fundamental concept in TIPC is that of Service Addressing which makes it
+  possible for a programmer to choose his own address, bind it to a server
+  socket and let client programs use only that address for sending messages
+  (see the C sketch after this list).
+
+- Service Tracking
+
+  A client wanting to wait for the availability of a server uses the Service
+  Tracking mechanism to subscribe for binding and unbinding/close events for
+  sockets with the associated service address.
+
+  The service tracking mechanism can also be used for Cluster Topology Tracking,
+  i.e., subscribing for availability/non-availability of cluster nodes.
+
+  Likewise, the service tracking mechanism can be used for Cluster Connectivity
+  Tracking, i.e., subscribing for up/down events for individual links between
+  cluster nodes.
+
+- Transmission Modes
+
+  Using a service address, a client can send datagram messages to a server socket.
+
+  Using the same address type, it can establish a connection towards an accepting
+  server socket.
+
+  It can also use a service address to create and join a Communication Group,
+  which is the TIPC manifestation of a brokerless message bus.
+
+  Multicast with very good performance and scalability is available both in
+  datagram mode and in communication group mode.
+
+- Inter Node Links
+
+  Communication between any two nodes in a cluster is maintained by one or two
+  Inter Node Links, which both guarantee data traffic integrity and monitor
+  the peer node's availability.
+
+- Cluster Scalability
+
+  By applying the Overlapping Ring Monitoring algorithm on the inter node links
+  it is possible to scale TIPC clusters up to 1000 nodes with a maintained
+  neighbor failure discovery time of 1-2 seconds. For smaller clusters this
+  time can be made much shorter.
+
+- Neighbor Discovery
+
+  Neighbor Node Discovery in the cluster is done by Ethernet broadcast or UDP
+  multicast, when any of those services are available. If not, configured peer
+  IP addresses can be used.
+
+- Configuration
+
+  When running TIPC in single node mode no configuration whatsoever is needed.
+  When running in cluster mode TIPC must as a minimum be given a node address
+  (before Linux 4.17) and told which interface to attach to. The "tipc"
+  configuration tool makes it possible to add and maintain many more
+  configuration parameters.
+
+- Performance
+
+  TIPC message transfer latency times are better than in any other known protocol.
+  Maximal byte throughput for inter-node connections is still somewhat lower than
+  for TCP, while it is superior for intra-node and inter-container throughput
+  on the same host.
+
+- Language Support
+
+  The TIPC user API has support for C, Python, Perl, Ruby, D and Go.
+
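
To make Service Addressing concrete, a hedged C sketch of a server binding a
self-chosen service address; the type/instance values 18888/17 are arbitrary
examples and error handling is elided:

    #include <linux/tipc.h>
    #include <string.h>
    #include <sys/socket.h>

    int tipc_service_bind(void)
    {
            struct sockaddr_tipc srv;
            int sd = socket(AF_TIPC, SOCK_RDM, 0);

            memset(&srv, 0, sizeof(srv));
            srv.family = AF_TIPC;
            srv.addrtype = TIPC_SERVICE_ADDR;
            srv.scope = TIPC_CLUSTER_SCOPE;
            srv.addr.name.name.type = 18888;   /* service type we chose */
            srv.addr.name.name.instance = 17;  /* service instance */

            return bind(sd, (struct sockaddr *)&srv, sizeof(srv)) ? -1 : sd;
    }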
+More Information
+----------------
+
+- How to set up TIPC:
+
+  http://tipc.io/getting_started.html
+
+- How to program with TIPC:
+
+  http://tipc.io/programming.html
+
+- How to contribute to TIPC:
+
+  http://tipc.io/contacts.html
+
+- More details about TIPC specification:
+
+  http://tipc.io/protocol.html
+
+
+Implementation
+==============
+
+TIPC is implemented as a kernel module in the net/tipc/ directory.
 
 TIPC Base Types
 ---------------
index b71e09f..f99be80 100644 (file)
@@ -191,7 +191,7 @@ Documentation written by Tom Zanussi
                                 with the event, in nanoseconds.  May be
                                modified by .usecs to have timestamps
                                interpreted as microseconds.
-    cpu                    int  the cpu on which the event occurred.
+    common_cpu             int  the cpu on which the event occurred.
     ====================== ==== =======================================
 
 Extended error information
index 229629e..4a6ed02 100644 (file)
@@ -47,7 +47,7 @@
 (顺便说一句,值得注意的是,合并窗口期间集成的更改并不是凭空产生的;它们是经
 提前收集、测试和分级的。稍后将详细描述该过程的工作方式。)
 
-合并窗口持续大约两周。在这段时间结束时,LinusTorvalds将声明窗口已关闭,并
+合并窗口持续大约两周。在这段时间结束时,Linus Torvalds将声明窗口已关闭,并
 释放第一个“rc”内核。例如,对于目标为5.6的内核,在合并窗口结束时发生的释放
 将被称为5.6-rc1。-rc1 版本是一个信号,表示合并新特性的时间已经过去,稳定下一
 个内核的时间已经到来。
@@ -168,7 +168,7 @@ Greg Kroah-Hartman领导。稳定团队将使用5.x.y编号方案不定期地发
 补丁如何进入内核
 ----------------
 
-只有一个人可以将补丁合并到主线内核存储库中:LinusTorvalds。但是,在进入
+只有一个人可以将补丁合并到主线内核存储库中:Linus Torvalds。但是,在进入
 2.6.38内核的9500多个补丁中,只有112个(大约1.3%)是由Linus自己直接选择的。
 内核项目已经发展到一个没有一个开发人员可以在没有支持的情况下检查和选择每个
 补丁的规模。内核开发人员处理这种增长的方式是使用围绕信任链构建的助理系统。
index c7b165c..dae68e6 100644 (file)
@@ -855,7 +855,7 @@ in-kernel irqchip (GIC), and for in-kernel irqchip can tell the GIC to
 use PPIs designated for specific cpus.  The irq field is interpreted
 like this::
 
 bits:  |  31 ... 28  | 27 ... 24 | 23  ... 16 | 15 ... 0 |
   field: | vcpu2_index | irq_type  | vcpu_index |  irq_id  |
 
 The irq_type field has the following values:
@@ -2149,10 +2149,10 @@ prior to calling the KVM_RUN ioctl.
 Errors:
 
   ======   ============================================================
 ENOENT   no such register
 EINVAL   invalid register ID, or no such register or used with VMs in
            protected virtualization mode on s390
 EPERM    (arm64) register access not allowed before vcpu finalization
   ======   ============================================================
 
 (These error codes are indicative only: do not rely on a specific error
@@ -2590,10 +2590,10 @@ following id bit patterns::
 Errors include:
 
   ======== ============================================================
 ENOENT   no such register
 EINVAL   invalid register ID, or no such register or used with VMs in
            protected virtualization mode on s390
 EPERM    (arm64) register access not allowed before vcpu finalization
   ======== ============================================================
 
 (These error codes are indicative only: do not rely on a specific error
@@ -3112,13 +3112,13 @@ current state.  "addr" is ignored.
 Errors:
 
   ======     =================================================================
 EINVAL     the target is unknown, or the combination of features is invalid.
 ENOENT     a features bit specified is unknown.
   ======     =================================================================
 
 This tells KVM what type of CPU to present to the guest, and what
-optional features it should have.  This will cause a reset of the cpu
-registers to their initial values.  If this is not called, KVM_RUN will
+optional features it should have.  This will cause a reset of the cpu
+registers to their initial values.  If this is not called, KVM_RUN will
 return ENOEXEC for that vcpu.
 
 The initial values are defined as:
@@ -3239,8 +3239,8 @@ VCPU matching underlying host.
 Errors:
 
   =====      ==============================================================
 E2BIG      the reg index list is too big to fit in the array specified by
            the user (the number required will be written into n).
   =====      ==============================================================
 
 ::
@@ -3288,7 +3288,7 @@ specific device.
 ARM/arm64 divides the id field into two parts, a device id and an
 address type id specific to the individual device::
 
 bits:  | 63        ...       32 | 31    ...    16 | 15    ...    0 |
   field: |        0x00000000      |     device id   |  addr type id  |
 
 ARM/arm64 currently only require this when using the in-kernel GIC
@@ -7049,7 +7049,7 @@ In combination with KVM_CAP_X86_USER_SPACE_MSR, this allows user space to
 trap and emulate MSRs that are outside of the scope of KVM as well as
 limit the attack surface on KVM's MSR emulation code.
 
-8.28 KVM_CAP_ENFORCE_PV_CPUID
+8.28 KVM_CAP_ENFORCE_PV_FEATURE_CPUID
 -------------------------------------
 
 Architectures: x86
index 45a81b8..869cad3 100644 (file)
@@ -392,7 +392,7 @@ Section 8 -- Interpretation.
 Creative Commons is not a party to its public
 licenses. Notwithstanding, Creative Commons may elect to apply one of
 its public licenses to material it publishes and in those instances
-will be considered the “Licensor.” The text of the Creative Commons
+will be considered the "Licensor." The text of the Creative Commons
 public licenses is dedicated to the public domain under the CC0 Public
 Domain Dedication. Except for the limited purpose of indicating that
 material is shared under a Creative Commons public license or as
index a61f4f3..65d34f1 100644 (file)
@@ -445,7 +445,7 @@ F:  drivers/platform/x86/wmi.c
 F:     include/uapi/linux/wmi.h
 
 ACRN HYPERVISOR SERVICE MODULE
-M:     Shuo Liu <shuo.a.liu@intel.com>
+M:     Fei Li <fei1.li@intel.com>
 L:     acrn-dev@lists.projectacrn.org (subscribers-only)
 S:     Supported
 W:     https://projectacrn.org
@@ -933,6 +933,7 @@ F:  drivers/video/fbdev/geode/
 
 AMD IOMMU (AMD-VI)
 M:     Joerg Roedel <joro@8bytes.org>
+R:     Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 L:     iommu@lists.linux-foundation.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
@@ -1487,7 +1488,7 @@ M:        Miquel Raynal <miquel.raynal@bootlin.com>
 M:     Naga Sureshkumar Relli <nagasure@xilinx.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-F:     Documentation/devicetree/bindings/mtd/arm,pl353-smc.yaml
+F:     Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml
 F:     drivers/memory/pl353-smc.c
 
 ARM PRIMECELL CLCD PL110 DRIVER
@@ -2009,10 +2010,12 @@ M:      Krzysztof Halasa <khalasa@piap.pl>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     Documentation/devicetree/bindings/arm/intel-ixp4xx.yaml
+F:     Documentation/devicetree/bindings/bus/intel,ixp4xx-expansion-bus-controller.yaml
 F:     Documentation/devicetree/bindings/gpio/intel,ixp4xx-gpio.txt
 F:     Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
 F:     Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
 F:     arch/arm/mach-ixp4xx/
+F:     drivers/bus/intel-ixp4xx-eb.c
 F:     drivers/clocksource/timer-ixp4xx.c
 F:     drivers/crypto/ixp4xx_crypto.c
 F:     drivers/gpio/gpio-ixp4xx.c
@@ -7857,9 +7860,9 @@ S:        Maintained
 F:     drivers/input/touchscreen/goodix.c
 
 GOOGLE ETHERNET DRIVERS
-M:     Catherine Sullivan <csully@google.com>
-R:     Sagi Shahar <sagis@google.com>
-R:     Jon Olson <jonolson@google.com>
+M:     Jeroen de Borst <jeroendb@google.com>
+R:     Catherine Sullivan <csully@google.com>
+R:     David Awogbemila <awogbemila@google.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     Documentation/networking/device_drivers/ethernet/google/gve.rst
@@ -11326,6 +11329,12 @@ W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
 F:     drivers/media/radio/radio-maxiradio*
 
+MCAB MICROCHIP CAN BUS ANALYZER TOOL DRIVER
+R:     Yasushi SHOJI <yashi@spacecubics.com>
+L:     linux-can@vger.kernel.org
+S:     Maintained
+F:     drivers/net/can/usb/mcba_usb.c
+
 MCAN MMIO DEVICE DRIVER
 M:     Chandrasekar Ramakrishnan <rcsekar@samsung.com>
 L:     linux-can@vger.kernel.org
@@ -11757,6 +11766,7 @@ F:      drivers/char/hw_random/mtk-rng.c
 MEDIATEK SWITCH DRIVER
 M:     Sean Wang <sean.wang@mediatek.com>
 M:     Landen Chao <Landen.Chao@mediatek.com>
+M:     DENG Qingfang <dqfext@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/dsa/mt7530.*
@@ -15009,6 +15019,13 @@ F:     drivers/net/phy/dp83640*
 F:     drivers/ptp/*
 F:     include/linux/ptp_cl*
 
+PTP VIRTUAL CLOCK SUPPORT
+M:     Yangbo Lu <yangbo.lu@nxp.com>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/ptp/ptp_vclock.c
+F:     net/ethtool/phc_vclocks.c
+
 PTRACE SUPPORT
 M:     Oleg Nesterov <oleg@redhat.com>
 S:     Maintained
@@ -15459,6 +15476,8 @@ M:      Pan, Xinhui <Xinhui.Pan@amd.com>
 L:     amd-gfx@lists.freedesktop.org
 S:     Supported
 T:     git https://gitlab.freedesktop.org/agd5f/linux.git
+B:     https://gitlab.freedesktop.org/drm/amd/-/issues
+C:     irc://irc.oftc.net/radeon
 F:     drivers/gpu/drm/amd/
 F:     drivers/gpu/drm/radeon/
 F:     include/uapi/drm/amdgpu_drm.h
@@ -17940,6 +17959,7 @@ F:      drivers/regulator/scmi-regulator.c
 F:     drivers/reset/reset-scmi.c
 F:     include/linux/sc[mp]i_protocol.h
 F:     include/trace/events/scmi.h
+F:     include/uapi/linux/virtio_scmi.h
 
 SYSTEM RESET/SHUTDOWN DRIVERS
 M:     Sebastian Reichel <sre@kernel.org>
@@ -19114,7 +19134,7 @@ M:      Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/phy/hisilicon,hi3670-usb3.yaml
-F:     drivers/phy/hisilicon/phy-kirin970-usb3.c
+F:     drivers/phy/hisilicon/phy-hi3670-usb3.c
 
 USB ISP116X DRIVER
 M:     Olav Kongas <ok@artecdesign.ee>
@@ -19792,6 +19812,14 @@ L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/ptp/ptp_vmw.c
 
+VMWARE VMCI DRIVER
+M:     Jorgen Hansen <jhansen@vmware.com>
+M:     Vishnu Dasa <vdasa@vmware.com>
+L:     linux-kernel@vger.kernel.org
+L:     pv-drivers@vmware.com (private)
+S:     Maintained
+F:     drivers/misc/vmw_vmci/
+
 VMWARE VMMOUSE SUBDRIVER
 M:     "VMware Graphics" <linux-graphics-maintainer@vmware.com>
 M:     "VMware, Inc." <pv-drivers@vmware.com>
index c3f9bd1..27a072c 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc4
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*
@@ -728,11 +728,12 @@ $(KCONFIG_CONFIG):
 # This exploits the 'multi-target pattern rule' trick.
 # The syncconfig should be executed only once to make all the targets.
 # (Note: use the grouped target '&:' when we bump to GNU Make 4.3)
-quiet_cmd_syncconfig = SYNC    $@
-      cmd_syncconfig = $(MAKE) -f $(srctree)/Makefile syncconfig
-
+#
+# Do not use $(call cmd,...) here. That would suppress prompts from syncconfig,
+# so you cannot notice that Kconfig is waiting for user input.
 %/config/auto.conf %/config/auto.conf.cmd %/generated/autoconf.h: $(KCONFIG_CONFIG)
-       +$(call cmd,syncconfig)
+       $(Q)$(kecho) "  SYNC    $@"
+       $(Q)$(MAKE) -f $(srctree)/Makefile syncconfig
 else # !may-sync-config
 # External modules and some install targets need include/generated/autoconf.h
 # and include/config/auto.conf but do not care if they are up-to-date.
@@ -802,7 +803,7 @@ else
 # Warn about unmarked fall-throughs in switch statement.
 # Disabled for clang while comment to attribute conversion happens and
 # https://github.com/ClangBuiltLinux/linux/issues/636 is discussed.
-KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
+KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough=5,)
 endif
 
 # These warnings generated too much noise in a regular build.
index 77d3280..6c50877 100644 (file)
@@ -14,7 +14,6 @@ config ALPHA
        select PCI_SYSCALL if PCI
        select HAVE_AOUT
        select HAVE_ASM_MODVERSIONS
-       select HAVE_IDE
        select HAVE_PCSPKR_PLATFORM
        select HAVE_PERF_EVENTS
        select NEED_DMA_MAP_STATE
@@ -532,7 +531,7 @@ config SMP
          will run faster if you say N here.
 
          See also the SMP-HOWTO available at
-         <http://www.tldp.org/docs.html#howto>.
+         <https://www.tldp.org/docs.html#howto>.
 
          If you don't know what to do here, say N.
 
index 00266e6..b4faba2 100644 (file)
@@ -23,7 +23,7 @@
 #include "ksize.h"
 
 extern unsigned long switch_to_osf_pal(unsigned long nr,
-       struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa,
+       struct pcb_struct *pcb_va, struct pcb_struct *pcb_pa,
        unsigned long *vptb);
 
 extern void move_stack(unsigned long new_stack);
index 43af718..90a2b34 100644 (file)
@@ -200,7 +200,7 @@ extern char _end;
        START_ADDR      KSEG address of the entry point of kernel code.
 
        ZERO_PGE        KSEG address of page full of zeroes, but 
-                       upon entry to kerne cvan be expected
+                       upon entry to kernel, it can be expected
                        to hold the parameter list and possible
                        INTRD information.
 
index d651922..325d4dd 100644 (file)
@@ -30,7 +30,7 @@ extern long srm_printk(const char *, ...)
      __attribute__ ((format (printf, 1, 2)));
 
 /*
- * gzip delarations
+ * gzip declarations
  */
 #define OF(args)  args
 #define STATIC static
index dd2dd9f..7f1ca30 100644 (file)
@@ -70,3 +70,4 @@ CONFIG_DEBUG_INFO=y
 CONFIG_ALPHA_LEGACY_START_ADDRESS=y
 CONFIG_MATHEMU=y
 CONFIG_CRYPTO_HMAC=y
+CONFIG_DEVTMPFS=y
index 5159ba2..ae64595 100644 (file)
@@ -4,15 +4,4 @@
 
 #include <uapi/asm/compiler.h>
 
-/* Some idiots over in <linux/compiler.h> thought inline should imply
-   always_inline.  This breaks stuff.  We'll include this file whenever
-   we run into such problems.  */
-
-#include <linux/compiler.h>
-#undef inline
-#undef __inline__
-#undef __inline
-#undef __always_inline
-#define __always_inline                inline __attribute__((always_inline))
-
 #endif /* __ALPHA_COMPILER_H */
index 11c688c..f21baba 100644 (file)
@@ -9,4 +9,10 @@ static inline int syscall_get_arch(struct task_struct *task)
        return AUDIT_ARCH_ALPHA;
 }
 
+static inline long syscall_get_return_value(struct task_struct *task,
+                                           struct pt_regs *regs)
+{
+       return regs->r0;
+}
+
 #endif /* _ASM_ALPHA_SYSCALL_H */
index d5367a1..d31167e 100644 (file)
@@ -834,7 +834,7 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer,
                        return -EFAULT;
                state = &current_thread_info()->ieee_state;
 
-               /* Update softare trap enable bits.  */
+               /* Update software trap enable bits.  */
                *state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK);
 
                /* Update the real fpcr.  */
@@ -854,7 +854,7 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer,
                state = &current_thread_info()->ieee_state;
                exc &= IEEE_STATUS_MASK;
 
-               /* Update softare trap enable bits.  */
+               /* Update software trap enable bits.  */
                swcr = (*state & IEEE_SW_MASK) | exc;
                *state |= exc;
 
index e7a59d9..efcf732 100644 (file)
@@ -574,7 +574,7 @@ static void alpha_pmu_start(struct perf_event *event, int flags)
  * Check that CPU performance counters are supported.
  * - currently support EV67 and later CPUs.
  * - actually some later revisions of the EV6 have the same PMC model as the
- *     EV67 but we don't do suffiently deep CPU detection to detect them.
+ *     EV67 but we don't do sufficiently deep CPU detection to detect them.
  *     Bad luck to the very few people who might have one, I guess.
  */
 static int supported_cpu(void)
index ef0c08e..a5123ea 100644 (file)
@@ -256,7 +256,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
                childstack->r26 = (unsigned long) ret_from_kernel_thread;
                childstack->r9 = usp;   /* function */
                childstack->r10 = kthread_arg;
-               childregs->hae = alpha_mv.hae_cache,
+               childregs->hae = alpha_mv.hae_cache;
                childti->pcb.usp = 0;
                return 0;
        }
index 7d56c21..b4fbbba 100644 (file)
@@ -319,18 +319,19 @@ setup_memory(void *kernel_end)
                       i, cluster->usage, cluster->start_pfn,
                       cluster->start_pfn + cluster->numpages);
 
-               /* Bit 0 is console/PALcode reserved.  Bit 1 is
-                  non-volatile memory -- we might want to mark
-                  this for later.  */
-               if (cluster->usage & 3)
-                       continue;
-
                end = cluster->start_pfn + cluster->numpages;
                if (end > max_low_pfn)
                        max_low_pfn = end;
 
                memblock_add(PFN_PHYS(cluster->start_pfn),
                             cluster->numpages << PAGE_SHIFT);
+
+               /* Bit 0 is console/PALcode reserved.  Bit 1 is
+                  non-volatile memory -- we might want to mark
+                  this for later.  */
+               if (cluster->usage & 3)
+                       memblock_reserve(PFN_PHYS(cluster->start_pfn),
+                                        cluster->numpages << PAGE_SHIFT);
        }
 
        /*
index 4b2575f..cb64e47 100644 (file)
@@ -582,7 +582,7 @@ void
 smp_send_stop(void)
 {
        cpumask_t to_whom;
-       cpumask_copy(&to_whom, cpu_possible_mask);
+       cpumask_copy(&to_whom, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &to_whom);
 #ifdef DEBUG_IPI_MSG
        if (hard_smp_processor_id() != boot_cpu_id)
index 53adf43..96fd6ff 100644 (file)
@@ -212,7 +212,7 @@ nautilus_init_pci(void)
 
        /* Use default IO. */
        pci_add_resource(&bridge->windows, &ioport_resource);
-       /* Irongate PCI memory aperture, calculate requred size before
+       /* Irongate PCI memory aperture, calculate required size before
           setting it up. */
        pci_add_resource(&bridge->windows, &irongate_mem);
 
index 921d4b6..5398f98 100644 (file)
@@ -730,7 +730,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
        long error;
 
        /* Check the UAC bits to decide what the user wants us to do
-          with the unaliged access.  */
+          with the unaligned access.  */
 
        if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
                if (__ratelimit(&ratelimit)) {
index d568cd9..f7cef66 100644 (file)
@@ -65,7 +65,7 @@ static long (*save_emul) (unsigned long pc);
 long do_alpha_fp_emul_imprecise(struct pt_regs *, unsigned long);
 long do_alpha_fp_emul(unsigned long);
 
-int init_module(void)
+static int alpha_fp_emul_init_module(void)
 {
        save_emul_imprecise = alpha_fp_emul_imprecise;
        save_emul = alpha_fp_emul;
@@ -73,12 +73,14 @@ int init_module(void)
        alpha_fp_emul = do_alpha_fp_emul;
        return 0;
 }
+module_init(alpha_fp_emul_init_module);
 
-void cleanup_module(void)
+static void alpha_fp_emul_cleanup_module(void)
 {
        alpha_fp_emul_imprecise = save_emul_imprecise;
        alpha_fp_emul = save_emul;
 }
+module_exit(alpha_fp_emul_cleanup_module);
 
 #undef  alpha_fp_emul_imprecise
 #define alpha_fp_emul_imprecise                do_alpha_fp_emul_imprecise
@@ -401,3 +403,5 @@ alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask)
 egress:
        return si_code;
 }
+
+EXPORT_SYMBOL(__udiv_qrnnd);
index 3ea1c41..2fb7012 100644 (file)
@@ -95,7 +95,6 @@ config ARM
        select HAVE_FUNCTION_TRACER if !XIP_KERNEL
        select HAVE_GCC_PLUGINS
        select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
-       select HAVE_IDE if PCI || ISA || PCMCIA
        select HAVE_IRQ_TIME_ACCOUNTING
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZ4
@@ -361,7 +360,6 @@ config ARCH_FOOTBRIDGE
        bool "FootBridge"
        select CPU_SA110
        select FOOTBRIDGE
-       select HAVE_IDE
        select NEED_MACH_IO_H if !MMU
        select NEED_MACH_MEMORY_H
        help
@@ -395,7 +393,7 @@ config ARCH_IXP4XX
        select IXP4XX_IRQ
        select IXP4XX_TIMER
        # With the new PCI driver this is not needed
-       select NEED_MACH_IO_H if PCI_IXP4XX_LEGACY
+       select NEED_MACH_IO_H if IXP4XX_PCI_LEGACY
        select USB_EHCI_BIG_ENDIAN_DESC
        select USB_EHCI_BIG_ENDIAN_MMIO
        help
@@ -430,7 +428,6 @@ config ARCH_PXA
        select GENERIC_IRQ_MULTI_HANDLER
        select GPIO_PXA
        select GPIOLIB
-       select HAVE_IDE
        select IRQ_DOMAIN
        select PLAT_PXA
        select SPARSE_IRQ
@@ -446,7 +443,6 @@ config ARCH_RPC
        select ARM_HAS_SG_CHAIN
        select CPU_SA110
        select FIQ
-       select HAVE_IDE
        select HAVE_PATA_PLATFORM
        select ISA_DMA_API
        select LEGACY_TIMER_TICK
@@ -469,7 +465,6 @@ config ARCH_SA1100
        select CPU_SA1100
        select GENERIC_IRQ_MULTI_HANDLER
        select GPIOLIB
-       select HAVE_IDE
        select IRQ_DOMAIN
        select ISA
        select NEED_MACH_MEMORY_H
@@ -505,7 +500,6 @@ config ARCH_OMAP1
        select GENERIC_IRQ_CHIP
        select GENERIC_IRQ_MULTI_HANDLER
        select GPIOLIB
-       select HAVE_IDE
        select HAVE_LEGACY_CLK
        select IRQ_DOMAIN
        select NEED_MACH_IO_H if PCCARD
index 33e413c..9b4cf5e 100644 (file)
@@ -4,6 +4,7 @@
 #include "aspeed-g5.dtsi"
 #include <dt-bindings/gpio/aspeed-gpio.h>
 #include <dt-bindings/i2c/i2c.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 
 /{
        model = "ASRock E3C246D4I BMC";
@@ -73,7 +74,8 @@
 
 &vuart {
        status = "okay";
-       aspeed,sirq-active-high;
+       aspeed,lpc-io-reg = <0x2f8>;
+       aspeed,lpc-interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
 };
 
 &mac0 {
index d26a9e1..aa24cac 100644 (file)
                reg = <0x69>;
        };
 
-       power-supply@6a {
+       power-supply@6b {
                compatible = "ibm,cffps";
-               reg = <0x6a>;
+               reg = <0x6b>;
        };
 
-       power-supply@6b {
+       power-supply@6d {
                compatible = "ibm,cffps";
-               reg = <0x6b>;
+               reg = <0x6d>;
        };
 };
 
 
 &emmc {
        status = "okay";
+       clk-phase-mmc-hs200 = <180>, <180>;
 };
 
 &fsim0 {
index 941c048..481d0ee 100644 (file)
        /*W0-W7*/       "","","","","","","","",
        /*X0-X7*/       "","","","","","","","",
        /*Y0-Y7*/       "","","","","","","","",
-       /*Z0-Z7*/       "","","","","","","","",
-       /*AA0-AA7*/     "","","","","","","","",
-       /*AB0-AB7*/     "","","","","","","","",
-       /*AC0-AC7*/     "","","","","","","","";
+       /*Z0-Z7*/       "","","","","","","","";
 
        pin_mclr_vpp {
                gpio-hog;
index e863ec0..e33153d 100644 (file)
        /*W0-W7*/       "","","","","","","","",
        /*X0-X7*/       "","","","","","","","",
        /*Y0-Y7*/       "","","","","","","","",
-       /*Z0-Z7*/       "","","","","","","","",
-       /*AA0-AA7*/     "","","","","","","","",
-       /*AB0-AB7*/     "","","","","","","","",
-       /*AC0-AC7*/     "","","","","","","","";
+       /*Z0-Z7*/       "","","","","","","","";
 };
 
 &fmc {
 
 &emmc {
        status = "okay";
+       clk-phase-mmc-hs200 = <36>, <270>;
 };
 
 &fsim0 {
index 8d209c1..9caba45 100644 (file)
                                        clocks = <&clks IMX6Q_CLK_ECSPI5>,
                                                 <&clks IMX6Q_CLK_ECSPI5>;
                                        clock-names = "ipg", "per";
-                                       dmas = <&sdma 11 8 1>, <&sdma 12 8 2>;
+                                       dmas = <&sdma 11 7 1>, <&sdma 12 7 2>;
                                        dma-names = "rx", "tx";
                                        status = "disabled";
                                };
index 82e01ce..89c342f 100644 (file)
                                        clocks = <&clks IMX6QDL_CLK_ECSPI1>,
                                                 <&clks IMX6QDL_CLK_ECSPI1>;
                                        clock-names = "ipg", "per";
-                                       dmas = <&sdma 3 8 1>, <&sdma 4 8 2>;
+                                       dmas = <&sdma 3 7 1>, <&sdma 4 7 2>;
                                        dma-names = "rx", "tx";
                                        status = "disabled";
                                };
                                        clocks = <&clks IMX6QDL_CLK_ECSPI2>,
                                                 <&clks IMX6QDL_CLK_ECSPI2>;
                                        clock-names = "ipg", "per";
-                                       dmas = <&sdma 5 8 1>, <&sdma 6 8 2>;
+                                       dmas = <&sdma 5 7 1>, <&sdma 6 7 2>;
                                        dma-names = "rx", "tx";
                                        status = "disabled";
                                };
                                        clocks = <&clks IMX6QDL_CLK_ECSPI3>,
                                                 <&clks IMX6QDL_CLK_ECSPI3>;
                                        clock-names = "ipg", "per";
-                                       dmas = <&sdma 7 8 1>, <&sdma 8 8 2>;
+                                       dmas = <&sdma 7 7 1>, <&sdma 8 7 2>;
                                        dma-names = "rx", "tx";
                                        status = "disabled";
                                };
                                        clocks = <&clks IMX6QDL_CLK_ECSPI4>,
                                                 <&clks IMX6QDL_CLK_ECSPI4>;
                                        clock-names = "ipg", "per";
-                                       dmas = <&sdma 9 8 1>, <&sdma 10 8 2>;
+                                       dmas = <&sdma 9 7 1>, <&sdma 10 7 2>;
                                        dma-names = "rx", "tx";
                                        status = "disabled";
                                };
index dace8ff..0a4ffd1 100644 (file)
                         * EBI2. This has a 25MHz crystal next to it, so no
                         * clocking is needed.
                         */
-                       ethernet-ebi2@2,0 {
+                       ethernet@2,0 {
                                compatible = "smsc,lan9221", "smsc,lan9115";
                                reg = <2 0x0 0x100>;
                                /*
                                phy-mode = "mii";
                                reg-io-width = <2>;
                                smsc,force-external-phy;
-                               /* IRQ on edge falling = active low */
-                               smsc,irq-active-low;
                                smsc,irq-push-pull;
 
                                /*
index 37bd41f..151c022 100644 (file)
                #size-cells = <1>;
                ranges;
 
-               vic: intc@10140000 {
+               vic: interrupt-controller@10140000 {
                        compatible = "arm,versatile-vic";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        reg = <0x10140000 0x1000>;
-                       clear-mask = <0xffffffff>;
                        valid-mask = <0xffffffff>;
                };
 
-               sic: intc@10003000 {
+               sic: interrupt-controller@10003000 {
                        compatible = "arm,versatile-sic";
                        interrupt-controller;
                        #interrupt-cells = <1>;
index 06a0fdf..e7e751a 100644 (file)
@@ -7,7 +7,7 @@
 
        amba {
                /* The Versatile PB is using more SIC IRQ lines than the AB */
-               sic: intc@10003000 {
+               sic: interrupt-controller@10003000 {
                        clear-mask = <0xffffffff>;
                        /*
                         * Valid interrupt lines mask according to
index b06e537..4dfe321 100644 (file)
@@ -57,10 +57,7 @@ CONFIG_DRM=y
 CONFIG_DRM_DISPLAY_CONNECTOR=y
 CONFIG_DRM_SIMPLE_BRIDGE=y
 CONFIG_DRM_PL111=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_MATROX=y
-CONFIG_FB_MATROX_MILLENIUM=y
-CONFIG_FB_MATROX_MYSTIQUE=y
+CONFIG_FB=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_LOGO=y
index 52a0400..d9abaae 100644 (file)
@@ -821,7 +821,7 @@ CONFIG_USB_ISP1760=y
 CONFIG_USB_HSIC_USB3503=y
 CONFIG_AB8500_USB=y
 CONFIG_KEYSTONE_USB_PHY=m
-CONFIG_NOP_USB_XCEIV=m
+CONFIG_NOP_USB_XCEIV=y
 CONFIG_AM335X_PHY_USB=m
 CONFIG_TWL6030_USB=m
 CONFIG_USB_GPIO_VBUS=y
index 483c400..4c01e31 100644 (file)
@@ -64,11 +64,9 @@ CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_DRM_DISPLAY_CONNECTOR=y
 CONFIG_DRM_SIMPLE_BRIDGE=y
 CONFIG_DRM_PL111=y
-CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
 CONFIG_SOUND=y
 CONFIG_SND=y
 # CONFIG_SND_DRIVERS is not set
index 66c8b09..d9a27e4 100644 (file)
@@ -135,6 +135,7 @@ CONFIG_DRM_SII902X=y
 CONFIG_DRM_SIMPLE_BRIDGE=y
 CONFIG_DRM_I2C_ADV7511=y
 CONFIG_DRM_I2C_ADV7511_AUDIO=y
+CONFIG_FB=y
 CONFIG_FB_SH_MOBILE_LCDC=y
 CONFIG_BACKLIGHT_PWM=y
 CONFIG_BACKLIGHT_AS3711=y
index dbb1ef6..3b30913 100644 (file)
@@ -61,6 +61,10 @@ CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_ATMEL_MXT=y
 CONFIG_TOUCHSCREEN_BU21013=y
 CONFIG_TOUCHSCREEN_CY8CTMA140=y
+CONFIG_TOUCHSCREEN_CYTTSP_CORE=y
+CONFIG_TOUCHSCREEN_CYTTSP_SPI=y
+CONFIG_TOUCHSCREEN_MMS114=y
+CONFIG_TOUCHSCREEN_ZINITIX=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_AB8500_PONKEY=y
 CONFIG_INPUT_GPIO_VIBRA=y
@@ -100,6 +104,7 @@ CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI=y
 CONFIG_DRM_PANEL_SONY_ACX424AKP=y
 CONFIG_DRM_LIMA=y
 CONFIG_DRM_MCDE=y
+CONFIG_FB=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_KTD253=y
 CONFIG_BACKLIGHT_GPIO=y
index e7ecfb3..b703f47 100644 (file)
@@ -60,7 +60,7 @@ CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_DRM_DISPLAY_CONNECTOR=y
 CONFIG_DRM_SIMPLE_BRIDGE=y
 CONFIG_DRM_PL111=y
-CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_LOGO=y
 CONFIG_SOUND=y
@@ -88,8 +88,6 @@ CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
 CONFIG_NLS_CODEPAGE_850=m
 CONFIG_NLS_ISO8859_1=m
-CONFIG_FONTS=y
-CONFIG_FONT_ACORN_8x8=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
index 4479369..b5e246d 100644 (file)
@@ -11,9 +11,6 @@ CONFIG_CPUSETS=y
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_PROFILING=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
 CONFIG_ARCH_VEXPRESS=y
 CONFIG_ARCH_VEXPRESS_DCSCB=y
 CONFIG_ARCH_VEXPRESS_TC2_PM=y
@@ -23,14 +20,17 @@ CONFIG_MCPM=y
 CONFIG_VMSPLIT_2G=y
 CONFIG_NR_CPUS=8
 CONFIG_ARM_PSCI=y
-CONFIG_CMA=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_CMDLINE="console=ttyAMA0"
 CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
 CONFIG_NEON=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_CMA=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -43,7 +43,6 @@ CONFIG_IP_PNP_BOOTP=y
 CONFIG_NET_9P=y
 CONFIG_NET_9P_VIRTIO=y
 CONFIG_DEVTMPFS=y
-CONFIG_DMA_CMA=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_BLOCK=y
@@ -59,7 +58,6 @@ CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_SCSI_VIRTIO=y
 CONFIG_ATA=y
-# CONFIG_SATA_PMP is not set
 CONFIG_NETDEVICES=y
 CONFIG_VIRTIO_NET=y
 CONFIG_SMC91X=y
@@ -81,11 +79,9 @@ CONFIG_DRM=y
 CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_DRM_SII902X=y
 CONFIG_DRM_PL111=y
-CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
 CONFIG_SOUND=y
 CONFIG_SND=y
 # CONFIG_SND_DRIVERS is not set
@@ -136,10 +132,11 @@ CONFIG_ROOT_NFS=y
 CONFIG_9P_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_DMA_CMA=y
 CONFIG_DEBUG_INFO=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_DEBUG_USER=y
-# CONFIG_CRYPTO_HW is not set
index de11030..1d3aef8 100644 (file)
@@ -9,7 +9,6 @@ menuconfig ARCH_DAVINCI
        select PM_GENERIC_DOMAINS_OF if PM && OF
        select REGMAP_MMIO
        select RESET_CONTROLLER
-       select HAVE_IDE
        select PINCTRL_SINGLE
 
 if ARCH_DAVINCI
index 71c1d18..d73c7b6 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/of.h>
-#include <linux/omap-gpmc.h>
 
 #include <trace/events/power.h>
 
@@ -81,8 +80,6 @@ static void omap3_core_save_context(void)
 
        /* Save the Interrupt controller context */
        omap_intc_save_context();
-       /* Save the GPMC context */
-       omap3_gpmc_save_context();
        /* Save the system control module context, padconf already saved above */
        omap3_control_save_context();
 }
@@ -91,8 +88,6 @@ static void omap3_core_restore_context(void)
 {
        /* Restore the control module context, padconf restored by h/w */
        omap3_control_restore_context();
-       /* Restore the GPMC context */
-       omap3_gpmc_restore_context();
        /* Restore the interrupt controller context */
        omap_intc_restore_context();
 }
index d23970b..f70fb9c 100644 (file)
@@ -49,6 +49,7 @@ static int __init parse_tag_acorn(const struct tag *tag)
                fallthrough;    /* ??? */
        case 256:
                vram_size += PAGE_SIZE * 256;
+               break;
        default:
                break;
        }
index 6452ebf..b21f51b 100644 (file)
@@ -403,7 +403,7 @@ static const struct platform_suspend_ops tegra_suspend_ops = {
        .enter          = tegra_suspend_enter,
 };
 
-void __init tegra_init_suspend(void)
+void tegra_pm_init_suspend(void)
 {
        enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode();
 
index 81525f5..e63f96d 100644 (file)
@@ -25,10 +25,4 @@ void tegra30_sleep_core_init(void);
 
 extern void (*tegra_tear_down_cpu)(void);
 
-#ifdef CONFIG_PM_SLEEP
-void tegra_init_suspend(void);
-#else
-static inline void tegra_init_suspend(void) {}
-#endif
-
 #endif /* _MACH_TEGRA_PM_H_ */
index c011359..ab5008f 100644 (file)
@@ -84,8 +84,6 @@ static void __init tegra_dt_init(void)
 
 static void __init tegra_dt_init_late(void)
 {
-       tegra_init_suspend();
-
        if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) &&
            of_machine_is_compatible("compal,paz00"))
                tegra_paz00_wifikill_init();
index 897634d..a951276 100644 (file)
@@ -1602,6 +1602,9 @@ exit:
                rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
                emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
                break;
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               break;
        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_H:
index e07e7de..b5b13a9 100644 (file)
@@ -1605,7 +1605,8 @@ config ARM64_BTI_KERNEL
        depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
        # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
        depends on !CC_IS_GCC || GCC_VERSION >= 100100
-       depends on !(CC_IS_CLANG && GCOV_KERNEL)
+       # https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9
+       depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
        depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
        help
          Build the kernel with Branch Target Identification annotations
index 9f7c7f5..f4eaab3 100644 (file)
                        };
 
                        flexcan1: can@308c0000 {
-                               compatible = "fsl,imx8mp-flexcan", "fsl,imx6q-flexcan";
+                               compatible = "fsl,imx8mp-flexcan";
                                reg = <0x308c0000 0x10000>;
                                interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX8MP_CLK_IPG_ROOT>,
                        };
 
                        flexcan2: can@308d0000 {
-                               compatible = "fsl,imx8mp-flexcan", "fsl,imx6q-flexcan";
+                               compatible = "fsl,imx8mp-flexcan";
                                reg = <0x308d0000 0x10000>;
                                interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX8MP_CLK_IPG_ROOT>,
                        eqos: ethernet@30bf0000 {
                                compatible = "nxp,imx8mp-dwmac-eqos", "snps,dwmac-5.10a";
                                reg = <0x30bf0000 0x10000>;
-                               interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
-                                            <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
-                               interrupt-names = "eth_wake_irq", "macirq";
+                               interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupt-names = "macirq", "eth_wake_irq";
                                clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>,
                                         <&clk IMX8MP_CLK_QOS_ENET_ROOT>,
                                         <&clk IMX8MP_CLK_ENET_QOS_TIMER>,
index b7d5328..076d5ef 100644 (file)
                                 <&bpmp TEGRA194_CLK_XUSB_SS>,
                                 <&bpmp TEGRA194_CLK_XUSB_FS>;
                        clock-names = "dev", "ss", "ss_src", "fs_src";
+                       interconnects = <&mc TEGRA194_MEMORY_CLIENT_XUSB_DEVR &emc>,
+                                       <&mc TEGRA194_MEMORY_CLIENT_XUSB_DEVW &emc>;
+                       interconnect-names = "dma-mem", "write";
+                       iommus = <&smmu TEGRA194_SID_XUSB_DEV>;
                        power-domains = <&bpmp TEGRA194_POWER_DOMAIN_XUSBB>,
                                        <&bpmp TEGRA194_POWER_DOMAIN_XUSBA>;
                        power-domain-names = "dev", "ss";
                                      "xusb_ss", "xusb_ss_src", "xusb_hs_src",
                                      "xusb_fs_src", "pll_u_480m", "clk_m",
                                      "pll_e";
+                       interconnects = <&mc TEGRA194_MEMORY_CLIENT_XUSB_HOSTR &emc>,
+                                       <&mc TEGRA194_MEMORY_CLIENT_XUSB_HOSTW &emc>;
+                       interconnect-names = "dma-mem", "write";
+                       iommus = <&smmu TEGRA194_SID_XUSB_HOST>;
 
                        power-domains = <&bpmp TEGRA194_POWER_DOMAIN_XUSBC>,
                                        <&bpmp TEGRA194_POWER_DOMAIN_XUSBA>;
                 * for 8x and 11.025x sample rate streams.
                 */
                assigned-clock-rates = <258000000>;
+
+               interconnects = <&mc TEGRA194_MEMORY_CLIENT_APEDMAR &emc>,
+                               <&mc TEGRA194_MEMORY_CLIENT_APEDMAW &emc>;
+               interconnect-names = "dma-mem", "write";
+               iommus = <&smmu TEGRA194_SID_APE>;
        };
 
        tcu: tcu {
index 0686923..51e1709 100644 (file)
        status = "okay";
        extcon = <&usb2_id>;
 
-       usb@7600000 {
+       dwc3@7600000 {
                extcon = <&usb2_id>;
                dr_mode = "otg";
                maximum-speed = "high-speed";
        status = "okay";
        extcon = <&usb3_id>;
 
-       usb@6a00000 {
+       dwc3@6a00000 {
                extcon = <&usb3_id>;
                dr_mode = "otg";
        };
index 95d6cb8..f39bc10 100644 (file)
                        resets = <&gcc GCC_USB0_BCR>;
                        status = "disabled";
 
-                       dwc_0: usb@8a00000 {
+                       dwc_0: dwc3@8a00000 {
                                compatible = "snps,dwc3";
                                reg = <0x8a00000 0xcd00>;
                                interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
                        resets = <&gcc GCC_USB1_BCR>;
                        status = "disabled";
 
-                       dwc_1: usb@8c00000 {
+                       dwc_1: dwc3@8c00000 {
                                compatible = "snps,dwc3";
                                reg = <0x8c00000 0xcd00>;
                                interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
index 0e1bc46..78c55ca 100644 (file)
                        power-domains = <&gcc USB30_GDSC>;
                        status = "disabled";
 
-                       usb@6a00000 {
+                       dwc3@6a00000 {
                                compatible = "snps,dwc3";
                                reg = <0x06a00000 0xcc00>;
                                interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
                        qcom,select-utmi-as-pipe-clk;
                        status = "disabled";
 
-                       usb@7600000 {
+                       dwc3@7600000 {
                                compatible = "snps,dwc3";
                                reg = <0x07600000 0xcc00>;
                                interrupts = <0 138 IRQ_TYPE_LEVEL_HIGH>;
index 6f294f9..e9d3ce2 100644 (file)
 
                        resets = <&gcc GCC_USB_30_BCR>;
 
-                       usb3_dwc3: usb@a800000 {
+                       usb3_dwc3: dwc3@a800000 {
                                compatible = "snps,dwc3";
                                reg = <0x0a800000 0xcd00>;
                                interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
index f8a5530..a80c578 100644 (file)
 &usb3 {
        status = "okay";
 
-       usb@7580000 {
+       dwc3@7580000 {
                dr_mode = "host";
        };
 };
index 9c4be02..339790b 100644 (file)
                        assigned-clock-rates = <19200000>, <200000000>;
                        status = "disabled";
 
-                       usb@7580000 {
+                       dwc3@7580000 {
                                compatible = "snps,dwc3";
                                reg = <0x07580000 0xcd00>;
                                interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
                        assigned-clock-rates = <19200000>, <133333333>;
                        status = "disabled";
 
-                       usb@78c0000 {
+                       dwc3@78c0000 {
                                compatible = "snps,dwc3";
                                reg = <0x078c0000 0xcc00>;
                                interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
index a5d58eb..a9a052f 100644 (file)
                                        <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3 0>;
                        interconnect-names = "usb-ddr", "apps-usb";
 
-                       usb_1_dwc3: usb@a600000 {
+                       usb_1_dwc3: dwc3@a600000 {
                                compatible = "snps,dwc3";
                                reg = <0 0x0a600000 0 0xe000>;
                                interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
index 1796ae8..0a86fe7 100644 (file)
                                        <&gladiator_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_0 0>;
                        interconnect-names = "usb-ddr", "apps-usb";
 
-                       usb_1_dwc3: usb@a600000 {
+                       usb_1_dwc3: dwc3@a600000 {
                                compatible = "snps,dwc3";
                                reg = <0 0x0a600000 0 0xcd00>;
                                interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
                                        <&gladiator_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_USB3_1 0>;
                        interconnect-names = "usb-ddr", "apps-usb";
 
-                       usb_2_dwc3: usb@a800000 {
+                       usb_2_dwc3: dwc3@a800000 {
                                compatible = "snps,dwc3";
                                reg = <0 0x0a800000 0 0xcd00>;
                                interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
index 612dda0..eef9d79 100644 (file)
 
                        resets = <&gcc GCC_USB30_PRIM_BCR>;
 
-                       usb_1_dwc3: usb@a600000 {
+                       usb_1_dwc3: dwc3@a600000 {
                                compatible = "snps,dwc3";
                                reg = <0 0x0a600000 0 0xcd00>;
                                interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
index 734c8ad..01482d2 100644 (file)
                                     <GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "eri", "rxi", "txi",
                                          "bri", "dri", "tei";
-                       clocks = <&cpg CPG_MOD R9A07G044_CLK_SCIF0>;
+                       clocks = <&cpg CPG_MOD R9A07G044_SCIF0_CLK_PCK>;
                        clock-names = "fck";
                        power-domains = <&cpg>;
-                       resets = <&cpg R9A07G044_CLK_SCIF0>;
+                       resets = <&cpg R9A07G044_SCIF0_RST_SYSTEM_N>;
                        status = "disabled";
                };
 
index a9c0716..a074459 100644 (file)
@@ -47,7 +47,7 @@
  * cache before the transfer is done, causing old data to be seen by
  * the CPU.
  */
-#define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN      (128)
 
 #ifdef CONFIG_KASAN_SW_TAGS
 #define ARCH_SLAB_MINALIGN     (1ULL << KASAN_SHADOW_SCALE_SHIFT)
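
For context on the constant above: on a non-coherent system, any variable sharing a cache line with a DMA buffer can trigger a dirty write-back that overwrites data the device has just placed there. A minimal C sketch, assuming GNU attribute syntax, of how a driver-side buffer honours this alignment:

    #define ARCH_DMA_MINALIGN 128   /* fixed, independent of L1_CACHE_BYTES */

    /* Aligning (and implicitly padding) the buffer to ARCH_DMA_MINALIGN
     * guarantees no unrelated data shares its cache lines, so a CPU
     * write-back cannot clobber bytes written by the device. */
    struct rx_ring_entry {
            unsigned char payload[2048];
    } __attribute__((aligned(ARCH_DMA_MINALIGN)));
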
index 99ad77d..97ddc6c 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/cpumask.h>
 
+#include <asm/smp.h>
 #include <asm/types.h>
 
 struct mpidr_hash {
index cce3085..3f1490b 100644 (file)
@@ -17,7 +17,7 @@ CFLAGS_syscall.o      += -fno-stack-protector
 # It's not safe to invoke KCOV when portions of the kernel environment aren't
 # available or are out-of-sync with HW state. Since `noinstr` doesn't always
 # inhibit KCOV instrumentation, disable it for the entire compilation unit.
-KCOV_INSTRUMENT_entry.o := n
+KCOV_INSTRUMENT_entry-common.o := n
 KCOV_INSTRUMENT_idle.o := n
 
 # Object file lists.
index 125d5c9..0ead8bf 100644 (file)
@@ -81,6 +81,7 @@
 #include <asm/mmu_context.h>
 #include <asm/mte.h>
 #include <asm/processor.h>
+#include <asm/smp.h>
 #include <asm/sysreg.h>
 #include <asm/traps.h>
 #include <asm/virt.h>
index 12ce14a..db8b2e2 100644 (file)
@@ -604,7 +604,7 @@ asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
        __el0_fiq_handler_common(regs);
 }
 
-static void __el0_error_handler_common(struct pt_regs *regs)
+static void noinstr __el0_error_handler_common(struct pt_regs *regs)
 {
        unsigned long esr = read_sysreg(esr_el1);
 
index 69b3fde..36f51b0 100644 (file)
@@ -193,18 +193,6 @@ void mte_check_tfsr_el1(void)
 }
 #endif
 
-static void update_gcr_el1_excl(u64 excl)
-{
-
-       /*
-        * Note that the mask controlled by the user via prctl() is an
-        * include while GCR_EL1 accepts an exclude mask.
-        * No need for ISB since this only affects EL0 currently, implicit
-        * with ERET.
-        */
-       sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
-}
-
 static void set_gcr_el1_excl(u64 excl)
 {
        current->thread.gcr_user_excl = excl;
@@ -265,7 +253,8 @@ void mte_suspend_exit(void)
        if (!system_supports_mte())
                return;
 
-       update_gcr_el1_excl(gcr_kernel_excl);
+       sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, gcr_kernel_excl);
+       isb();
 }
 
 long set_mte_ctrl(struct task_struct *task, unsigned long arg)
index d3d37f9..4873811 100644 (file)
@@ -32,20 +32,23 @@ SYM_FUNC_END(__arm_smccc_sve_check)
 EXPORT_SYMBOL(__arm_smccc_sve_check)
 
        .macro SMCCC instr
+       stp     x29, x30, [sp, #-16]!
+       mov     x29, sp
 alternative_if ARM64_SVE
        bl      __arm_smccc_sve_check
 alternative_else_nop_endif
        \instr  #0
-       ldr     x4, [sp]
+       ldr     x4, [sp, #16]
        stp     x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
        stp     x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
-       ldr     x4, [sp, #8]
+       ldr     x4, [sp, #24]
        cbz     x4, 1f /* no quirk structure */
        ldr     x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS]
        cmp     x9, #ARM_SMCCC_QUIRK_QCOM_A6
        b.ne    1f
        str     x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS]
-1:     ret
+1:     ldp     x29, x30, [sp], #16
+       ret
        .endm
 
 /*
index 3155c9e..0625bf2 100644 (file)
@@ -947,7 +947,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                vma_shift = get_vma_page_shift(vma, hva);
        }
 
-       shared = (vma->vm_flags & VM_PFNMAP);
+       shared = (vma->vm_flags & VM_SHARED);
 
        switch (vma_shift) {
 #ifndef __PAGETABLE_PMD_FOLDED
index 95cd62d..2cf999e 100644 (file)
@@ -29,7 +29,7 @@
        .endm
 
        .macro ldrh1 reg, ptr, val
-       user_ldst 9998f, ldtrh, \reg, \ptr, \val
+       user_ldst 9997f, ldtrh, \reg, \ptr, \val
        .endm
 
        .macro strh1 reg, ptr, val
@@ -37,7 +37,7 @@
        .endm
 
        .macro ldr1 reg, ptr, val
-       user_ldst 9998f, ldtr, \reg, \ptr, \val
+       user_ldst 9997f, ldtr, \reg, \ptr, \val
        .endm
 
        .macro str1 reg, ptr, val
@@ -45,7 +45,7 @@
        .endm
 
        .macro ldp1 reg1, reg2, ptr, val
-       user_ldp 9998f, \reg1, \reg2, \ptr, \val
+       user_ldp 9997f, \reg1, \reg2, \ptr, \val
        .endm
 
        .macro stp1 reg1, reg2, ptr, val
        .endm
 
 end    .req    x5
+srcin  .req    x15
 SYM_FUNC_START(__arch_copy_from_user)
        add     end, x0, x2
+       mov     srcin, x1
 #include "copy_template.S"
        mov     x0, #0                          // Nothing to copy
        ret
@@ -63,6 +65,11 @@ EXPORT_SYMBOL(__arch_copy_from_user)
 
        .section .fixup,"ax"
        .align  2
+9997:  cmp     dst, dstin
+       b.ne    9998f
+       // Before being absolutely sure we couldn't copy anything, try harder
+USER(9998f, ldtrb tmp1w, [srcin])
+       strb    tmp1w, [dst], #1
 9998:  sub     x0, end, dst                    // bytes not copied
        ret
        .previous
index 1f61cd0..dbea379 100644 (file)
        .endm
 
        .macro ldrh1 reg, ptr, val
-       user_ldst 9998f, ldtrh, \reg, \ptr, \val
+       user_ldst 9997f, ldtrh, \reg, \ptr, \val
        .endm
 
        .macro strh1 reg, ptr, val
-       user_ldst 9998f, sttrh, \reg, \ptr, \val
+       user_ldst 9997f, sttrh, \reg, \ptr, \val
        .endm
 
        .macro ldr1 reg, ptr, val
-       user_ldst 9998f, ldtr, \reg, \ptr, \val
+       user_ldst 9997f, ldtr, \reg, \ptr, \val
        .endm
 
        .macro str1 reg, ptr, val
-       user_ldst 9998f, sttr, \reg, \ptr, \val
+       user_ldst 9997f, sttr, \reg, \ptr, \val
        .endm
 
        .macro ldp1 reg1, reg2, ptr, val
-       user_ldp 9998f, \reg1, \reg2, \ptr, \val
+       user_ldp 9997f, \reg1, \reg2, \ptr, \val
        .endm
 
        .macro stp1 reg1, reg2, ptr, val
-       user_stp 9998f, \reg1, \reg2, \ptr, \val
+       user_stp 9997f, \reg1, \reg2, \ptr, \val
        .endm
 
 end    .req    x5
-
+srcin  .req    x15
 SYM_FUNC_START(__arch_copy_in_user)
        add     end, x0, x2
+       mov     srcin, x1
 #include "copy_template.S"
        mov     x0, #0
        ret
@@ -65,6 +66,12 @@ EXPORT_SYMBOL(__arch_copy_in_user)
 
        .section .fixup,"ax"
        .align  2
+9997:  cmp     dst, dstin
+       b.ne    9998f
+       // Before being absolutely sure we couldn't copy anything, try harder
+USER(9998f, ldtrb tmp1w, [srcin])
+USER(9998f, sttrb tmp1w, [dst])
+       add     dst, dst, #1
 9998:  sub     x0, end, dst                    // bytes not copied
        ret
        .previous
index 043da90..9f380ee 100644 (file)
@@ -32,7 +32,7 @@
        .endm
 
        .macro strh1 reg, ptr, val
-       user_ldst 9998f, sttrh, \reg, \ptr, \val
+       user_ldst 9997f, sttrh, \reg, \ptr, \val
        .endm
 
        .macro ldr1 reg, ptr, val
@@ -40,7 +40,7 @@
        .endm
 
        .macro str1 reg, ptr, val
-       user_ldst 9998f, sttr, \reg, \ptr, \val
+       user_ldst 9997f, sttr, \reg, \ptr, \val
        .endm
 
        .macro ldp1 reg1, reg2, ptr, val
        .endm
 
        .macro stp1 reg1, reg2, ptr, val
-       user_stp 9998f, \reg1, \reg2, \ptr, \val
+       user_stp 9997f, \reg1, \reg2, \ptr, \val
        .endm
 
 end    .req    x5
+srcin  .req    x15
 SYM_FUNC_START(__arch_copy_to_user)
        add     end, x0, x2
+       mov     srcin, x1
 #include "copy_template.S"
        mov     x0, #0
        ret
@@ -62,6 +64,12 @@ EXPORT_SYMBOL(__arch_copy_to_user)
 
        .section .fixup,"ax"
        .align  2
+9997:  cmp     dst, dstin
+       b.ne    9998f
+       // Before being absolutely sure we couldn't copy anything, try harder
+       ldrb    tmp1w, [srcin]
+USER(9998f, sttrb tmp1w, [dst])
+       add     dst, dst, #1
 9998:  sub     x0, end, dst                    // bytes not copied
        ret
        .previous
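
The three fixup hunks above share one idea: before reporting that nothing was copied, retry a single byte so that a return value equal to the full length really means the very first byte faulted. A C sketch of that logic, with put_user_byte() as a hypothetical single-byte store helper (not a kernel API):

    #include <stddef.h>

    /* dst/dstin/end mirror the registers used in the assembly fixup. */
    static size_t bytes_not_copied(unsigned char *dst, unsigned char *dstin,
                                   unsigned char *end, const unsigned char *srcin,
                                   int (*put_user_byte)(unsigned char, unsigned char *))
    {
            /* "Try harder": if no progress was made, attempt one byte. */
            if (dst == dstin && put_user_byte(*srcin, dst) == 0)
                    dst++;
            return end - dst;       /* bytes not copied */
    }
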
index 35fbdb7..1648790 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/mte-def.h>
 
 /* Assumptions:
  *
 #define REP8_7f 0x7f7f7f7f7f7f7f7f
 #define REP8_80 0x8080808080808080
 
+/*
+ * When KASAN_HW_TAGS is in use, memory is checked at MTE_GRANULE_SIZE
+ * (16-byte) granularity, and we must ensure that no access straddles this
+ * alignment boundary.
+ */
+#ifdef CONFIG_KASAN_HW_TAGS
+#define MIN_PAGE_SIZE MTE_GRANULE_SIZE
+#else
 #define MIN_PAGE_SIZE 4096
+#endif
 
        /* Since strings are short on average, we check the first 16 bytes
           of the string for a NUL character.  In order to do an unaligned ldp
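
The reason for the smaller MIN_PAGE_SIZE: with KASAN_HW_TAGS, an unaligned 16-byte probe can fault even inside an accessible page if it crosses into a granule carrying a different MTE tag, so the boundary test must use the 16-byte granule rather than the 4 KiB page. A sketch of the predicate the constant feeds into (illustrative, not the kernel's helper):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define MIN_PAGE_SIZE 16        /* MTE_GRANULE_SIZE under KASAN_HW_TAGS */

    /* True if an unaligned load of 'len' bytes at 'addr' would straddle a
     * MIN_PAGE_SIZE boundary and could therefore touch a granule with a
     * different MTE tag. */
    static bool load_straddles_granule(uintptr_t addr, size_t len)
    {
            return (addr / MIN_PAGE_SIZE) != ((addr + len - 1) / MIN_PAGE_SIZE);
    }
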
index d745865..9ff0de1 100644 (file)
@@ -1339,7 +1339,6 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
        return dt_virt;
 }
 
-#if CONFIG_PGTABLE_LEVELS > 3
 int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
 {
        pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));
@@ -1354,16 +1353,6 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
        return 1;
 }
 
-int pud_clear_huge(pud_t *pudp)
-{
-       if (!pud_sect(READ_ONCE(*pudp)))
-               return 0;
-       pud_clear(pudp);
-       return 1;
-}
-#endif
-
-#if CONFIG_PGTABLE_LEVELS > 2
 int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
 {
        pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));
@@ -1378,6 +1367,14 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
        return 1;
 }
 
+int pud_clear_huge(pud_t *pudp)
+{
+       if (!pud_sect(READ_ONCE(*pudp)))
+               return 0;
+       pud_clear(pudp);
+       return 1;
+}
+
 int pmd_clear_huge(pmd_t *pmdp)
 {
        if (!pmd_sect(READ_ONCE(*pmdp)))
@@ -1385,7 +1382,6 @@ int pmd_clear_huge(pmd_t *pmdp)
        pmd_clear(pmdp);
        return 1;
 }
-#endif
 
 int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
 {
index dccf98a..41c23f4 100644 (file)
@@ -823,6 +823,19 @@ emit_cond_jmp:
                        return ret;
                break;
 
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               /*
+                * Nothing required here.
+                *
+                * In case of arm64, we rely on the firmware mitigation of
+                * Speculative Store Bypass as controlled via the ssbd kernel
+                * parameter. Whenever the mitigation is enabled, it works
+                * for all of the kernel code with no need to provide any
+                * additional instructions.
+                */
+               break;
+
        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_H:
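
The opcode values below are taken from the in-kernel BPF encoding (class BPF_ST 0x02, mode BPF_NOSPEC 0xc0) and should be treated as illustrative. The sketch shows the dispatch shape: an architecture without a blanket Speculative Store Bypass mitigation emits a real barrier where arm64 emits nothing:

    #include <stdbool.h>

    #define BPF_ST     0x02
    #define BPF_NOSPEC 0xc0

    /* Hypothetical arch hook, for illustration only. */
    static void emit_speculation_barrier(void) { /* e.g. fence/lfence */ }

    static void jit_handle_nospec(int code, bool arch_needs_barrier)
    {
            if (code != (BPF_ST | BPF_NOSPEC))
                    return;
            if (arch_needs_barrier)
                    emit_speculation_barrier();
            /* arm64 relies on the SSB firmware mitigation: emit nothing. */
    }
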
index b5e14d5..c30baa0 100644 (file)
@@ -44,7 +44,6 @@ config H8300_H8MAX
        bool "H8MAX"
        select H83069
        select RAMKERNEL
-       select HAVE_IDE
        help
          H8MAX Evaluation Board Support
          More Information. (Japanese Only)
index cf425c2..4993c7a 100644 (file)
@@ -25,7 +25,6 @@ config IA64
        select HAVE_ASM_MODVERSIONS
        select HAVE_UNSTABLE_SCHED_CLOCK
        select HAVE_EXIT_THREAD
-       select HAVE_IDE
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select HAVE_FTRACE_MCOUNT_RECORD
index 96989ad..d632a1d 100644 (file)
@@ -23,7 +23,6 @@ config M68K
        select HAVE_DEBUG_BUGVERBOSE
        select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_HAS_NO_UNALIGNED
        select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
-       select HAVE_IDE
        select HAVE_MOD_ARCH_SPECIFIC
        select HAVE_UID16
        select MMU_GATHER_NO_RANGE if MMU
index d964c1f..6a07a68 100644 (file)
@@ -33,6 +33,7 @@ config MAC
        depends on MMU
        select MMU_MOTOROLA if MMU
        select HAVE_ARCH_NVRAM_OPS
+       select HAVE_PATA_PLATFORM
        select LEGACY_TIMER_TICK
        help
          This option enables support for the Apple Macintosh series of
index 2c4d2ca..4853751 100644 (file)
@@ -26,7 +26,7 @@ DEFINE_CLK(pll, "pll.0", MCF_CLK);
 DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
 
 static struct clk_lookup m525x_clk_lookup[] = {
-       CLKDEV_INIT(NULL, "pll.0", &pll),
+       CLKDEV_INIT(NULL, "pll.0", &clk_pll),
        CLKDEV_INIT(NULL, "sys.0", &clk_sys),
        CLKDEV_INIT("mcftmr.0", NULL, &clk_sys),
        CLKDEV_INIT("mcftmr.1", NULL, &clk_sys),
index cee6087..6dfb27d 100644 (file)
@@ -71,7 +71,6 @@ config MIPS
        select HAVE_FUNCTION_TRACER
        select HAVE_GCC_PLUGINS
        select HAVE_GENERIC_VDSO
-       select HAVE_IDE
        select HAVE_IOREMAP_PROT
        select HAVE_IRQ_EXIT_ON_IRQ_STACK
        select HAVE_IRQ_TIME_ACCOUNTING
index 08f9dd6..86310d6 100644 (file)
@@ -76,7 +76,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
                /* we only have a 32-bit FPU */
                return SIGFPE;
 #endif
-               fallthrough;
+               /* fallthrough */
        case FPU_32BIT:
                if (cpu_has_fre) {
                        /* clear FRE */
index cd4afcd..9adad24 100644 (file)
@@ -1383,6 +1383,7 @@ static void build_r4000_tlb_refill_handler(void)
        switch (boot_cpu_type()) {
        default:
                if (sizeof(long) == 4) {
+               fallthrough;
        case CPU_LOONGSON2EF:
                /* Loongson2 ebase is different than r4k, we have more space */
                        if ((p - tlb_handler) > 64)
@@ -2169,6 +2170,7 @@ static void build_r4000_tlb_load_handler(void)
                default:
                        if (cpu_has_mips_r2_exec_hazard) {
                                uasm_i_ehb(&p);
+                       fallthrough;
 
                case CPU_CAVIUM_OCTEON:
                case CPU_CAVIUM_OCTEON_PLUS:
index 939dd06..3a73e93 100644 (file)
@@ -1355,6 +1355,9 @@ jeq_common:
                }
                break;
 
+       case BPF_ST | BPF_NOSPEC: /* speculation barrier */
+               break;
+
        case BPF_ST | BPF_B | BPF_MEM:
        case BPF_ST | BPF_H | BPF_MEM:
        case BPF_ST | BPF_W | BPF_MEM:
index c206b31..1bdf5e7 100644 (file)
@@ -59,7 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
index bde9907..4f8c1fb 100644 (file)
@@ -3,7 +3,6 @@ config PARISC
        def_bool y
        select ARCH_32BIT_OFF_T if !64BIT
        select ARCH_MIGHT_HAVE_PC_PARPORT
-       select HAVE_IDE
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_SYSCALL_TRACEPOINTS
index d01e340..663766f 100644 (file)
@@ -220,7 +220,6 @@ config PPC
        select HAVE_HARDLOCKUP_DETECTOR_ARCH    if PPC_BOOK3S_64 && SMP
        select HAVE_HARDLOCKUP_DETECTOR_PERF    if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
        select HAVE_HW_BREAKPOINT               if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
-       select HAVE_IDE
        select HAVE_IOREMAP_PROT
        select HAVE_IRQ_EXIT_ON_IRQ_STACK
        select HAVE_IRQ_TIME_ACCOUNTING
index 2813e3f..3c5baaa 100644 (file)
@@ -27,6 +27,13 @@ KASAN_SANITIZE := n
 
 ccflags-y := -shared -fno-common -fno-builtin -nostdlib \
        -Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both
+
+# Go prior to 1.16.x assumes r30 is not clobbered by any VDSO code. That used to be true
+# by accident when the VDSO was hand-written asm code, but may not be now that the VDSO is
+# compiler generated. To avoid breaking Go, tell GCC not to use r30. The impact on
+# code generation is minimal; it will just use r29 instead.
+ccflags-y += $(call cc-option, -ffixed-r30)
+
 asflags-y := -D__VDSO64__ -s
 
 targets += vdso64.lds
index 1d1fcc2..085fb8e 100644 (file)
@@ -2697,8 +2697,10 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
                HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX;
        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
                if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
                        vcpu->arch.hfscr |= HFSCR_TM;
+#endif
        }
        if (cpu_has_feature(CPU_FTR_TM_COMP))
                vcpu->arch.hfscr |= HFSCR_TM;
index 8543ad5..898f942 100644 (file)
@@ -302,6 +302,9 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
        if (vcpu->kvm->arch.l1_ptcr == 0)
                return H_NOT_AVAILABLE;
 
+       if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
+               return H_BAD_MODE;
+
        /* copy parameters in */
        hv_ptr = kvmppc_get_gpr(vcpu, 4);
        regs_ptr = kvmppc_get_gpr(vcpu, 5);
@@ -322,6 +325,23 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
        if (l2_hv.vcpu_token >= NR_CPUS)
                return H_PARAMETER;
 
+       /*
+        * L1 must have set up a suspended state to enter the L2 in a
+        * transactional state, and only in that case. These have to be
+        * filtered out here to prevent causing a TM Bad Thing in the
+        * host HRFID. We could synthesize a TM Bad Thing back to the L1
+        * here, but there doesn't seem to be much point.
+        */
+       if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) {
+               if (!MSR_TM_ACTIVE(l2_regs.msr))
+                       return H_BAD_MODE;
+       } else {
+               if (l2_regs.msr & MSR_TS_MASK)
+                       return H_BAD_MODE;
+               if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK))
+                       return H_BAD_MODE;
+       }
+
        /* translate lpid */
        l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
        if (!l2)
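
The suspend/transactional filter above can be read as a small predicate: an L2 may carry transaction-state bits only when the L1 itself entered in suspend, and must carry them in that case. The bit positions for the two-bit MSR[TS] field below are illustrative assumptions, not copied from the headers:

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_TS_S    (1ULL << 33)        /* suspended (assumed position) */
    #define MSR_TS_T    (1ULL << 34)        /* transactional (assumed position) */
    #define MSR_TS_MASK (MSR_TS_S | MSR_TS_T)

    static bool nested_tm_entry_ok(uint64_t l1_msr, uint64_t l2_msr)
    {
            if ((l1_msr & MSR_TS_MASK) == MSR_TS_S)         /* L1 suspended */
                    return (l2_msr & MSR_TS_MASK) != 0;     /* L2 must be TM-active */
            return (l2_msr & MSR_TS_MASK) == 0;             /* else no TS bits */
    }
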
index 83f592e..961b3d7 100644 (file)
@@ -317,6 +317,9 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
         */
        mtspr(SPRN_HDEC, hdec);
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+tm_return_to_guest:
+#endif
        mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
        mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
        mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
@@ -415,11 +418,23 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
                 * is in real suspend mode and is trying to transition to
                 * transactional mode.
                 */
-               if (local_paca->kvm_hstate.fake_suspend &&
+               if (!local_paca->kvm_hstate.fake_suspend &&
                                (vcpu->arch.shregs.msr & MSR_TS_S)) {
                        if (kvmhv_p9_tm_emulation_early(vcpu)) {
-                               /* Prevent it being handled again. */
-                               trap = 0;
+                               /*
+                                * Go straight back into the guest with the
+                                * new NIP/MSR as set by TM emulation.
+                                */
+                               mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
+                               mtspr(SPRN_HSRR1, vcpu->arch.shregs.msr);
+
+                               /*
+                                * tm_return_to_guest re-loads SRR0/1, DAR,
+                                * DSISR after RI is cleared, in case they had
+                                * been clobbered by a MCE.
+                                */
+                               __mtmsrd(0, 1); /* clear RI */
+                               goto tm_return_to_guest;
                        }
                }
 #endif
@@ -499,6 +514,10 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
         * If we are in real mode, only switch MMU on after the MMU is
         * switched to host, to avoid the P9_RADIX_PREFETCH_BUG.
         */
+       if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+           vcpu->arch.shregs.msr & MSR_TS_MASK)
+               msr |= MSR_TS_S;
+
        __mtmsrd(msr, 0);
 
        end_timing(vcpu);
index c5e6775..0f847f1 100644 (file)
@@ -242,6 +242,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
         * value so we can restore it on the way out.
         */
        orig_rets = args.rets;
+       if (be32_to_cpu(args.nargs) >= ARRAY_SIZE(args.args)) {
+               /*
+                * Don't overflow our args array: ensure there is room for
+                * at least rets[0] (even if the call specifies 0 nret).
+                *
+                * Each handler must then check for the correct nargs and nret
+                * values, but they may always return failure in rets[0].
+                */
+               rc = -EINVAL;
+               goto fail;
+       }
        args.rets = &args.args[be32_to_cpu(args.nargs)];
 
        mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
@@ -269,9 +280,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 fail:
        /*
         * We only get here if the guest has called RTAS with a bogus
-        * args pointer. That means we can't get to the args, and so we
-        * can't fail the RTAS call. So fail right out to userspace,
-        * which should kill the guest.
+        * args pointer or nargs/nret values that would overflow the
+        * array. That means we can't get to the args, and so we can't
+        * fail the RTAS call. So fail right out to userspace, which
+        * should kill the guest.
+        *
+        * SLOF should actually pass the hcall return value from the
+        * rtas handler call in r3, so enter_rtas could be modified to
+        * return a failure indication in r3 and we could return such
+        * errors to the guest rather than failing to host userspace.
+        * However old guests that don't test for failure could then
+        * continue silently after errors, so for now we won't do this.
         */
        return rc;
 }
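
The overflow being closed: args.rets aliases into the fixed args[] array immediately after the inputs, so nargs must leave at least one slot free for rets[0]. A standalone sketch of the layout and the new bound, with field widths assumed from the hunk:

    #include <stdint.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct rtas_args_sketch {
            uint32_t nargs;
            uint32_t nret;
            uint32_t args[16];      /* inputs, then return slots */
            uint32_t *rets;         /* = &args[nargs] */
    };

    /* Reject calls whose rets pointer would start at or past the end of
     * args[]; room for rets[0] must exist even when nret is 0. */
    static int rtas_args_in_bounds(const struct rtas_args_sketch *a)
    {
            return a->nargs < ARRAY_SIZE(a->args);
    }
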
index be33b53..b4e6f70 100644 (file)
@@ -2048,9 +2048,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
-               vcpu_load(vcpu);
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
+               vcpu_load(vcpu);
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                vcpu_put(vcpu);
                break;
@@ -2074,9 +2074,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        case KVM_DIRTY_TLB: {
                struct kvm_dirty_tlb dirty;
                r = -EFAULT;
-               vcpu_load(vcpu);
                if (copy_from_user(&dirty, argp, sizeof(dirty)))
                        goto out;
+               vcpu_load(vcpu);
                r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
                vcpu_put(vcpu);
                break;
index 60780e0..0df9fe2 100644 (file)
@@ -240,3 +240,13 @@ void __init setup_kuap(bool disabled)
        mtspr(SPRN_MD_AP, MD_APG_KUAP);
 }
 #endif
+
+int pud_clear_huge(pud_t *pud)
+{
+        return 0;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+        return 0;
+}
index 34bb158..beb12cb 100644 (file)
@@ -738,6 +738,12 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                        break;
 
                /*
+                * BPF_ST NOSPEC (speculation barrier)
+                */
+               case BPF_ST | BPF_NOSPEC:
+                       break;
+
+               /*
                 * BPF_ST(X)
                 */
                case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
index de85958..b87a63d 100644 (file)
@@ -628,6 +628,12 @@ emit_clear:
                        break;
 
                /*
+                * BPF_ST NOSPEC (speculation barrier)
+                */
+               case BPF_ST | BPF_NOSPEC:
+                       break;
+
+               /*
                 * BPF_ST(X)
                 */
                case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
index 9b88e3c..534b031 100644 (file)
@@ -42,6 +42,7 @@ static int pasemi_system_reset_exception(struct pt_regs *regs)
        switch (regs->msr & SRR1_WAKEMASK) {
        case SRR1_WAKEDEC:
                set_dec(1);
+               break;
        case SRR1_WAKEEE:
                /*
                 * Handle these when interrupts get re-enabled and we take
index bdfea6d..3256a31 100644 (file)
@@ -146,6 +146,7 @@ static inline void psurge_clr_ipi(int cpu)
                switch(psurge_type) {
                case PSURGE_DUAL:
                        out_8(psurge_sec_intr, ~0);
+                       break;
                case PSURGE_NONE:
                        break;
                default:
index 631a0d5..6b08866 100644 (file)
@@ -77,7 +77,7 @@
 #include "../../../../drivers/pci/pci.h"
 
 DEFINE_STATIC_KEY_FALSE(shared_processor);
-EXPORT_SYMBOL_GPL(shared_processor);
+EXPORT_SYMBOL(shared_processor);
 
 int CMO_PrPSP = -1;
 int CMO_SecPSP = -1;
index 6d98cd9..7b3483b 100644 (file)
@@ -27,10 +27,10 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
 
 #define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
 
-/* Load initrd at enough distance from DRAM start */
+/* Load initrd anywhere in system RAM */
 static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
 {
-       return image_addr + SZ_256M;
+       return ULONG_MAX;
 }
 
 #define alloc_screen_info(x...)                (&screen_info)
index ff467b9..ac75936 100644 (file)
@@ -132,8 +132,12 @@ unsigned long get_wchan(struct task_struct *task)
 {
        unsigned long pc = 0;
 
-       if (likely(task && task != current && !task_is_running(task)))
+       if (likely(task && task != current && !task_is_running(task))) {
+               if (!try_get_task_stack(task))
+                       return 0;
                walk_stackframe(task, NULL, save_wchan, &pc);
+               put_task_stack(task);
+       }
        return pc;
 }
 
index bceb062..63bc691 100644 (file)
@@ -30,23 +30,23 @@ ENTRY(__asm_copy_from_user)
         * t0 - end of uncopied dst
         */
        add     t0, a0, a2
-       bgtu    a0, t0, 5f
 
        /*
         * Use byte copy only if too small.
+        * SZREG holds 4 for RV32 and 8 for RV64
         */
-       li      a3, 8*SZREG /* size must be larger than size in word_copy */
+       li      a3, 9*SZREG /* size must be larger than size in word_copy */
        bltu    a2, a3, .Lbyte_copy_tail
 
        /*
-        * Copy first bytes until dst is align to word boundary.
+        * Copy first bytes until dst is aligned to word boundary.
         * a0 - start of dst
         * t1 - start of aligned dst
         */
        addi    t1, a0, SZREG-1
        andi    t1, t1, ~(SZREG-1)
        /* dst is already aligned, skip */
-       beq     a0, t1, .Lskip_first_bytes
+       beq     a0, t1, .Lskip_align_dst
 1:
        /* a5 - one byte for copying data */
        fixup lb      a5, 0(a1), 10f
@@ -55,7 +55,7 @@ ENTRY(__asm_copy_from_user)
        addi    a0, a0, 1       /* dst */
        bltu    a0, t1, 1b      /* t1 - start of aligned dst */
 
-.Lskip_first_bytes:
+.Lskip_align_dst:
        /*
         * Now dst is aligned.
         * Use shift-copy if src is misaligned.
@@ -72,10 +72,9 @@ ENTRY(__asm_copy_from_user)
         *
         * a0 - start of aligned dst
         * a1 - start of aligned src
-        * a3 - a1 & mask:(SZREG-1)
         * t0 - end of aligned dst
         */
-       addi    t0, t0, -(8*SZREG-1) /* not to over run */
+       addi    t0, t0, -(8*SZREG) /* not to overrun */
 2:
        fixup REG_L   a4,        0(a1), 10f
        fixup REG_L   a5,    SZREG(a1), 10f
@@ -97,7 +96,7 @@ ENTRY(__asm_copy_from_user)
        addi    a1, a1, 8*SZREG
        bltu    a0, t0, 2b
 
-       addi    t0, t0, 8*SZREG-1 /* revert to original value */
+       addi    t0, t0, 8*SZREG /* revert to original value */
        j       .Lbyte_copy_tail
 
 .Lshift_copy:
@@ -107,7 +106,7 @@ ENTRY(__asm_copy_from_user)
         * For misaligned copy we still perform aligned word copy, but
         * we need to use the value fetched from the previous iteration and
         * do some shifts.
-        * This is safe because reading less than a word size.
+        * This is safe because the read is less than a word size.
         *
         * a0 - start of aligned dst
         * a1 - start of src
@@ -117,7 +116,7 @@ ENTRY(__asm_copy_from_user)
         */
        /* calculating aligned word boundary for dst */
        andi    t1, t0, ~(SZREG-1)
-       /* Converting unaligned src to aligned arc */
+       /* Converting unaligned src to aligned src */
        andi    a1, a1, ~(SZREG-1)
 
        /*
@@ -125,11 +124,11 @@ ENTRY(__asm_copy_from_user)
         * t3 - prev shift
         * t4 - current shift
         */
-       slli    t3, a3, LGREG
+       slli    t3, a3, 3 /* converting bytes in a3 to bits */
        li      a5, SZREG*8
        sub     t4, a5, t3
 
-       /* Load the first word to combine with seceond word */
+       /* Load the first word to combine with second word */
        fixup REG_L   a5, 0(a1), 10f
 
 3:
@@ -161,7 +160,7 @@ ENTRY(__asm_copy_from_user)
         * a1 - start of remaining src
         * t0 - end of remaining dst
         */
-       bgeu    a0, t0, 5f
+       bgeu    a0, t0, .Lout_copy_user  /* check if end of copy */
 4:
        fixup lb      a5, 0(a1), 10f
        addi    a1, a1, 1       /* src */
@@ -169,7 +168,7 @@ ENTRY(__asm_copy_from_user)
        addi    a0, a0, 1       /* dst */
        bltu    a0, t0, 4b      /* t0 - end of dst */
 
-5:
+.Lout_copy_user:
        /* Disable access to user memory */
        csrc CSR_STATUS, t6
        li      a0, 0
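
The shift-copy path that several of these comments describe reads only aligned words from a misaligned source and stitches each destination word from the tail of the previous load and the head of the next. A C rendering of the idea, assuming little-endian byte order and a caller that takes this path only when the source offset is non-zero:

    #include <stddef.h>
    #include <stdint.h>

    /* Caller guarantees 0 < (src % sizeof(long)) < sizeof(long). */
    static void shift_copy(unsigned long *dst, const unsigned char *src,
                           size_t words)
    {
            size_t off = (uintptr_t)src & (sizeof(long) - 1);
            const unsigned long *s = (const unsigned long *)(src - off);
            unsigned int lo = 8 * off;                  /* bits from prev word */
            unsigned int hi = 8 * sizeof(long) - lo;    /* bits from cur word  */
            unsigned long prev = *s++;

            while (words--) {
                    unsigned long cur = *s++;
                    *dst++ = (prev >> lo) | (cur << hi);
                    prev = cur;
            }
    }
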
index 269fc64..a14bf39 100644 (file)
@@ -127,10 +127,17 @@ void __init mem_init(void)
 }
 
 /*
- * The default maximal physical memory size is -PAGE_OFFSET,
- * limit the memory size via mem.
+ * The default maximal physical memory size is -PAGE_OFFSET for a 32-bit kernel,
+ * whereas for a 64-bit kernel, the end of the virtual address space is occupied
+ * by the modules/BPF/kernel mappings, which reduces the available size of the
+ * linear mapping.
+ * Limit the memory size via mem.
  */
+#ifdef CONFIG_64BIT
+static phys_addr_t memory_limit = -PAGE_OFFSET - SZ_4G;
+#else
 static phys_addr_t memory_limit = -PAGE_OFFSET;
+#endif
 
 static int __init early_mem(char *p)
 {
@@ -152,7 +159,7 @@ static void __init setup_bootmem(void)
 {
        phys_addr_t vmlinux_end = __pa_symbol(&_end);
        phys_addr_t vmlinux_start = __pa_symbol(&_start);
-       phys_addr_t max_mapped_addr = __pa(~(ulong)0);
+       phys_addr_t __maybe_unused max_mapped_addr;
        phys_addr_t dram_end;
 
 #ifdef CONFIG_XIP_KERNEL
@@ -175,14 +182,21 @@ static void __init setup_bootmem(void)
        memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
 
        dram_end = memblock_end_of_DRAM();
+
+#ifndef CONFIG_64BIT
        /*
         * memblock allocator is not aware of the fact that last 4K bytes of
         * the addressable memory can not be mapped because of IS_ERR_VALUE
         * macro. Make sure that last 4k bytes are not usable by memblock
-        * if end of dram is equal to maximum addressable memory.
+        * if end of dram is equal to maximum addressable memory.  For 64-bit
+        * kernel, this problem can't happen here as the end of the virtual
+        * address space is occupied by the kernel mapping then this check must
+        * be done in create_kernel_page_table.
         */
+       max_mapped_addr = __pa(~(ulong)0);
        if (max_mapped_addr == (dram_end - 1))
                memblock_set_current_limit(max_mapped_addr - 4096);
+#endif
 
        min_low_pfn = PFN_UP(memblock_start_of_DRAM());
        max_low_pfn = max_pfn = PFN_DOWN(dram_end);
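
The last-4K restriction exists because kernel interfaces encode errno values in the top of the address range, so a pointer into the final page would satisfy IS_ERR_VALUE(). A sketch of the ambiguity, assuming the usual MAX_ERRNO of 4095:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_ERRNO 4095

    /* A virtual address in the last 4 KiB of the address space is
     * indistinguishable from an encoded error value. */
    static bool is_err_value(uintptr_t va)
    {
            return va >= (uintptr_t)-MAX_ERRNO;
    }
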
@@ -570,6 +584,14 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
        BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
        BUG_ON((kernel_map.phys_addr % map_size) != 0);
 
+#ifdef CONFIG_64BIT
+       /*
+        * The last 4K bytes of the addressable memory can not be mapped because
+        * of IS_ERR_VALUE macro.
+        */
+       BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
+#endif
+
        pt_ops.alloc_pte = alloc_pte_early;
        pt_ops.get_pte_virt = get_pte_virt_early;
 #ifndef __PAGETABLE_PMD_FOLDED
@@ -709,6 +731,8 @@ static void __init setup_vm_final(void)
                if (start <= __pa(PAGE_OFFSET) &&
                    __pa(PAGE_OFFSET) < end)
                        start = __pa(PAGE_OFFSET);
+               if (end >= __pa(PAGE_OFFSET) + memory_limit)
+                       end = __pa(PAGE_OFFSET) + memory_limit;
 
                map_size = best_map_size(start, end - start);
                for (pa = start; pa < end; pa += map_size) {
index 81de865..e649742 100644 (file)
@@ -1251,6 +1251,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
                        return -1;
                break;
 
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               break;
+
        case BPF_ST | BPF_MEM | BPF_B:
        case BPF_ST | BPF_MEM | BPF_H:
        case BPF_ST | BPF_MEM | BPF_W:
index 87e3bf5..3af4131 100644 (file)
@@ -939,6 +939,10 @@ out_be:
                emit_ld(rd, 0, RV_REG_T1, ctx);
                break;
 
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               break;
+
        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_B:
                emit_imm(RV_REG_T1, imm, ctx);
index f7c77cd..5ff5fee 100644 (file)
@@ -9,16 +9,6 @@
 #include <asm/errno.h>
 #include <asm/sigp.h>
 
-#ifdef CC_USING_EXPOLINE
-       .pushsection .dma.text.__s390_indirect_jump_r14,"axG"
-__dma__s390_indirect_jump_r14:
-       larl    %r1,0f
-       ex      0,0(%r1)
-       j       .
-0:     br      %r14
-       .popsection
-#endif
-
        .section .dma.text,"ax"
 /*
  * Simplified version of expoline thunk. The normal thunks can not be used here,
@@ -27,11 +17,10 @@ __dma__s390_indirect_jump_r14:
  * affects a few functions that are not performance-relevant.
  */
        .macro BR_EX_DMA_r14
-#ifdef CC_USING_EXPOLINE
-       jg      __dma__s390_indirect_jump_r14
-#else
-       br      %r14
-#endif
+       larl    %r1,0f
+       ex      0,0(%r1)
+       j       .
+0:     br      %r14
        .endm
 
 /*
index 86afcc6..7de253f 100644 (file)
@@ -5,7 +5,12 @@ CONFIG_WATCH_QUEUE=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_LSM=y
 CONFIG_PREEMPT=y
+CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -28,14 +33,13 @@ CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_PERF=y
 CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_MISC=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
-CONFIG_BPF_LSM=y
-CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
@@ -76,6 +80,7 @@ CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
 CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_CGROUP_IOPRIO=y
 CONFIG_BLK_INLINE_ENCRYPTION=y
 CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
 CONFIG_PARTITION_ADVANCED=y
@@ -95,6 +100,7 @@ CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_CMA_DEBUG=y
 CONFIG_CMA_DEBUGFS=y
+CONFIG_CMA_SYSFS=y
 CONFIG_CMA_AREAS=7
 CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
@@ -158,6 +164,7 @@ CONFIG_IPV6_RPL_LWTUNNEL=y
 CONFIG_MPTCP=y
 CONFIG_NETFILTER=y
 CONFIG_BRIDGE_NETFILTER=m
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
@@ -280,6 +287,7 @@ CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NFT_FIB_IPV4=m
 CONFIG_NF_TABLES_ARP=y
+CONFIG_NF_LOG_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -384,12 +392,11 @@ CONFIG_VSOCKETS=m
 CONFIG_VIRTIO_VSOCKETS=m
 CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
-CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 CONFIG_PCI=y
-CONFIG_PCI_IOV=y
 # CONFIG_PCIEASPM is not set
 CONFIG_PCI_DEBUG=y
+CONFIG_PCI_IOV=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_DEVTMPFS=y
@@ -436,7 +443,7 @@ CONFIG_MD_MULTIPATH=m
 CONFIG_MD_FAULTY=m
 CONFIG_MD_CLUSTER=m
 CONFIG_BCACHE=m
-CONFIG_BLK_DEV_DM=m
+CONFIG_BLK_DEV_DM=y
 CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
@@ -453,6 +460,7 @@ CONFIG_DM_MULTIPATH_ST=m
 CONFIG_DM_MULTIPATH_HST=m
 CONFIG_DM_MULTIPATH_IOA=m
 CONFIG_DM_DELAY=m
+CONFIG_DM_INIT=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
 CONFIG_DM_VERITY=m
@@ -495,6 +503,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_GOOGLE is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MICROSOFT is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
@@ -551,7 +560,6 @@ CONFIG_INPUT_EVDEV=y
 CONFIG_LEGACY_PTY_COUNT=0
 CONFIG_VIRTIO_CONSOLE=m
 CONFIG_HW_RANDOM_VIRTIO=m
-CONFIG_RAW_DRIVER=m
 CONFIG_HANGCHECK_TIMER=m
 CONFIG_TN3270_FS=y
 CONFIG_PPS=m
@@ -574,7 +582,6 @@ CONFIG_SYNC_FILE=y
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
 CONFIG_VFIO_MDEV=m
-CONFIG_VFIO_MDEV_DEVICE=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
@@ -619,6 +626,7 @@ CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_VIRTIO_FS=m
 CONFIG_OVERLAY_FS=m
+CONFIG_NETFS_STATS=y
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=y
@@ -654,7 +662,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
 CONFIG_CIFS=m
-CONFIG_CIFS_STATS2=y
 CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
@@ -682,6 +689,7 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_DISABLE=y
 CONFIG_SECURITY_LOCKDOWN_LSM=y
 CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
+CONFIG_SECURITY_LANDLOCK=y
 CONFIG_INTEGRITY_SIGNATURE=y
 CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
 CONFIG_IMA=y
@@ -696,6 +704,7 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
 CONFIG_CRYPTO_ECRDSA=m
 CONFIG_CRYPTO_SM2=m
 CONFIG_CRYPTO_CURVE25519=m
@@ -843,7 +852,6 @@ CONFIG_FAULT_INJECTION_DEBUG_FS=y
 CONFIG_FAIL_FUNCTION=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LKDTM=m
-CONFIG_TEST_LIST_SORT=y
 CONFIG_TEST_MIN_HEAP=y
 CONFIG_TEST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
@@ -853,3 +861,4 @@ CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BITOPS=m
 CONFIG_TEST_BPF=m
+CONFIG_TEST_LIVEPATCH=m
index 71b49ea..b671642 100644 (file)
@@ -4,6 +4,11 @@ CONFIG_WATCH_QUEUE=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_LSM=y
+CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -26,14 +31,13 @@ CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_PERF=y
 CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_MISC=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
-CONFIG_BPF_LSM=y
-CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
@@ -70,6 +74,7 @@ CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
 CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_CGROUP_IOPRIO=y
 CONFIG_BLK_INLINE_ENCRYPTION=y
 CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
 CONFIG_PARTITION_ADVANCED=y
@@ -87,6 +92,7 @@ CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
+CONFIG_CMA_SYSFS=y
 CONFIG_CMA_AREAS=7
 CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
@@ -149,6 +155,7 @@ CONFIG_IPV6_RPL_LWTUNNEL=y
 CONFIG_MPTCP=y
 CONFIG_NETFILTER=y
 CONFIG_BRIDGE_NETFILTER=m
+CONFIG_NETFILTER_NETLINK_HOOK=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
@@ -271,6 +278,7 @@ CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NFT_FIB_IPV4=m
 CONFIG_NF_TABLES_ARP=y
+CONFIG_NF_LOG_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -374,11 +382,10 @@ CONFIG_VSOCKETS=m
 CONFIG_VIRTIO_VSOCKETS=m
 CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
-CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 CONFIG_PCI=y
-CONFIG_PCI_IOV=y
 # CONFIG_PCIEASPM is not set
+CONFIG_PCI_IOV=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_UEVENT_HELPER=y
@@ -427,7 +434,7 @@ CONFIG_MD_MULTIPATH=m
 CONFIG_MD_FAULTY=m
 CONFIG_MD_CLUSTER=m
 CONFIG_BCACHE=m
-CONFIG_BLK_DEV_DM=m
+CONFIG_BLK_DEV_DM=y
 CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
@@ -444,6 +451,7 @@ CONFIG_DM_MULTIPATH_ST=m
 CONFIG_DM_MULTIPATH_HST=m
 CONFIG_DM_MULTIPATH_IOA=m
 CONFIG_DM_DELAY=m
+CONFIG_DM_INIT=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
 CONFIG_DM_VERITY=m
@@ -487,6 +495,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_GOOGLE is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MICROSOFT is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
@@ -543,7 +552,6 @@ CONFIG_INPUT_EVDEV=y
 CONFIG_LEGACY_PTY_COUNT=0
 CONFIG_VIRTIO_CONSOLE=m
 CONFIG_HW_RANDOM_VIRTIO=m
-CONFIG_RAW_DRIVER=m
 CONFIG_HANGCHECK_TIMER=m
 CONFIG_TN3270_FS=y
 # CONFIG_PTP_1588_CLOCK is not set
@@ -566,7 +574,6 @@ CONFIG_SYNC_FILE=y
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
 CONFIG_VFIO_MDEV=m
-CONFIG_VFIO_MDEV_DEVICE=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
@@ -607,6 +614,7 @@ CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_VIRTIO_FS=m
 CONFIG_OVERLAY_FS=m
+CONFIG_NETFS_STATS=y
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=y
@@ -642,7 +650,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
 CONFIG_CIFS=m
-CONFIG_CIFS_STATS2=y
 CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
@@ -669,6 +676,7 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_DISABLE=y
 CONFIG_SECURITY_LOCKDOWN_LSM=y
 CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
+CONFIG_SECURITY_LANDLOCK=y
 CONFIG_INTEGRITY_SIGNATURE=y
 CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
 CONFIG_IMA=y
@@ -684,6 +692,7 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
 CONFIG_CRYPTO_ECRDSA=m
 CONFIG_CRYPTO_SM2=m
 CONFIG_CRYPTO_CURVE25519=m
@@ -754,6 +763,7 @@ CONFIG_CRC8=m
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_GDB_SCRIPTS=y
@@ -781,3 +791,4 @@ CONFIG_LKDTM=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
+CONFIG_TEST_LIVEPATCH=m
index 76123a4..d576aaa 100644 (file)
@@ -29,9 +29,9 @@ CONFIG_PARTITION_ADVANCED=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 # CONFIG_COMPACTION is not set
 # CONFIG_MIGRATION is not set
-# CONFIG_BOUNCE is not set
 CONFIG_NET=y
 # CONFIG_IUCV is not set
+# CONFIG_PCPU_DEV_REFCNT is not set
 # CONFIG_ETHTOOL_NETLINK is not set
 CONFIG_DEVTMPFS=y
 CONFIG_BLK_DEV_RAM=y
@@ -51,7 +51,6 @@ CONFIG_ZFCP=y
 # CONFIG_SERIO is not set
 # CONFIG_HVC_IUCV is not set
 # CONFIG_HW_RANDOM_S390 is not set
-CONFIG_RAW_DRIVER=y
 # CONFIG_HMC_DRV is not set
 # CONFIG_S390_TAPE is not set
 # CONFIG_VMCP is not set
index 695c619..345cbe9 100644 (file)
@@ -19,6 +19,7 @@ void ftrace_caller(void);
 
 extern char ftrace_graph_caller_end;
 extern unsigned long ftrace_plt;
+extern void *ftrace_func;
 
 struct dyn_arch_ftrace { };
 
index 9b4473f..161a9e1 100644 (file)
@@ -445,15 +445,15 @@ struct kvm_vcpu_stat {
        u64 instruction_sigp_init_cpu_reset;
        u64 instruction_sigp_cpu_reset;
        u64 instruction_sigp_unknown;
-       u64 diagnose_10;
-       u64 diagnose_44;
-       u64 diagnose_9c;
-       u64 diagnose_9c_ignored;
-       u64 diagnose_9c_forward;
-       u64 diagnose_258;
-       u64 diagnose_308;
-       u64 diagnose_500;
-       u64 diagnose_other;
+       u64 instruction_diagnose_10;
+       u64 instruction_diagnose_44;
+       u64 instruction_diagnose_9c;
+       u64 diag_9c_ignored;
+       u64 diag_9c_forward;
+       u64 instruction_diagnose_258;
+       u64 instruction_diagnose_308;
+       u64 instruction_diagnose_500;
+       u64 instruction_diagnose_other;
        u64 pfault_sync;
 };
 
index c6ddeb5..2d8f595 100644 (file)
@@ -40,6 +40,7 @@
  * trampoline (ftrace_plt), which clobbers also r1.
  */
 
+void *ftrace_func __read_mostly = ftrace_stub;
 unsigned long ftrace_plt;
 
 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
@@ -85,6 +86,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
+       ftrace_func = func;
        return 0;
 }
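
The hunks above introduce an arch-private ftrace_func pointer that ftrace_update_ftrace_func() keeps current, so the s390 trampoline (see the mcount hunk below) loads a pointer the arch controls instead of the generic ftrace_trace_function. A minimal userspace sketch of that indirection; the single-argument callback signature is simplified for illustration (the kernel's ftrace_func_t takes more parameters):

#include <stdio.h>

/* Simplified callback type; the real ftrace_func_t has more arguments. */
typedef void (*ftrace_func_t)(unsigned long ip);

static void ftrace_stub(unsigned long ip) { (void)ip; }

/* arch-private pointer the trampoline loads, as in the hunk above */
static ftrace_func_t ftrace_func = ftrace_stub;

static int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;	/* picked up on the next trampoline entry */
	return 0;
}

static void my_tracer(unsigned long ip)
{
	printf("traced ip=0x%lx\n", ip);
}

int main(void)
{
	ftrace_func(0x1000);			/* stub: does nothing */
	ftrace_update_ftrace_func(my_tracer);
	ftrace_func(0x2000);			/* now reaches my_tracer */
	return 0;
}
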
 
index faf64c2..6b13797 100644 (file)
@@ -59,13 +59,13 @@ ENTRY(ftrace_caller)
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        aghik   %r2,%r0,-MCOUNT_INSN_SIZE
        lgrl    %r4,function_trace_op
-       lgrl    %r1,ftrace_trace_function
+       lgrl    %r1,ftrace_func
 #else
        lgr     %r2,%r0
        aghi    %r2,-MCOUNT_INSN_SIZE
        larl    %r4,function_trace_op
        lg      %r4,0(%r4)
-       larl    %r1,ftrace_trace_function
+       larl    %r1,ftrace_func
        lg      %r1,0(%r1)
 #endif
        lgr     %r3,%r14
index 975a00c..d7dc36e 100644 (file)
@@ -745,7 +745,7 @@ static int __init cpumf_pmu_init(void)
        if (!cf_dbg) {
                pr_err("Registration of s390dbf(cpum_cf) failed\n");
                return -ENOMEM;
-       };
+       }
        debug_register_view(cf_dbg, &debug_sprintf_view);
 
        cpumf_pmu.attr_groups = cpumf_cf_event_group();
index bbf8622..bd3ef12 100644 (file)
@@ -126,6 +126,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
        case DIE_SSTEP:
                if (uprobe_post_sstep_notifier(regs))
                        return NOTIFY_STOP;
+               break;
        default:
                break;
        }
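
The added break terminates the DIE_SSTEP arm explicitly rather than falling through into default:. Behaviour is unchanged today because the default arm is empty, but the explicit break keeps any future code added to default: from silently running on the DIE_SSTEP path too. A toy reproduction of that hazard:

#include <stdio.h>

int main(void)
{
	switch (1) {
	case 1:
		printf("case 1\n");
		/* without a break here, control falls through... */
	default:
		printf("default also runs\n");	/* ...into this arm */
	}
	return 0;
}
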
index b2349a3..3457dcf 100644 (file)
@@ -29,6 +29,7 @@ $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
 $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
 
 obj-y += vdso32_wrapper.o
+targets += vdso32.lds
 CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
 
 # Disable gcov profiling, ubsan and kasan for VDSO code
index 02c146f..807fa9d 100644 (file)
@@ -24,7 +24,7 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
 
        start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
        end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
-       vcpu->stat.diagnose_10++;
+       vcpu->stat.instruction_diagnose_10++;
 
        if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
            || start < 2 * PAGE_SIZE)
@@ -74,7 +74,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
 
        VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
                   vcpu->run->s.regs.gprs[rx]);
-       vcpu->stat.diagnose_258++;
+       vcpu->stat.instruction_diagnose_258++;
        if (vcpu->run->s.regs.gprs[rx] & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
@@ -145,7 +145,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
 static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 {
        VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
-       vcpu->stat.diagnose_44++;
+       vcpu->stat.instruction_diagnose_44++;
        kvm_vcpu_on_spin(vcpu, true);
        return 0;
 }
@@ -169,7 +169,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
        int tid;
 
        tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
-       vcpu->stat.diagnose_9c++;
+       vcpu->stat.instruction_diagnose_9c++;
 
        /* yield to self */
        if (tid == vcpu->vcpu_id)
@@ -192,7 +192,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
                VCPU_EVENT(vcpu, 5,
                           "diag time slice end directed to %d: yield forwarded",
                           tid);
-               vcpu->stat.diagnose_9c_forward++;
+               vcpu->stat.diag_9c_forward++;
                return 0;
        }
 
@@ -203,7 +203,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
        return 0;
 no_yield:
        VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid);
-       vcpu->stat.diagnose_9c_ignored++;
+       vcpu->stat.diag_9c_ignored++;
        return 0;
 }
 
@@ -213,7 +213,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
        unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;
 
        VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
-       vcpu->stat.diagnose_308++;
+       vcpu->stat.instruction_diagnose_308++;
        switch (subcode) {
        case 3:
                vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
@@ -245,7 +245,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
 {
        int ret;
 
-       vcpu->stat.diagnose_500++;
+       vcpu->stat.instruction_diagnose_500++;
        /* No virtio-ccw notification? Get out quickly. */
        if (!vcpu->kvm->arch.css_support ||
            (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
@@ -299,7 +299,7 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
        case 0x500:
                return __diag_virtio_hypercall(vcpu);
        default:
-               vcpu->stat.diagnose_other++;
+               vcpu->stat.instruction_diagnose_other++;
                return -EOPNOTSUPP;
        }
 }
index b655a7d..4527ac7 100644 (file)
@@ -163,15 +163,15 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
        STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
        STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
-       STATS_DESC_COUNTER(VCPU, diagnose_10),
-       STATS_DESC_COUNTER(VCPU, diagnose_44),
-       STATS_DESC_COUNTER(VCPU, diagnose_9c),
-       STATS_DESC_COUNTER(VCPU, diagnose_9c_ignored),
-       STATS_DESC_COUNTER(VCPU, diagnose_9c_forward),
-       STATS_DESC_COUNTER(VCPU, diagnose_258),
-       STATS_DESC_COUNTER(VCPU, diagnose_308),
-       STATS_DESC_COUNTER(VCPU, diagnose_500),
-       STATS_DESC_COUNTER(VCPU, diagnose_other),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
+       STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
+       STATS_DESC_COUNTER(VCPU, diag_9c_forward),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
+       STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
        STATS_DESC_COUNTER(VCPU, pfault_sync)
 };
 static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
index 63cae04..8841926 100644 (file)
@@ -112,7 +112,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 {
        u32 r1 = reg2hex[b1];
 
-       if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15)
+       if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1])
                jit->seen_reg[r1] = 1;
 }
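
The reordered condition evaluates the register-range checks before seen_reg[r1] is read; short-circuit evaluation then guarantees the array is only indexed for r1 in 6..15. A self-contained sketch of the pattern (array size chosen for illustration):

#include <stdbool.h>
#include <stdio.h>

static bool seen_reg[16];	/* size chosen for illustration */

static void reg_set_seen(unsigned int r1)
{
	/* range checks first; the array access only runs when in bounds */
	if (r1 >= 6 && r1 <= 15 && !seen_reg[r1])
		seen_reg[r1] = true;
}

int main(void)
{
	reg_set_seen(42);	/* rejected by the range check, no OOB read */
	reg_set_seen(7);
	printf("r7 seen: %d\n", seen_reg[7]);
	return 0;
}
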
 
@@ -1154,6 +1154,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                }
                break;
        /*
+        * BPF_NOSPEC (speculation barrier)
+        */
+       case BPF_ST | BPF_NOSPEC:
+               break;
+       /*
         * BPF_ST(X)
         */
        case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
index 45a0549..b683b69 100644 (file)
@@ -39,7 +39,6 @@ config SUPERH
        select HAVE_FUTEX_CMPXCHG if FUTEX
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_HW_BREAKPOINT
-       select HAVE_IDE if HAS_IOPORT_MAP
        select HAVE_IOREMAP_PROT if MMU && !X2TLB
        select HAVE_KERNEL_BZIP2
        select HAVE_KERNEL_GZIP
index c5fa793..f0c0f95 100644 (file)
@@ -19,7 +19,6 @@ config SPARC
        select OF
        select OF_PROMTREE
        select HAVE_ASM_MODVERSIONS
-       select HAVE_IDE
        select HAVE_ARCH_KGDB if !SMP || SPARC64
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_SECCOMP if SPARC64
index 4b8d3c6..9a2f20c 100644 (file)
@@ -1287,6 +1287,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                        return 1;
                break;
        }
+       /* speculation barrier */
+       case BPF_ST | BPF_NOSPEC:
+               break;
        /* ST: *(size *)(dst + off) = imm */
        case BPF_ST | BPF_MEM | BPF_W:
        case BPF_ST | BPF_MEM | BPF_H:
index 4927065..88fb922 100644 (file)
@@ -202,7 +202,6 @@ config X86
        select HAVE_FUNCTION_TRACER
        select HAVE_GCC_PLUGINS
        select HAVE_HW_BREAKPOINT
-       select HAVE_IDE
        select HAVE_IOREMAP_PROT
        select HAVE_IRQ_EXIT_ON_IRQ_STACK       if X86_64
        select HAVE_IRQ_TIME_ACCOUNTING
index cc8f177..c890d67 100644 (file)
@@ -237,7 +237,7 @@ static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
        for_each_present_cpu(i) {
                if (i == 0)
                        continue;
-               ret = hv_call_add_logical_proc(numa_cpu_node(i), i, i);
+               ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i));
                BUG_ON(ret);
        }
 
index 674906f..68f091b 100644 (file)
@@ -79,9 +79,10 @@ __jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
        return (struct jump_label_patch){.code = code, .size = size};
 }
 
-static inline void __jump_label_transform(struct jump_entry *entry,
-                                         enum jump_label_type type,
-                                         int init)
+static __always_inline void
+__jump_label_transform(struct jump_entry *entry,
+                      enum jump_label_type type,
+                      int init)
 {
        const struct jump_label_patch jlp = __jump_label_patch(entry, type);
 
index c42613c..739be5d 100644 (file)
@@ -765,7 +765,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 
                edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS);
                edx.split.bit_width_fixed = cap.bit_width_fixed;
-               edx.split.anythread_deprecated = 1;
+               if (cap.version)
+                       edx.split.anythread_deprecated = 1;
                edx.split.reserved1 = 0;
                edx.split.reserved2 = 0;
 
@@ -940,8 +941,21 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
                unsigned phys_as = entry->eax & 0xff;
 
-               if (!g_phys_as)
+               /*
+                * If TDP (NPT) is disabled, use the adjusted host MAXPHYADDR as
+                * the guest operates in the same PA space as the host, i.e.
+                * reductions in MAXPHYADDR for memory encryption affect shadow
+                * paging, too.
+                *
+                * If TDP is enabled but an explicit guest MAXPHYADDR is not
+                * provided, use the raw bare metal MAXPHYADDR, as reductions to
+                * the HPAs do not affect GPAs.
+                */
+               if (!tdp_enabled)
+                       g_phys_as = boot_cpu_data.x86_phys_bits;
+               else if (!g_phys_as)
                        g_phys_as = phys_as;
+
                entry->eax = g_phys_as | (virt_as << 8);
                entry->edx = 0;
                cpuid_entry_override(entry, CPUID_8000_0008_EBX);
@@ -964,12 +978,18 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
        case 0x8000001a:
        case 0x8000001e:
                break;
-       /* Support memory encryption cpuid if host supports it */
        case 0x8000001F:
-               if (!kvm_cpu_cap_has(X86_FEATURE_SEV))
+               if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
-               else
+               } else {
                        cpuid_entry_override(entry, CPUID_8000_001F_EAX);
+
+                       /*
+                        * Enumerate '0' for "PA bits reduction", the adjusted
+                        * MAXPHYADDR is enumerated directly (see 0x80000008).
+                        */
+                       entry->ebx &= ~GENMASK(11, 6);
+               }
                break;
        /*Add support for Centaur's CPUID instruction*/
        case 0xC0000000:
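
The comment block in the 0x80000008 hunk reduces to a three-way choice for the MAXPHYADDR advertised to the guest. Restated as a pure function for clarity — a sketch using the hunk's variable names, with boot_phys_bits standing in for boot_cpu_data.x86_phys_bits:

#include <stdio.h>

/* Mirrors the selection in __do_cpuid_func(); not the kernel's code. */
static unsigned int guest_phys_bits(int tdp_enabled, unsigned int g_phys_as,
				    unsigned int phys_as,
				    unsigned int boot_phys_bits)
{
	if (!tdp_enabled)
		return boot_phys_bits;	/* shadow paging: adjusted host value */
	if (!g_phys_as)
		return phys_as;		/* TDP, no explicit guest value: raw */
	return g_phys_as;		/* TDP with an explicit guest value */
}

int main(void)
{
	/* raw MAXPHYADDR 48, host-adjusted 43 (e.g. SME), guest asks for 45 */
	printf("%u\n", guest_phys_bits(0, 45, 48, 43));	/* 43 */
	printf("%u\n", guest_phys_bits(1,  0, 48, 43));	/* 48 */
	printf("%u\n", guest_phys_bits(1, 45, 48, 43));	/* 45 */
	return 0;
}
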
index 698969e..ff005fe 100644 (file)
@@ -96,7 +96,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
 {
        ioapic->rtc_status.pending_eoi = 0;
-       bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
+       bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID + 1);
 }
 
 static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
index 6604017..11e4065 100644 (file)
@@ -43,13 +43,13 @@ struct kvm_vcpu;
 
 struct dest_map {
        /* vcpu bitmap where IRQ has been sent */
-       DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);
+       DECLARE_BITMAP(map, KVM_MAX_VCPU_ID + 1);
 
        /*
         * Vector sent to a given vcpu, only valid when
         * the vcpu's bit in map is set
         */
-       u8 vectors[KVM_MAX_VCPU_ID];
+       u8 vectors[KVM_MAX_VCPU_ID + 1];
 };
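
The + 1 matters because KVM_MAX_VCPU_ID is itself a valid vcpu ID: both the bitmap and the vectors array need KVM_MAX_VCPU_ID + 1 entries for that highest ID to be indexable. A compile-time illustration — the ID value here is invented; only the arithmetic is the point:

#include <assert.h>
#include <stdint.h>

#define KVM_MAX_VCPU_ID 1023	/* illustrative value, not the kernel's */

static uint8_t vectors[KVM_MAX_VCPU_ID + 1];

int main(void)
{
	/* the highest valid index is KVM_MAX_VCPU_ID itself */
	static_assert(sizeof(vectors) > KVM_MAX_VCPU_ID,
		      "array must have KVM_MAX_VCPU_ID + 1 entries");
	vectors[KVM_MAX_VCPU_ID] = 0xfe;	/* in bounds only with + 1 */
	return vectors[KVM_MAX_VCPU_ID] == 0xfe ? 0 : 1;
}
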
 
 
index 845d114..66f7f5b 100644 (file)
@@ -53,6 +53,8 @@
 #include <asm/kvm_page_track.h>
 #include "trace.h"
 
+#include "paging.h"
+
 extern bool itlb_multihit_kvm_mitigation;
 
 int __read_mostly nx_huge_pages = -1;
diff --git a/arch/x86/kvm/mmu/paging.h b/arch/x86/kvm/mmu/paging.h
new file mode 100644 (file)
index 0000000..de8ab32
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Shadow paging constants/helpers that don't need to be #undef'd. */
+#ifndef __KVM_X86_PAGING_H
+#define __KVM_X86_PAGING_H
+
+#define GUEST_PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
+#define PT64_LVL_ADDR_MASK(level) \
+       (GUEST_PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
+                                               * PT64_LEVEL_BITS))) - 1))
+#define PT64_LVL_OFFSET_MASK(level) \
+       (GUEST_PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
+                                               * PT64_LEVEL_BITS))) - 1))
+#endif /* __KVM_X86_PAGING_H */
+
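
The masks moved into this new header pick out the address bits of a guest PTE at a given paging level. A worked example with the conventional x86-64 values PAGE_SHIFT = 12 and PT64_LEVEL_BITS = 9, both assumed here (they are defined elsewhere in the KVM MMU):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1ULL << PAGE_SHIFT)
#define PT64_LEVEL_BITS 9

#define GUEST_PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(uint64_t)(PAGE_SIZE - 1))
#define PT64_LVL_ADDR_MASK(level) \
	(GUEST_PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))

int main(void)
{
	/* level-2 entries map 2 MiB: the mask keeps PTE bits 21..51 */
	printf("PT64_LVL_ADDR_MASK(2) = 0x%016llx\n",
	       (unsigned long long)PT64_LVL_ADDR_MASK(2));
	return 0;
}
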
index 490a028..ee044d3 100644 (file)
@@ -24,7 +24,7 @@
        #define pt_element_t u64
        #define guest_walker guest_walker64
        #define FNAME(name) paging##64_##name
-       #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
+       #define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK
        #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
        #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
@@ -57,7 +57,7 @@
        #define pt_element_t u64
        #define guest_walker guest_walkerEPT
        #define FNAME(name) ept_##name
-       #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
+       #define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK
        #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
        #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
index 7a5ce93..eb7b227 100644 (file)
@@ -38,12 +38,6 @@ static_assert(SPTE_TDP_AD_ENABLED_MASK == 0);
 #else
 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
 #endif
-#define PT64_LVL_ADDR_MASK(level) \
-       (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
-                                               * PT64_LEVEL_BITS))) - 1))
-#define PT64_LVL_OFFSET_MASK(level) \
-       (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
-                                               * PT64_LEVEL_BITS))) - 1))
 
 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
                        | shadow_x_mask | shadow_nx_mask | shadow_me_mask)
index 1d01da6..a8ad78a 100644 (file)
@@ -646,7 +646,7 @@ out:
 void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
-       struct vmcb *vmcb = svm->vmcb;
+       struct vmcb *vmcb = svm->vmcb01.ptr;
        bool activated = kvm_vcpu_apicv_active(vcpu);
 
        if (!enable_apicv)
index 21d03e3..61738ff 100644 (file)
@@ -154,6 +154,10 @@ void recalc_intercepts(struct vcpu_svm *svm)
 
        for (i = 0; i < MAX_INTERCEPT; i++)
                c->intercepts[i] |= g->intercepts[i];
+
+       /* If SMI is not intercepted, ignore guest SMI intercept as well */
+       if (!intercept_smi)
+               vmcb_clr_intercept(c, INTERCEPT_SMI);
 }
 
 static void copy_vmcb_control_area(struct vmcb_control_area *dst,
@@ -304,8 +308,8 @@ static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu,
        return true;
 }
 
-static void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
-                                           struct vmcb_control_area *control)
+void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
+                                    struct vmcb_control_area *control)
 {
        copy_vmcb_control_area(&svm->nested.ctl, control);
 
@@ -511,7 +515,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
         * Also covers avic_vapic_bar, avic_backing_page, avic_logical_id,
         * avic_physical_id.
         */
-       WARN_ON(svm->vmcb01.ptr->control.int_ctl & AVIC_ENABLE_MASK);
+       WARN_ON(kvm_apicv_activated(svm->vcpu.kvm));
 
        /* Copied from vmcb01.  msrpm_base can be overwritten later.  */
        svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl;
@@ -618,6 +622,11 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
        struct kvm_host_map map;
        u64 vmcb12_gpa;
 
+       if (!svm->nested.hsave_msr) {
+               kvm_inject_gp(vcpu, 0);
+               return 1;
+       }
+
        if (is_smm(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
@@ -692,7 +701,28 @@ out:
        return ret;
 }
 
-void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
+/* Copy state save area fields which are handled by VMRUN */
+void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
+                         struct vmcb_save_area *from_save)
+{
+       to_save->es = from_save->es;
+       to_save->cs = from_save->cs;
+       to_save->ss = from_save->ss;
+       to_save->ds = from_save->ds;
+       to_save->gdtr = from_save->gdtr;
+       to_save->idtr = from_save->idtr;
+       to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
+       to_save->efer = from_save->efer;
+       to_save->cr0 = from_save->cr0;
+       to_save->cr3 = from_save->cr3;
+       to_save->cr4 = from_save->cr4;
+       to_save->rax = from_save->rax;
+       to_save->rsp = from_save->rsp;
+       to_save->rip = from_save->rip;
+       to_save->cpl = 0;
+}
+
+void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
 {
        to_vmcb->save.fs = from_vmcb->save.fs;
        to_vmcb->save.gs = from_vmcb->save.gs;
@@ -1355,28 +1385,11 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 
        svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
 
-       svm->vmcb01.ptr->save.es = save->es;
-       svm->vmcb01.ptr->save.cs = save->cs;
-       svm->vmcb01.ptr->save.ss = save->ss;
-       svm->vmcb01.ptr->save.ds = save->ds;
-       svm->vmcb01.ptr->save.gdtr = save->gdtr;
-       svm->vmcb01.ptr->save.idtr = save->idtr;
-       svm->vmcb01.ptr->save.rflags = save->rflags | X86_EFLAGS_FIXED;
-       svm->vmcb01.ptr->save.efer = save->efer;
-       svm->vmcb01.ptr->save.cr0 = save->cr0;
-       svm->vmcb01.ptr->save.cr3 = save->cr3;
-       svm->vmcb01.ptr->save.cr4 = save->cr4;
-       svm->vmcb01.ptr->save.rax = save->rax;
-       svm->vmcb01.ptr->save.rsp = save->rsp;
-       svm->vmcb01.ptr->save.rip = save->rip;
-       svm->vmcb01.ptr->save.cpl = 0;
-
+       svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
        nested_load_control_from_vmcb12(svm, ctl);
 
        svm_switch_vmcb(svm, &svm->nested.vmcb02);
-
        nested_vmcb02_prepare_control(svm);
-
        kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
        ret = 0;
 out_free:
index 62926f1..6710d9e 100644 (file)
@@ -1272,8 +1272,8 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
        /* Pin guest memory */
        guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
                                    PAGE_SIZE, &n, 0);
-       if (!guest_page)
-               return -EFAULT;
+       if (IS_ERR(guest_page))
+               return PTR_ERR(guest_page);
 
        /* allocate memory for header and transport buffer */
        ret = -ENOMEM;
@@ -1310,8 +1310,9 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
        }
 
        /* Copy packet header to userspace. */
-       ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
-                               params.hdr_len);
+       if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
+                        params.hdr_len))
+               ret = -EFAULT;
 
 e_free_trans_data:
        kfree(trans_data);
@@ -1463,11 +1464,12 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
        data.trans_len = params.trans_len;
 
        /* Pin guest memory */
-       ret = -EFAULT;
        guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
                                    PAGE_SIZE, &n, 0);
-       if (!guest_page)
+       if (IS_ERR(guest_page)) {
+               ret = PTR_ERR(guest_page);
                goto e_free_trans;
+       }
 
        /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
        data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
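
Both sev.c hunks fix the same class of bug: sev_pin_memory() returns an ERR_PTR-encoded error rather than NULL, so callers must test with IS_ERR() and recover the errno with PTR_ERR(). A self-contained sketch of the convention, with simplified userspace doubles of the helpers from linux/err.h:

#include <errno.h>
#include <stdio.h>

/* Simplified userspace doubles of the helpers in linux/err.h. */
#define MAX_ERRNO 4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *pin_memory(int fail)
{
	return fail ? ERR_PTR(-EFAULT) : (void *)0x1000;
}

int main(void)
{
	void *pages = pin_memory(1);

	if (IS_ERR(pages)) {		/* a NULL check would miss this */
		printf("pin failed: %ld\n", PTR_ERR(pages));
		return 1;
	}
	return 0;
}
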
index 8834822..e8ccab5 100644 (file)
@@ -198,6 +198,11 @@ module_param(avic, bool, 0444);
 bool __read_mostly dump_invalid_vmcb;
 module_param(dump_invalid_vmcb, bool, 0644);
 
+
+bool intercept_smi = true;
+module_param(intercept_smi, bool, 0444);
+
+
 static bool svm_gp_erratum_intercept = true;
 
 static u8 rsm_ins_bytes[] = "\x0f\xaa";
@@ -1185,7 +1190,10 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
 
        svm_set_intercept(svm, INTERCEPT_INTR);
        svm_set_intercept(svm, INTERCEPT_NMI);
-       svm_set_intercept(svm, INTERCEPT_SMI);
+
+       if (intercept_smi)
+               svm_set_intercept(svm, INTERCEPT_SMI);
+
        svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
        svm_set_intercept(svm, INTERCEPT_RDPMC);
        svm_set_intercept(svm, INTERCEPT_CPUID);
@@ -1398,8 +1406,6 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
                goto error_free_vmsa_page;
        }
 
-       svm_vcpu_init_msrpm(vcpu, svm->msrpm);
-
        svm->vmcb01.ptr = page_address(vmcb01_page);
        svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
 
@@ -1411,6 +1417,8 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
        svm_switch_vmcb(svm, &svm->vmcb01);
        init_vmcb(vcpu);
 
+       svm_vcpu_init_msrpm(vcpu, svm->msrpm);
+
        svm_init_osvw(vcpu);
        vcpu->arch.microcode_version = 0x01000065;
 
@@ -1560,8 +1568,11 @@ static void svm_set_vintr(struct vcpu_svm *svm)
 {
        struct vmcb_control_area *control;
 
-       /* The following fields are ignored when AVIC is enabled */
-       WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu));
+       /*
+        * The following fields are ignored when AVIC is enabled
+        */
+       WARN_ON(kvm_apicv_activated(svm->vcpu.kvm));
+
        svm_set_intercept(svm, INTERCEPT_VINTR);
 
        /*
@@ -1923,7 +1934,7 @@ static int npf_interception(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
+       u64 fault_address = svm->vmcb->control.exit_info_2;
        u64 error_code = svm->vmcb->control.exit_info_1;
 
        trace_kvm_page_fault(fault_address, error_code);
@@ -2106,6 +2117,11 @@ static int nmi_interception(struct kvm_vcpu *vcpu)
        return 1;
 }
 
+static int smi_interception(struct kvm_vcpu *vcpu)
+{
+       return 1;
+}
+
 static int intr_interception(struct kvm_vcpu *vcpu)
 {
        ++vcpu->stat.irq_exits;
@@ -2134,11 +2150,12 @@ static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
        ret = kvm_skip_emulated_instruction(vcpu);
 
        if (vmload) {
-               nested_svm_vmloadsave(vmcb12, svm->vmcb);
+               svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
                svm->sysenter_eip_hi = 0;
                svm->sysenter_esp_hi = 0;
-       } else
-               nested_svm_vmloadsave(svm->vmcb, vmcb12);
+       } else {
+               svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
+       }
 
        kvm_vcpu_unmap(vcpu, &map, true);
 
@@ -2941,7 +2958,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                        svm_disable_lbrv(vcpu);
                break;
        case MSR_VM_HSAVE_PA:
-               svm->nested.hsave_msr = data;
+               /*
+                * Old kernels did not validate the value written to
+                * MSR_VM_HSAVE_PA.  Allow KVM_SET_MSR to set an invalid
+                * value to allow live migrating buggy or malicious guests
+                * originating from those kernels.
+                */
+               if (!msr->host_initiated && !page_address_valid(vcpu, data))
+                       return 1;
+
+               svm->nested.hsave_msr = data & PAGE_MASK;
                break;
        case MSR_VM_CR:
                return svm_set_vm_cr(vcpu, data);
@@ -3080,8 +3106,7 @@ static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        [SVM_EXIT_EXCP_BASE + GP_VECTOR]        = gp_interception,
        [SVM_EXIT_INTR]                         = intr_interception,
        [SVM_EXIT_NMI]                          = nmi_interception,
-       [SVM_EXIT_SMI]                          = kvm_emulate_as_nop,
-       [SVM_EXIT_INIT]                         = kvm_emulate_as_nop,
+       [SVM_EXIT_SMI]                          = smi_interception,
        [SVM_EXIT_VINTR]                        = interrupt_window_interception,
        [SVM_EXIT_RDPMC]                        = kvm_emulate_rdpmc,
        [SVM_EXIT_CPUID]                        = kvm_emulate_cpuid,
@@ -4288,6 +4313,7 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
+       struct kvm_host_map map_save;
        int ret;
 
        if (is_guest_mode(vcpu)) {
@@ -4303,6 +4329,29 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
                ret = nested_svm_vmexit(svm);
                if (ret)
                        return ret;
+
+               /*
+                * KVM uses VMCB01 to store L1 host state while L2 runs but
+                * VMCB01 is going to be used during SMM and thus the state will
+                * be lost. Temporarily save the non-VMLOAD/VMSAVE state to the
+                * host save area pointed to by MSR_VM_HSAVE_PA. The APM
+                * guarantees that the format of the area is identical to the
+                * guest save area offset by 0x400 (matching the offset of
+                * 'struct vmcb_save_area' within 'struct vmcb'). Note: the
+                * HSAVE area may also be used by the L1 hypervisor to save
+                * additional host context (e.g. KVM does that, see
+                * svm_prepare_guest_switch()), which must be preserved.
+                */
+               if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
+                                &map_save) == -EINVAL)
+                       return 1;
+
+               BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
+
+               svm_copy_vmrun_state(map_save.hva + 0x400,
+                                    &svm->vmcb01.ptr->save);
+
+               kvm_vcpu_unmap(vcpu, &map_save, true);
        }
        return 0;
 }
@@ -4310,13 +4359,14 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
-       struct kvm_host_map map;
+       struct kvm_host_map map, map_save;
        int ret = 0;
 
        if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
                u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
                u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
                u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
+               struct vmcb *vmcb12;
 
                if (guest) {
                        if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
@@ -4332,8 +4382,25 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
                        if (svm_allocate_nested(svm))
                                return 1;
 
-                       ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, map.hva);
+                       vmcb12 = map.hva;
+
+                       nested_load_control_from_vmcb12(svm, &vmcb12->control);
+
+                       ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
                        kvm_vcpu_unmap(vcpu, &map, true);
+
+                       /*
+                        * Restore L1 host state from L1 HSAVE area as VMCB01 was
+                        * used during SMM (see svm_enter_smm())
+                        */
+                       if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
+                                        &map_save) == -EINVAL)
+                               return 1;
+
+                       svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
+                                            map_save.hva + 0x400);
+
+                       kvm_vcpu_unmap(vcpu, &map_save, true);
                }
        }
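
The HSAVE save/restore in these two hunks leans on one layout fact, guarded by the BUILD_BUG_ON: the save area begins exactly 0x400 bytes into struct vmcb, so map_save.hva + 0x400 addresses a plain vmcb_save_area. A userspace sketch of that invariant — the field sizes are placeholders; only the offsetof() check mirrors the patch:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Placeholder field sizes; only the offsetof() check mirrors the patch. */
struct vmcb_save_area { uint8_t bytes[0x300]; };

struct vmcb {
	uint8_t control[0x400];		/* control area fills 0x000..0x3ff */
	struct vmcb_save_area save;	/* must land at offset 0x400 */
};

int main(void)
{
	static_assert(offsetof(struct vmcb, save) == 0x400,
		      "save area must start at 0x400");
	return 0;
}
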
 
index f89b623..bd0fe94 100644 (file)
@@ -31,6 +31,7 @@
 #define MSRPM_OFFSETS  16
 extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
 extern bool npt_enabled;
+extern bool intercept_smi;
 
 /*
  * Clean bits in VMCB.
@@ -463,7 +464,9 @@ void svm_leave_nested(struct vcpu_svm *svm);
 void svm_free_nested(struct vcpu_svm *svm);
 int svm_allocate_nested(struct vcpu_svm *svm);
 int nested_svm_vmrun(struct kvm_vcpu *vcpu);
-void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
+void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
+                         struct vmcb_save_area *from_save);
+void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
 int nested_svm_vmexit(struct vcpu_svm *svm);
 
 static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
@@ -479,6 +482,8 @@ int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
 int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                               bool has_error_code, u32 error_code);
 int nested_svm_exit_special(struct vcpu_svm *svm);
+void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
+                                    struct vmcb_control_area *control);
 void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
 void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
 void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);
index 9b9a55a..c53b8bf 100644 (file)
@@ -89,7 +89,7 @@ static inline void svm_hv_vmcb_dirty_nested_enlightenments(
         * as we mark it dirty unconditionally towards end of vcpu
         * init phase.
         */
-       if (vmcb && vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) &&
+       if (vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) &&
            hve->hv_enlightenments_control.msr_bitmap)
                vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
 }
index 3979a94..db88ed4 100644 (file)
@@ -14,8 +14,6 @@
 #include "vmx_ops.h"
 #include "cpuid.h"
 
-extern const u32 vmx_msr_index[];
-
 #define MSR_TYPE_R     1
 #define MSR_TYPE_W     2
 #define MSR_TYPE_RW    3
index c6dc1b4..4116567 100644 (file)
@@ -3407,7 +3407,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        return 1;
                break;
        case MSR_KVM_ASYNC_PF_ACK:
-               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
                        return 1;
                if (data & 0x1) {
                        vcpu->arch.apf.pageready_pending = false;
@@ -3746,7 +3746,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = vcpu->arch.apf.msr_int_val;
                break;
        case MSR_KVM_ASYNC_PF_ACK:
-               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+               if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
                        return 1;
 
                msr_info->data = 0;
@@ -9601,6 +9601,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                set_debugreg(vcpu->arch.eff_db[3], 3);
                set_debugreg(vcpu->arch.dr6, 6);
                vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
+       } else if (unlikely(hw_breakpoint_active())) {
+               set_debugreg(0, 7);
        }
 
        for (;;) {
@@ -10985,9 +10987,6 @@ int kvm_arch_hardware_setup(void *opaque)
        int r;
 
        rdmsrl_safe(MSR_EFER, &host_efer);
-       if (WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_NX) &&
-                        !(host_efer & EFER_NX)))
-               return -EIO;
 
        if (boot_cpu_has(X86_FEATURE_XSAVES))
                rdmsrl(MSR_IA32_XSS, host_xss);
index 3364fe6..3481b35 100644 (file)
@@ -682,7 +682,6 @@ int p4d_clear_huge(p4d_t *p4d)
 }
 #endif
 
-#if CONFIG_PGTABLE_LEVELS > 3
 /**
  * pud_set_huge - setup kernel PUD mapping
  *
@@ -722,23 +721,6 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
 }
 
 /**
- * pud_clear_huge - clear kernel PUD mapping when it is set
- *
- * Returns 1 on success and 0 on failure (no PUD map is found).
- */
-int pud_clear_huge(pud_t *pud)
-{
-       if (pud_large(*pud)) {
-               pud_clear(pud);
-               return 1;
-       }
-
-       return 0;
-}
-#endif
-
-#if CONFIG_PGTABLE_LEVELS > 2
-/**
  * pmd_set_huge - setup kernel PMD mapping
  *
  * See text over pud_set_huge() above.
@@ -769,6 +751,21 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
 }
 
 /**
+ * pud_clear_huge - clear kernel PUD mapping when it is set
+ *
+ * Returns 1 on success and 0 on failure (no PUD map is found).
+ */
+int pud_clear_huge(pud_t *pud)
+{
+       if (pud_large(*pud)) {
+               pud_clear(pud);
+               return 1;
+       }
+
+       return 0;
+}
+
+/**
  * pmd_clear_huge - clear kernel PMD mapping when it is set
  *
  * Returns 1 on success and 0 on failure (no PMD map is found).
@@ -782,7 +779,6 @@ int pmd_clear_huge(pmd_t *pmd)
 
        return 0;
 }
-#endif
 
 #ifdef CONFIG_X86_64
 /**
index e835164..16d76f8 100644 (file)
@@ -570,6 +570,9 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
 
        for (i = 0; i < prog->aux->size_poke_tab; i++) {
                poke = &prog->aux->poke_tab[i];
+               if (poke->aux && poke->aux != prog->aux)
+                       continue;
+
                WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
 
                if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
@@ -1216,6 +1219,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                        }
                        break;
 
+                       /* speculation barrier */
+               case BPF_ST | BPF_NOSPEC:
+                       if (boot_cpu_has(X86_FEATURE_XMM2))
+                               /* Emit 'lfence' */
+                               EMIT3(0x0F, 0xAE, 0xE8);
+                       break;
+
                        /* ST: *(u8*)(dst_reg + off) = imm */
                case BPF_ST | BPF_MEM | BPF_B:
                        if (is_ereg(dst_reg))
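
EMIT3(0x0F, 0xAE, 0xE8) writes the three-byte encoding of the x86 LFENCE instruction, the speculation barrier the JIT uses for BPF_NOSPEC whenever SSE2 (X86_FEATURE_XMM2) is available. A trivial standalone check of those bytes:

#include <stdio.h>

int main(void)
{
	/* lfence = 0f ae e8: exactly the bytes EMIT3() writes above */
	const unsigned char lfence[] = { 0x0f, 0xae, 0xe8 };
	unsigned int i;

	for (i = 0; i < sizeof(lfence); i++)
		printf("%02x ", lfence[i]);
	printf("\n");
	return 0;
}
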
index 3da88de..3bfda5f 100644 (file)
@@ -1886,6 +1886,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                        i++;
                        break;
                }
+               /* speculation barrier */
+               case BPF_ST | BPF_NOSPEC:
+                       if (boot_cpu_has(X86_FEATURE_XMM2))
+                               /* Emit 'lfence' */
+                               EMIT3(0x0F, 0xAE, 0xE8);
+                       break;
                /* ST: *(u8*)(dst_reg + off) = imm */
                case BPF_ST | BPF_MEM | BPF_H:
                case BPF_ST | BPF_MEM | BPF_B:
index 2332b21..3878880 100644 (file)
@@ -327,7 +327,6 @@ config XTENSA_PLATFORM_ISS
 
 config XTENSA_PLATFORM_XT2000
        bool "XT2000"
-       select HAVE_IDE
        help
          XT2000 is the name of Tensilica's feature-rich emulation platform.
          This hardware is capable of running a full Linux distribution.
index c2d6bc8..5fac375 100644 (file)
@@ -1440,16 +1440,17 @@ static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
                return -1;
 
        iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
+       wait->committed = true;
 
        /*
         * autoremove_wake_function() removes the wait entry only when it
-        * actually changed the task state.  We want the wait always
-        * removed.  Remove explicitly and use default_wake_function().
+        * actually changed the task state. We want the wait always removed.
+        * Remove explicitly and use default_wake_function(). Note that the
+        * order of operations is important as finish_wait() tests whether
+        * @wq_entry is removed without grabbing the lock.
         */
-       list_del_init(&wq_entry->entry);
-       wait->committed = true;
-
        default_wake_function(wq_entry, mode, flags, key);
+       list_del_init_careful(&wq_entry->entry);
        return 0;
 }
 
index c838d81..0f006ca 100644 (file)
@@ -515,17 +515,6 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
        percpu_ref_put(&q->q_usage_counter);
 }
 
-static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
-                                  struct blk_mq_hw_ctx *hctx,
-                                  unsigned int hctx_idx)
-{
-       if (hctx->sched_tags) {
-               blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
-               blk_mq_free_rq_map(hctx->sched_tags, set->flags);
-               hctx->sched_tags = NULL;
-       }
-}
-
 static int blk_mq_sched_alloc_tags(struct request_queue *q,
                                   struct blk_mq_hw_ctx *hctx,
                                   unsigned int hctx_idx)
@@ -539,8 +528,10 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q,
                return -ENOMEM;
 
        ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
-       if (ret)
-               blk_mq_sched_free_tags(set, hctx, hctx_idx);
+       if (ret) {
+               blk_mq_free_rq_map(hctx->sched_tags, set->flags);
+               hctx->sched_tags = NULL;
+       }
 
        return ret;
 }
index af4d2ab..298ee78 100644 (file)
@@ -1079,10 +1079,9 @@ static void disk_release(struct device *dev)
        disk_release_events(disk);
        kfree(disk->random);
        xa_destroy(&disk->part_tbl);
-       bdput(disk->part0);
        if (test_bit(GD_QUEUE_REF, &disk->state) && disk->queue)
                blk_put_queue(disk->queue);
-       kfree(disk);
+       bdput(disk->part0);     /* frees the disk */
 }
 struct class block_class = {
        .name           = "block",
index 9d872ea..8f9940f 100644 (file)
@@ -370,7 +370,7 @@ config ACPI_TABLE_UPGRADE
 config ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD
        bool "Override ACPI tables from built-in initrd"
        depends on ACPI_TABLE_UPGRADE
-       depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION=""
+       depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION_NONE
        help
          This option provides functionality to override arbitrary ACPI tables
          from built-in uncompressed initrd.
index 5fca182..550b908 100644 (file)
@@ -9,6 +9,42 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 
+struct pch_fivr_resp {
+       u64 status;
+       u64 result;
+};
+
+static int pch_fivr_read(acpi_handle handle, char *method, struct pch_fivr_resp *fivr_resp)
+{
+       struct acpi_buffer resp = { sizeof(struct pch_fivr_resp), fivr_resp};
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       struct acpi_buffer format = { sizeof("NN"), "NN" };
+       union acpi_object *obj;
+       acpi_status status;
+       int ret = -EFAULT;
+
+       status = acpi_evaluate_object(handle, method, NULL, &buffer);
+       if (ACPI_FAILURE(status))
+               return ret;
+
+       obj = buffer.pointer;
+       if (!obj || obj->type != ACPI_TYPE_PACKAGE)
+               goto release_buffer;
+
+       status = acpi_extract_package(obj, &format, &resp);
+       if (ACPI_FAILURE(status))
+               goto release_buffer;
+
+       if (fivr_resp->status)
+               goto release_buffer;
+
+       ret = 0;
+
+release_buffer:
+       kfree(buffer.pointer);
+       return ret;
+}
+
 /*
  * Presentation of attributes which are defined for INT1045
  * They are:
@@ -23,15 +59,14 @@ static ssize_t name##_show(struct device *dev,\
                           char *buf)\
 {\
        struct acpi_device *acpi_dev = dev_get_drvdata(dev);\
-       unsigned long long val;\
-       acpi_status status;\
+       struct pch_fivr_resp fivr_resp;\
+       int status;\
 \
-       status = acpi_evaluate_integer(acpi_dev->handle, #method,\
-                                      NULL, &val);\
-       if (ACPI_SUCCESS(status))\
-               return sprintf(buf, "%d\n", (int)val);\
-       else\
-               return -EINVAL;\
+       status = pch_fivr_read(acpi_dev->handle, #method, &fivr_resp);\
+       if (status)\
+               return status;\
+\
+       return sprintf(buf, "%llu\n", fivr_resp.result);\
 }
 
 #define PCH_FIVR_STORE(name, method) \
index dc01fb5..ee78a21 100644 (file)
@@ -423,13 +423,6 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
        }
 }
 
-static bool irq_is_legacy(struct acpi_resource_irq *irq)
-{
-       return irq->triggering == ACPI_EDGE_SENSITIVE &&
-               irq->polarity == ACPI_ACTIVE_HIGH &&
-               irq->shareable == ACPI_EXCLUSIVE;
-}
-
 /**
  * acpi_dev_resource_interrupt - Extract ACPI interrupt resource information.
  * @ares: Input ACPI resource object.
@@ -468,7 +461,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
                }
                acpi_dev_get_irqresource(res, irq->interrupts[index],
                                         irq->triggering, irq->polarity,
-                                        irq->shareable, irq_is_legacy(irq));
+                                        irq->shareable, true);
                break;
        case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
                ext_irq = &ares->data.extended_irq;
index e7ddd28..d5cedff 100644 (file)
@@ -860,11 +860,9 @@ EXPORT_SYMBOL(acpi_dev_present);
  * Return the next match of ACPI device if another matching device was present
  * at the moment of invocation, or NULL otherwise.
  *
- * FIXME: The function does not tolerate the sudden disappearance of @adev, e.g.
- * in the case of a hotplug event. That said, the caller should ensure that
- * this will never happen.
- *
  * The caller is responsible for invoking acpi_dev_put() on the returned device.
+ * On the other hand, the function invokes acpi_dev_put() on the given @adev,
+ * assuming that its reference counter had been increased beforehand.
  *
  * See additional information in acpi_dev_present() as well.
  */
@@ -880,6 +878,7 @@ acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const cha
        match.hrv = hrv;
 
        dev = bus_find_device(&acpi_bus_type, start, &match, acpi_dev_match_cb);
+       acpi_dev_put(adev);
        return dev ? to_acpi_device(dev) : NULL;
 }
 EXPORT_SYMBOL(acpi_dev_get_next_match_dev);
index 1c50780..fbdbef0 100644 (file)
@@ -378,19 +378,25 @@ static int lps0_device_attach(struct acpi_device *adev,
                 * AMDI0006:
                 * - should use rev_id 0x0
                 * - function mask = 0x3: Should use Microsoft method
+                * AMDI0007:
+                * - Should use rev_id 0x2
+                * - Should only use AMD method
                 */
                const char *hid = acpi_device_hid(adev);
-               rev_id = 0;
+               rev_id = strcmp(hid, "AMDI0007") ? 0 : 2;
                lps0_dsm_func_mask = validate_dsm(adev->handle,
                                        ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
                lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
-                                       ACPI_LPS0_DSM_UUID_MICROSOFT, rev_id,
+                                       ACPI_LPS0_DSM_UUID_MICROSOFT, 0,
                                        &lps0_dsm_guid_microsoft);
                if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
                                                 !strcmp(hid, "AMDI0005"))) {
                        lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
                        acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
                                          ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
+               } else if (lps0_dsm_func_mask_microsoft > 0 && !strcmp(hid, "AMDI0007")) {
+                       lps0_dsm_func_mask_microsoft = -EINVAL;
+                       acpi_handle_debug(adev->handle, "_DSM Using AMD method\n");
                }
        } else {
                rev_id = 1;
index ae7189d..b71ea4a 100644 (file)
@@ -637,6 +637,20 @@ unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
 }
 EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
 
+static void ata_pio_xfer(struct ata_queued_cmd *qc, struct page *page,
+               unsigned int offset, size_t xfer_size)
+{
+       bool do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
+       unsigned char *buf;
+
+       buf = kmap_atomic(page);
+       qc->ap->ops->sff_data_xfer(qc, buf + offset, xfer_size, do_write);
+       kunmap_atomic(buf);
+
+       if (!do_write && !PageSlab(page))
+               flush_dcache_page(page);
+}
+
 /**
  *     ata_pio_sector - Transfer a sector of data.
  *     @qc: Command on going
@@ -648,11 +662,9 @@ EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
  */
 static void ata_pio_sector(struct ata_queued_cmd *qc)
 {
-       int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
        struct ata_port *ap = qc->ap;
        struct page *page;
        unsigned int offset;
-       unsigned char *buf;
 
        if (!qc->cursg) {
                qc->curbytes = qc->nbytes;
@@ -670,13 +682,20 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 
        DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
 
-       /* do the actual data transfer */
-       buf = kmap_atomic(page);
-       ap->ops->sff_data_xfer(qc, buf + offset, qc->sect_size, do_write);
-       kunmap_atomic(buf);
+       /*
+        * Split the transfer when it crosses a page boundary.  Note that the
+        * split still has to be dword aligned like all ATA data transfers.
+        */
+       WARN_ON_ONCE(offset % 4);
+       if (offset + qc->sect_size > PAGE_SIZE) {
+               unsigned int split_len = PAGE_SIZE - offset;
 
-       if (!do_write && !PageSlab(page))
-               flush_dcache_page(page);
+               ata_pio_xfer(qc, page, offset, split_len);
+               ata_pio_xfer(qc, nth_page(page, 1), 0,
+                            qc->sect_size - split_len);
+       } else {
+               ata_pio_xfer(qc, page, offset, qc->sect_size);
+       }
 
        qc->curbytes += qc->sect_size;
        qc->cursg_ofs += qc->sect_size;
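
The rewritten ata_pio_sector() splits any sector transfer that straddles a page so each half is kmapped from the correct page. The arithmetic on a concrete offset, assuming the usual 4096-byte pages and 512-byte sectors:

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int offset = PAGE_SIZE - 256;	/* sector starts near page end */
	unsigned int sect_size = 512;

	if (offset + sect_size > PAGE_SIZE) {
		unsigned int split_len = PAGE_SIZE - offset;

		printf("xfer %u bytes from page N, %u from page N+1\n",
		       split_len, sect_size - split_len);
	} else {
		printf("xfer %u bytes from a single page\n", sect_size);
	}
	return 0;
}
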
index 5881d64..99c6308 100644 (file)
  */
 
 #include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/libata.h>
 #include <linux/irq.h>
 #include <linux/platform_device.h>
-#include <linux/platform_data/pata_ixp4xx_cf.h>
+#include <linux/regmap.h>
 #include <scsi/scsi_host.h>
 
 #define DRV_NAME       "pata_ixp4xx_cf"
-#define DRV_VERSION    "0.2"
+#define DRV_VERSION    "1.0"
 
-static int ixp4xx_set_mode(struct ata_link *link, struct ata_device **error)
+struct ixp4xx_pata {
+       struct ata_host *host;
+       struct regmap *rmap;
+       u32 cmd_csreg;
+       void __iomem *cmd;
+       void __iomem *ctl;
+};
+
+#define IXP4XX_EXP_TIMING_STRIDE       0x04
+/* The timings for the chipselect is in bits 29..16 */
+#define IXP4XX_EXP_T1_T5_MASK  GENMASK(29, 16)
+#define IXP4XX_EXP_PIO_0_8     0x0a470000
+#define IXP4XX_EXP_PIO_1_8     0x06430000
+#define IXP4XX_EXP_PIO_2_8     0x02410000
+#define IXP4XX_EXP_PIO_3_8     0x00820000
+#define IXP4XX_EXP_PIO_4_8     0x00400000
+#define IXP4XX_EXP_PIO_0_16    0x29640000
+#define IXP4XX_EXP_PIO_1_16    0x05030000
+#define IXP4XX_EXP_PIO_2_16    0x00b20000
+#define IXP4XX_EXP_PIO_3_16    0x00820000
+#define IXP4XX_EXP_PIO_4_16    0x00400000
+#define IXP4XX_EXP_BW_MASK     (BIT(6)|BIT(0))
+#define IXP4XX_EXP_BYTE_RD16   BIT(6) /* Byte reads on half-word devices */
+#define IXP4XX_EXP_BYTE_EN     BIT(0) /* Use 8bit data bus if set */
+
+static void ixp4xx_set_8bit_timing(struct ixp4xx_pata *ixpp, u8 pio_mode)
+{
+       switch (pio_mode) {
+       case XFER_PIO_0:
+               regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+                                  IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_0_8);
+               break;
+       case XFER_PIO_1:
+               regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+                                  IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_1_8);
+               break;
+       case XFER_PIO_2:
+               regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+                                  IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_2_8);
+               break;
+       case XFER_PIO_3:
+               regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+                                  IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_3_8);
+               break;
+       case XFER_PIO_4:
+               regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+                                  IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_4_8);
+               break;
+       default:
+               break;
+       }
+       regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+                          IXP4XX_EXP_BW_MASK, IXP4XX_EXP_BYTE_RD16|IXP4XX_EXP_BYTE_EN);
+}
+
+static void ixp4xx_set_16bit_timing(struct ixp4xx_pata *ixpp, u8 pio_mode)
 {
-       struct ata_device *dev;
-
-       ata_for_each_dev(dev, link, ENABLED) {
-               ata_dev_info(dev, "configured for PIO0\n");
-               dev->pio_mode = XFER_PIO_0;
-               dev->xfer_mode = XFER_PIO_0;
-               dev->xfer_shift = ATA_SHIFT_PIO;
-               dev->flags |= ATA_DFLAG_PIO;
+       switch (pio_mode) {
+       case XFER_PIO_0:
+               regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+                                  IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_0_16);
+               break;
+       case XFER_PIO_1:
+               regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+                                  IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_1_16);
+               break;
+       case XFER_PIO_2:
+               regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+                                  IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_2_16);
+               break;
+       case XFER_PIO_3:
+               regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+                                  IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_3_16);
+               break;
+       case XFER_PIO_4:
+               regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+                                  IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_4_16);
+               break;
+       default:
+               break;
        }
-       return 0;
+       regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+                          IXP4XX_EXP_BW_MASK, IXP4XX_EXP_BYTE_RD16);
+}
+
+/* This sets up the timing on the chipselect CMD accordingly */
+static void ixp4xx_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+       struct ixp4xx_pata *ixpp = ap->host->private_data;
+
+       ata_dev_printk(adev, KERN_INFO, "configured for PIO%d 8bit\n",
+                      adev->pio_mode - XFER_PIO_0);
+       ixp4xx_set_8bit_timing(ixpp, adev->pio_mode);
 }
 
+
 static unsigned int ixp4xx_mmio_data_xfer(struct ata_queued_cmd *qc,
-                               unsigned char *buf, unsigned int buflen, int rw)
+                                         unsigned char *buf, unsigned int buflen, int rw)
 {
        unsigned int i;
        unsigned int words = buflen >> 1;
        u16 *buf16 = (u16 *) buf;
+       struct ata_device *adev = qc->dev;
        struct ata_port *ap = qc->dev->link->ap;
        void __iomem *mmio = ap->ioaddr.data_addr;
-       struct ixp4xx_pata_data *data = dev_get_platdata(ap->host->dev);
+       struct ixp4xx_pata *ixpp = ap->host->private_data;
+       unsigned long flags;
+
+       ata_dev_printk(adev, KERN_DEBUG, "%s %d bytes\n", (rw == READ) ? "READ" : "WRITE",
+                      buflen);
+       spin_lock_irqsave(ap->lock, flags);
 
        /* set the expansion bus in 16bit mode and restore
         * 8 bit mode after the transaction.
         */
-       *data->cs0_cfg &= ~(0x01);
-       udelay(100);
+       ixp4xx_set_16bit_timing(ixpp, adev->pio_mode);
+       udelay(5);
 
        /* Transfer multiple of 2 bytes */
        if (rw == READ)
@@ -76,8 +165,10 @@ static unsigned int ixp4xx_mmio_data_xfer(struct ata_queued_cmd *qc,
                words++;
        }
 
-       udelay(100);
-       *data->cs0_cfg |= 0x01;
+       ixp4xx_set_8bit_timing(ixpp, adev->pio_mode);
+       udelay(5);
+
+       spin_unlock_irqrestore(ap->lock, flags);
 
        return words << 1;
 }
@@ -90,79 +181,98 @@ static struct ata_port_operations ixp4xx_port_ops = {
        .inherits               = &ata_sff_port_ops,
        .sff_data_xfer          = ixp4xx_mmio_data_xfer,
        .cable_detect           = ata_cable_40wire,
-       .set_mode               = ixp4xx_set_mode,
+       .set_piomode            = ixp4xx_set_piomode,
+};
+
+static struct ata_port_info ixp4xx_port_info = {
+       .flags          = ATA_FLAG_NO_ATAPI,
+       .pio_mask       = ATA_PIO4,
+       .port_ops       = &ixp4xx_port_ops,
 };
 
 static void ixp4xx_setup_port(struct ata_port *ap,
-                             struct ixp4xx_pata_data *data,
-                             unsigned long raw_cs0, unsigned long raw_cs1)
+                             struct ixp4xx_pata *ixpp,
+                             unsigned long raw_cmd, unsigned long raw_ctl)
 {
        struct ata_ioports *ioaddr = &ap->ioaddr;
-       unsigned long raw_cmd = raw_cs0;
-       unsigned long raw_ctl = raw_cs1 + 0x06;
 
-       ioaddr->cmd_addr        = data->cs0;
-       ioaddr->altstatus_addr  = data->cs1 + 0x06;
-       ioaddr->ctl_addr        = data->cs1 + 0x06;
+       raw_ctl += 0x06;
+       ioaddr->cmd_addr        = ixpp->cmd;
+       ioaddr->altstatus_addr  = ixpp->ctl + 0x06;
+       ioaddr->ctl_addr        = ixpp->ctl + 0x06;
 
        ata_sff_std_ports(ioaddr);
 
-#ifndef __ARMEB__
-
-       /* adjust the addresses to handle the address swizzling of the
-        * ixp4xx in little endian mode.
-        */
-
-       *(unsigned long *)&ioaddr->data_addr            ^= 0x02;
-       *(unsigned long *)&ioaddr->cmd_addr             ^= 0x03;
-       *(unsigned long *)&ioaddr->altstatus_addr       ^= 0x03;
-       *(unsigned long *)&ioaddr->ctl_addr             ^= 0x03;
-       *(unsigned long *)&ioaddr->error_addr           ^= 0x03;
-       *(unsigned long *)&ioaddr->feature_addr         ^= 0x03;
-       *(unsigned long *)&ioaddr->nsect_addr           ^= 0x03;
-       *(unsigned long *)&ioaddr->lbal_addr            ^= 0x03;
-       *(unsigned long *)&ioaddr->lbam_addr            ^= 0x03;
-       *(unsigned long *)&ioaddr->lbah_addr            ^= 0x03;
-       *(unsigned long *)&ioaddr->device_addr          ^= 0x03;
-       *(unsigned long *)&ioaddr->status_addr          ^= 0x03;
-       *(unsigned long *)&ioaddr->command_addr         ^= 0x03;
-
-       raw_cmd ^= 0x03;
-       raw_ctl ^= 0x03;
-#endif
+       if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
+               /* adjust the addresses to handle the address swizzling of the
+                * ixp4xx in little endian mode.
+                */
+
+               *(unsigned long *)&ioaddr->data_addr            ^= 0x02;
+               *(unsigned long *)&ioaddr->cmd_addr             ^= 0x03;
+               *(unsigned long *)&ioaddr->altstatus_addr       ^= 0x03;
+               *(unsigned long *)&ioaddr->ctl_addr             ^= 0x03;
+               *(unsigned long *)&ioaddr->error_addr           ^= 0x03;
+               *(unsigned long *)&ioaddr->feature_addr         ^= 0x03;
+               *(unsigned long *)&ioaddr->nsect_addr           ^= 0x03;
+               *(unsigned long *)&ioaddr->lbal_addr            ^= 0x03;
+               *(unsigned long *)&ioaddr->lbam_addr            ^= 0x03;
+               *(unsigned long *)&ioaddr->lbah_addr            ^= 0x03;
+               *(unsigned long *)&ioaddr->device_addr          ^= 0x03;
+               *(unsigned long *)&ioaddr->status_addr          ^= 0x03;
+               *(unsigned long *)&ioaddr->command_addr         ^= 0x03;
+
+               raw_cmd ^= 0x03;
+               raw_ctl ^= 0x03;
+       }
 
        ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", raw_cmd, raw_ctl);
 }
 
 static int ixp4xx_pata_probe(struct platform_device *pdev)
 {
-       struct resource *cs0, *cs1;
-       struct ata_host *host;
-       struct ata_port *ap;
-       struct ixp4xx_pata_data *data = dev_get_platdata(&pdev->dev);
+       struct resource *cmd, *ctl;
+       struct ata_port_info pi = ixp4xx_port_info;
+       const struct ata_port_info *ppi[] = { &pi, NULL };
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct ixp4xx_pata *ixpp;
+       u32 csindex;
        int ret;
        int irq;
 
-       cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       cmd = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       ctl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 
-       if (!cs0 || !cs1)
+       if (!cmd || !ctl)
                return -EINVAL;
 
-       /* allocate host */
-       host = ata_host_alloc(&pdev->dev, 1);
-       if (!host)
+       ixpp = devm_kzalloc(dev, sizeof(*ixpp), GFP_KERNEL);
+       if (!ixpp)
                return -ENOMEM;
 
-       /* acquire resources and fill host */
-       ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+       ixpp->rmap = syscon_node_to_regmap(np->parent);
+       if (IS_ERR(ixpp->rmap))
+               return dev_err_probe(dev, PTR_ERR(ixpp->rmap), "no regmap\n");
+       /* Inspect our address to figure out what chipselect the CMD is on */
+       ret = of_property_read_u32_index(np, "reg", 0, &csindex);
        if (ret)
-               return ret;
+               return dev_err_probe(dev, ret, "can't inspect CMD address\n");
+       dev_info(dev, "using CS%d for PIO timing configuration\n", csindex);
+       ixpp->cmd_csreg = csindex * IXP4XX_EXP_TIMING_STRIDE;
 
-       data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000);
-       data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000);
+       ixpp->host = ata_host_alloc_pinfo(dev, ppi, 1);
+       if (!ixpp->host)
+               return -ENOMEM;
+       ixpp->host->private_data = ixpp;
 
-       if (!data->cs0 || !data->cs1)
+       ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
+
+       ixpp->cmd = devm_ioremap_resource(dev, cmd);
+       ixpp->ctl = devm_ioremap_resource(dev, ctl);
+       if (IS_ERR(ixpp->cmd) || IS_ERR(ixpp->ctl))
                return -ENOMEM;
 
        irq = platform_get_irq(pdev, 0);
@@ -173,27 +283,23 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
        else
                return -EINVAL;
 
-       /* Setup expansion bus chip selects */
-       *data->cs0_cfg = data->cs0_bits;
-       *data->cs1_cfg = data->cs1_bits;
-
-       ap = host->ports[0];
-
-       ap->ops = &ixp4xx_port_ops;
-       ap->pio_mask = ATA_PIO4;
-       ap->flags |= ATA_FLAG_NO_ATAPI;
+       /* Just one port to set up */
+       ixp4xx_setup_port(ixpp->host->ports[0], ixpp, cmd->start, ctl->start);
 
-       ixp4xx_setup_port(ap, data, cs0->start, cs1->start);
+       ata_print_version_once(dev, DRV_VERSION);
 
-       ata_print_version_once(&pdev->dev, DRV_VERSION);
-
-       /* activate host */
-       return ata_host_activate(host, irq, ata_sff_interrupt, 0, &ixp4xx_sht);
+       return ata_host_activate(ixpp->host, irq, ata_sff_interrupt, 0, &ixp4xx_sht);
 }
 
+static const struct of_device_id ixp4xx_pata_of_match[] = {
+       { .compatible = "intel,ixp4xx-compact-flash", },
+       { },
+};
+
 static struct platform_driver ixp4xx_pata_platform_driver = {
        .driver  = {
                .name   = DRV_NAME,
+               .of_match_table = ixp4xx_pata_of_match,
        },
        .probe          = ixp4xx_pata_probe,
        .remove         = ata_platform_remove_one,
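
The two timing setters above differ only in the per-mode value written into the T1..T5 field, which makes the switch statements natural candidates for a lookup table. A minimal sketch of that alternative, using the 8 bit values; the array and function names are invented, while the register constants are the ones the driver takes from its header:

static const u32 ixp4xx_pio_8bit[] = {
        IXP4XX_EXP_PIO_0_8, IXP4XX_EXP_PIO_1_8, IXP4XX_EXP_PIO_2_8,
        IXP4XX_EXP_PIO_3_8, IXP4XX_EXP_PIO_4_8,
};

static void ixp4xx_set_8bit_timing_tbl(struct ixp4xx_pata *ixpp, u8 pio_mode)
{
        unsigned int i = pio_mode - XFER_PIO_0;

        /* Unknown modes keep the current T1..T5 timing, as in the switch */
        if (i < ARRAY_SIZE(ixp4xx_pio_8bit))
                regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
                                   IXP4XX_EXP_T1_T5_MASK, ixp4xx_pio_8bit[i]);
        regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg, IXP4XX_EXP_BW_MASK,
                           IXP4XX_EXP_BYTE_RD16 | IXP4XX_EXP_BYTE_EN);
}

The open-coded switch reads one-to-one against the datasheet timing tables, which may be why the driver spells the cases out.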
index adc199d..6a30264 100644 (file)
@@ -231,6 +231,8 @@ EXPORT_SYMBOL_GPL(auxiliary_find_device);
 int __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
                                struct module *owner, const char *modname)
 {
+       int ret;
+
        if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table))
                return -EINVAL;
 
@@ -246,7 +248,11 @@ int __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
        auxdrv->driver.bus = &auxiliary_bus_type;
        auxdrv->driver.mod_name = modname;
 
-       return driver_register(&auxdrv->driver);
+       ret = driver_register(&auxdrv->driver);
+       if (ret)
+               kfree(auxdrv->driver.name);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(__auxiliary_driver_register);
 
index cadcade..f636049 100644 (file)
@@ -574,8 +574,10 @@ static void devlink_remove_symlinks(struct device *dev,
                return;
        }
 
-       snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
-       sysfs_remove_link(&con->kobj, buf);
+       if (device_is_registered(con)) {
+               snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
+               sysfs_remove_link(&con->kobj, buf);
+       }
        snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
        sysfs_remove_link(&sup->kobj, buf);
        kfree(buf);
index f37b9e3..f0cdff0 100644 (file)
 
 static DEFINE_IDR(loop_index_idr);
 static DEFINE_MUTEX(loop_ctl_mutex);
+static DEFINE_MUTEX(loop_validate_mutex);
+
+/**
+ * loop_global_lock_killable() - take locks for safe loop_validate_file() test
+ *
+ * @lo: struct loop_device
+ * @global: true if @lo is about to bind another "struct loop_device", false otherwise
+ *
+ * Returns 0 on success, -EINTR otherwise.
+ *
+ * Since loop_validate_file() traverses other "struct loop_device" instances if
+ * is_loop_device() is true, we need a global lock for serializing concurrent
+ * loop_configure()/loop_change_fd()/__loop_clr_fd() calls.
+ */
+static int loop_global_lock_killable(struct loop_device *lo, bool global)
+{
+       int err;
+
+       if (global) {
+               err = mutex_lock_killable(&loop_validate_mutex);
+               if (err)
+                       return err;
+       }
+       err = mutex_lock_killable(&lo->lo_mutex);
+       if (err && global)
+               mutex_unlock(&loop_validate_mutex);
+       return err;
+}
+
+/**
+ * loop_global_unlock() - release locks taken by loop_global_lock_killable()
+ *
+ * @lo: struct loop_device
+ * @global: true if @lo was about to bind another "struct loop_device", false otherwise
+ */
+static void loop_global_unlock(struct loop_device *lo, bool global)
+{
+       mutex_unlock(&lo->lo_mutex);
+       if (global)
+               mutex_unlock(&loop_validate_mutex);
+}
 
 static int max_part;
 static int part_shift;
@@ -672,13 +713,15 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
        while (is_loop_device(f)) {
                struct loop_device *l;
 
+               lockdep_assert_held(&loop_validate_mutex);
                if (f->f_mapping->host->i_rdev == bdev->bd_dev)
                        return -EBADF;
 
                l = I_BDEV(f->f_mapping->host)->bd_disk->private_data;
-               if (l->lo_state != Lo_bound) {
+               if (l->lo_state != Lo_bound)
                        return -EINVAL;
-               }
+               /* Order wrt setting lo->lo_backing_file in loop_configure(). */
+               rmb();
                f = l->lo_backing_file;
        }
        if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
@@ -697,13 +740,18 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
 static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
                          unsigned int arg)
 {
-       struct file     *file = NULL, *old_file;
-       int             error;
-       bool            partscan;
+       struct file *file = fget(arg);
+       struct file *old_file;
+       int error;
+       bool partscan;
+       bool is_loop;
 
-       error = mutex_lock_killable(&lo->lo_mutex);
+       if (!file)
+               return -EBADF;
+       is_loop = is_loop_device(file);
+       error = loop_global_lock_killable(lo, is_loop);
        if (error)
-               return error;
+               goto out_putf;
        error = -ENXIO;
        if (lo->lo_state != Lo_bound)
                goto out_err;
@@ -713,11 +761,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
        if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
                goto out_err;
 
-       error = -EBADF;
-       file = fget(arg);
-       if (!file)
-               goto out_err;
-
        error = loop_validate_file(file, bdev);
        if (error)
                goto out_err;
@@ -740,7 +783,16 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
        loop_update_dio(lo);
        blk_mq_unfreeze_queue(lo->lo_queue);
        partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
-       mutex_unlock(&lo->lo_mutex);
+       loop_global_unlock(lo, is_loop);
+
+       /*
+        * Flush loop_validate_file() before fput(), for l->lo_backing_file
+        * might be pointing at old_file which might be the last reference.
+        */
+       if (!is_loop) {
+               mutex_lock(&loop_validate_mutex);
+               mutex_unlock(&loop_validate_mutex);
+       }
        /*
         * We must drop file reference outside of lo_mutex as dropping
         * the file ref can take open_mutex which creates circular locking
@@ -752,9 +804,9 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
        return 0;
 
 out_err:
-       mutex_unlock(&lo->lo_mutex);
-       if (file)
-               fput(file);
+       loop_global_unlock(lo, is_loop);
+out_putf:
+       fput(file);
        return error;
 }
 
@@ -1136,22 +1188,22 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
                          struct block_device *bdev,
                          const struct loop_config *config)
 {
-       struct file     *file;
-       struct inode    *inode;
+       struct file *file = fget(config->fd);
+       struct inode *inode;
        struct address_space *mapping;
-       int             error;
-       loff_t          size;
-       bool            partscan;
-       unsigned short  bsize;
+       int error;
+       loff_t size;
+       bool partscan;
+       unsigned short bsize;
+       bool is_loop;
+
+       if (!file)
+               return -EBADF;
+       is_loop = is_loop_device(file);
 
        /* This is safe, since we have a reference from open(). */
        __module_get(THIS_MODULE);
 
-       error = -EBADF;
-       file = fget(config->fd);
-       if (!file)
-               goto out;
-
        /*
         * If we don't hold exclusive handle for the device, upgrade to it
         * here to avoid changing device under exclusive owner.
@@ -1162,7 +1214,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
                        goto out_putf;
        }
 
-       error = mutex_lock_killable(&lo->lo_mutex);
+       error = loop_global_lock_killable(lo, is_loop);
        if (error)
                goto out_bdev;
 
@@ -1242,6 +1294,9 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
        size = get_loop_size(lo, file);
        loop_set_size(lo, size);
 
+       /* Order wrt reading lo_state in loop_validate_file(). */
+       wmb();
+
        lo->lo_state = Lo_bound;
        if (part_shift)
                lo->lo_flags |= LO_FLAGS_PARTSCAN;
@@ -1253,7 +1308,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
         * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
         */
        bdgrab(bdev);
-       mutex_unlock(&lo->lo_mutex);
+       loop_global_unlock(lo, is_loop);
        if (partscan)
                loop_reread_partitions(lo);
        if (!(mode & FMODE_EXCL))
@@ -1261,13 +1316,12 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
        return 0;
 
 out_unlock:
-       mutex_unlock(&lo->lo_mutex);
+       loop_global_unlock(lo, is_loop);
 out_bdev:
        if (!(mode & FMODE_EXCL))
                bd_abort_claiming(bdev, loop_configure);
 out_putf:
        fput(file);
-out:
        /* This is safe: open() is still holding a reference. */
        module_put(THIS_MODULE);
        return error;
@@ -1283,6 +1337,18 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
        int lo_number;
        struct loop_worker *pos, *worker;
 
+       /*
+        * Flush loop_configure() and loop_change_fd(). It is acceptable for
+        * loop_validate_file() to succeed, because the actual clear operation
+        * has not started yet.
+        */
+       mutex_lock(&loop_validate_mutex);
+       mutex_unlock(&loop_validate_mutex);
+       /*
+        * loop_validate_file() now fails because l->lo_state != Lo_bound
+        * became visible.
+        */
+
        mutex_lock(&lo->lo_mutex);
        if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
                err = -ENXIO;
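
The bare mutex_lock()/mutex_unlock() pairs on loop_validate_mutex above, in loop_change_fd() and __loop_clr_fd(), are a flush idiom rather than leftovers: passing once through the mutex guarantees that every loop_validate_file() traversal that started before the state change has completed. The idiom in isolation, as a sketch with invented names:

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);

/* Passing through the lock once waits out all earlier critical
 * sections; nothing is held afterwards -- the wait was the point.
 */
static void demo_flush(void)
{
        mutex_lock(&demo_mutex);
        mutex_unlock(&demo_mutex);
}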
index b7d6637..c383179 100644 (file)
@@ -239,8 +239,8 @@ static void nbd_dev_remove(struct nbd_device *nbd)
 
        if (disk) {
                del_gendisk(disk);
-               blk_mq_free_tag_set(&nbd->tag_set);
                blk_cleanup_disk(disk);
+               blk_mq_free_tag_set(&nbd->tag_set);
        }
 
        /*
index 3b2b8e8..9b32989 100644 (file)
@@ -1014,8 +1014,8 @@ static void __exit pd_exit(void)
                if (p) {
                        disk->gd = NULL;
                        del_gendisk(p);
-                       blk_mq_free_tag_set(&disk->tag_set);
                        blk_cleanup_disk(p);
+                       blk_mq_free_tag_set(&disk->tag_set);
                        pi_release(disk->pi);
                }
        }
index 531d390..90b947c 100644 (file)
@@ -4100,8 +4100,6 @@ again:
 
 static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
 {
-       bool need_wait;
-
        dout("%s rbd_dev %p\n", __func__, rbd_dev);
        lockdep_assert_held_write(&rbd_dev->lock_rwsem);
 
@@ -4113,11 +4111,11 @@ static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
         */
        rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
        rbd_assert(!completion_done(&rbd_dev->releasing_wait));
-       need_wait = !list_empty(&rbd_dev->running_list);
-       downgrade_write(&rbd_dev->lock_rwsem);
-       if (need_wait)
-               wait_for_completion(&rbd_dev->releasing_wait);
-       up_read(&rbd_dev->lock_rwsem);
+       if (list_empty(&rbd_dev->running_list))
+               return true;
+
+       up_write(&rbd_dev->lock_rwsem);
+       wait_for_completion(&rbd_dev->releasing_wait);
 
        down_write(&rbd_dev->lock_rwsem);
        if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
@@ -4203,15 +4201,11 @@ static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
        if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
                down_write(&rbd_dev->lock_rwsem);
                if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
-                       /*
-                        * we already know that the remote client is
-                        * the owner
-                        */
-                       up_write(&rbd_dev->lock_rwsem);
-                       return;
+                       dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n",
+                            __func__, rbd_dev, cid.gid, cid.handle);
+               } else {
+                       rbd_set_owner_cid(rbd_dev, &cid);
                }
-
-               rbd_set_owner_cid(rbd_dev, &cid);
                downgrade_write(&rbd_dev->lock_rwsem);
        } else {
                down_read(&rbd_dev->lock_rwsem);
@@ -4236,14 +4230,12 @@ static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
        if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
                down_write(&rbd_dev->lock_rwsem);
                if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
-                       dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
+                       dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n",
                             __func__, rbd_dev, cid.gid, cid.handle,
                             rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
-                       up_write(&rbd_dev->lock_rwsem);
-                       return;
+               } else {
+                       rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
                }
-
-               rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
                downgrade_write(&rbd_dev->lock_rwsem);
        } else {
                down_read(&rbd_dev->lock_rwsem);
@@ -4951,6 +4943,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
                disk->minors = RBD_MINORS_PER_MAJOR;
        }
        disk->fops = &rbd_bd_ops;
+       disk->private_data = rbd_dev;
 
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
        /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
index 8d49f8f..d83fee2 100644 (file)
@@ -502,34 +502,21 @@ static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
 static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
                       unsigned command, unsigned long argument)
 {
-       struct blkfront_info *info = bdev->bd_disk->private_data;
        int i;
 
-       dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
-               command, (long)argument);
-
        switch (command) {
        case CDROMMULTISESSION:
-               dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
                for (i = 0; i < sizeof(struct cdrom_multisession); i++)
                        if (put_user(0, (char __user *)(argument + i)))
                                return -EFAULT;
                return 0;
-
-       case CDROM_GET_CAPABILITY: {
-               struct gendisk *gd = info->gd;
-               if (gd->flags & GENHD_FL_CD)
+       case CDROM_GET_CAPABILITY:
+               if (bdev->bd_disk->flags & GENHD_FL_CD)
                        return 0;
                return -EINVAL;
-       }
-
        default:
-               /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
-                 command);*/
-               return -EINVAL; /* same return as native Linux */
+               return -EINVAL;
        }
-
-       return 0;
 }
 
 static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
@@ -1177,36 +1164,6 @@ out_release_minors:
        return err;
 }
 
-static void xlvbd_release_gendisk(struct blkfront_info *info)
-{
-       unsigned int minor, nr_minors, i;
-       struct blkfront_ring_info *rinfo;
-
-       if (info->rq == NULL)
-               return;
-
-       /* No more blkif_request(). */
-       blk_mq_stop_hw_queues(info->rq);
-
-       for_each_rinfo(info, rinfo, i) {
-               /* No more gnttab callback work. */
-               gnttab_cancel_free_callback(&rinfo->callback);
-
-               /* Flush gnttab callback work. Must be done with no locks held. */
-               flush_work(&rinfo->work);
-       }
-
-       del_gendisk(info->gd);
-
-       minor = info->gd->first_minor;
-       nr_minors = info->gd->minors;
-       xlbd_release_minors(minor, nr_minors);
-
-       blk_cleanup_disk(info->gd);
-       info->gd = NULL;
-       blk_mq_free_tag_set(&info->tag_set);
-}
-
 /* Already hold rinfo->ring_lock. */
 static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
 {
@@ -1756,12 +1713,6 @@ abort_transaction:
        return err;
 }
 
-static void free_info(struct blkfront_info *info)
-{
-       list_del(&info->info_list);
-       kfree(info);
-}
-
 /* Common code used when first setting up, and when resuming. */
 static int talk_to_blkback(struct xenbus_device *dev,
                           struct blkfront_info *info)
@@ -1880,13 +1831,6 @@ again:
                xenbus_dev_fatal(dev, err, "%s", message);
  destroy_blkring:
        blkif_free(info, 0);
-
-       mutex_lock(&blkfront_mutex);
-       free_info(info);
-       mutex_unlock(&blkfront_mutex);
-
-       dev_set_drvdata(&dev->dev, NULL);
-
        return err;
 }
 
@@ -2126,38 +2070,26 @@ static int blkfront_resume(struct xenbus_device *dev)
 static void blkfront_closing(struct blkfront_info *info)
 {
        struct xenbus_device *xbdev = info->xbdev;
-       struct block_device *bdev = NULL;
-
-       mutex_lock(&info->mutex);
+       struct blkfront_ring_info *rinfo;
+       unsigned int i;
 
-       if (xbdev->state == XenbusStateClosing) {
-               mutex_unlock(&info->mutex);
+       if (xbdev->state == XenbusStateClosing)
                return;
-       }
 
-       if (info->gd)
-               bdev = bdgrab(info->gd->part0);
-
-       mutex_unlock(&info->mutex);
-
-       if (!bdev) {
-               xenbus_frontend_closed(xbdev);
-               return;
-       }
+       /* No more blkif_request(). */
+       blk_mq_stop_hw_queues(info->rq);
+       blk_set_queue_dying(info->rq);
+       set_capacity(info->gd, 0);
 
-       mutex_lock(&bdev->bd_disk->open_mutex);
+       for_each_rinfo(info, rinfo, i) {
+               /* No more gnttab callback work. */
+               gnttab_cancel_free_callback(&rinfo->callback);
 
-       if (bdev->bd_openers) {
-               xenbus_dev_error(xbdev, -EBUSY,
-                                "Device in use; refusing to close");
-               xenbus_switch_state(xbdev, XenbusStateClosing);
-       } else {
-               xlvbd_release_gendisk(info);
-               xenbus_frontend_closed(xbdev);
+               /* Flush gnttab callback work. Must be done with no locks held. */
+               flush_work(&rinfo->work);
        }
 
-       mutex_unlock(&bdev->bd_disk->open_mutex);
-       bdput(bdev);
+       xenbus_frontend_closed(xbdev);
 }
 
 static void blkfront_setup_discard(struct blkfront_info *info)
@@ -2472,8 +2404,7 @@ static void blkback_changed(struct xenbus_device *dev,
                        break;
                fallthrough;
        case XenbusStateClosing:
-               if (info)
-                       blkfront_closing(info);
+               blkfront_closing(info);
                break;
        }
 }
@@ -2481,56 +2412,21 @@ static void blkback_changed(struct xenbus_device *dev,
 static int blkfront_remove(struct xenbus_device *xbdev)
 {
        struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
-       struct block_device *bdev = NULL;
-       struct gendisk *disk;
 
        dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
 
-       if (!info)
-               return 0;
-
-       blkif_free(info, 0);
-
-       mutex_lock(&info->mutex);
-
-       disk = info->gd;
-       if (disk)
-               bdev = bdgrab(disk->part0);
-
-       info->xbdev = NULL;
-       mutex_unlock(&info->mutex);
-
-       if (!bdev) {
-               mutex_lock(&blkfront_mutex);
-               free_info(info);
-               mutex_unlock(&blkfront_mutex);
-               return 0;
-       }
-
-       /*
-        * The xbdev was removed before we reached the Closed
-        * state. See if it's safe to remove the disk. If the bdev
-        * isn't closed yet, we let release take care of it.
-        */
-
-       mutex_lock(&disk->open_mutex);
-       info = disk->private_data;
-
-       dev_warn(disk_to_dev(disk),
-                "%s was hot-unplugged, %d stale handles\n",
-                xbdev->nodename, bdev->bd_openers);
+       del_gendisk(info->gd);
 
-       if (info && !bdev->bd_openers) {
-               xlvbd_release_gendisk(info);
-               disk->private_data = NULL;
-               mutex_lock(&blkfront_mutex);
-               free_info(info);
-               mutex_unlock(&blkfront_mutex);
-       }
+       mutex_lock(&blkfront_mutex);
+       list_del(&info->info_list);
+       mutex_unlock(&blkfront_mutex);
 
-       mutex_unlock(&disk->open_mutex);
-       bdput(bdev);
+       blkif_free(info, 0);
+       xlbd_release_minors(info->gd->first_minor, info->gd->minors);
+       blk_cleanup_disk(info->gd);
+       blk_mq_free_tag_set(&info->tag_set);
 
+       kfree(info);
        return 0;
 }
 
@@ -2541,77 +2437,9 @@ static int blkfront_is_ready(struct xenbus_device *dev)
        return info->is_ready && info->xbdev;
 }
 
-static int blkif_open(struct block_device *bdev, fmode_t mode)
-{
-       struct gendisk *disk = bdev->bd_disk;
-       struct blkfront_info *info;
-       int err = 0;
-
-       mutex_lock(&blkfront_mutex);
-
-       info = disk->private_data;
-       if (!info) {
-               /* xbdev gone */
-               err = -ERESTARTSYS;
-               goto out;
-       }
-
-       mutex_lock(&info->mutex);
-
-       if (!info->gd)
-               /* xbdev is closed */
-               err = -ERESTARTSYS;
-
-       mutex_unlock(&info->mutex);
-
-out:
-       mutex_unlock(&blkfront_mutex);
-       return err;
-}
-
-static void blkif_release(struct gendisk *disk, fmode_t mode)
-{
-       struct blkfront_info *info = disk->private_data;
-       struct xenbus_device *xbdev;
-
-       mutex_lock(&blkfront_mutex);
-       if (disk->part0->bd_openers)
-               goto out_mutex;
-
-       /*
-        * Check if we have been instructed to close. We will have
-        * deferred this request, because the bdev was still open.
-        */
-
-       mutex_lock(&info->mutex);
-       xbdev = info->xbdev;
-
-       if (xbdev && xbdev->state == XenbusStateClosing) {
-               /* pending switch to state closed */
-               dev_info(disk_to_dev(disk), "releasing disk\n");
-               xlvbd_release_gendisk(info);
-               xenbus_frontend_closed(info->xbdev);
-       }
-
-       mutex_unlock(&info->mutex);
-
-       if (!xbdev) {
-               /* sudden device removal */
-               dev_info(disk_to_dev(disk), "releasing disk\n");
-               xlvbd_release_gendisk(info);
-               disk->private_data = NULL;
-               free_info(info);
-       }
-
-out_mutex:
-       mutex_unlock(&blkfront_mutex);
-}
-
 static const struct block_device_operations xlvbd_block_fops =
 {
        .owner = THIS_MODULE,
-       .open = blkif_open,
-       .release = blkif_release,
        .getgeo = blkif_getgeo,
        .ioctl = blkif_ioctl,
        .compat_ioctl = blkdev_compat_ptr_ioctl,
index e7f7eee..a5b96f3 100644 (file)
@@ -95,6 +95,17 @@ config IMX_WEIM
          The WEIM(Wireless External Interface Module) works like a bus.
          You can attach many different devices on it, such as NOR, onenand.
 
+config INTEL_IXP4XX_EB
+       bool "Intel IXP4xx expansion bus interface driver"
+       depends on HAS_IOMEM
+       depends on ARCH_IXP4XX || COMPILE_TEST
+       default ARCH_IXP4XX
+       select MFD_SYSCON
+       help
+         Driver for the Intel IXP4xx expansion bus interface. The driver is
+         needed to set up various chip select configuration parameters before
+         devices on the expansion bus can be discovered.
+
 config MIPS_CDMM
        bool "MIPS Common Device Memory Map (CDMM) Driver"
        depends on CPU_MIPSR2 || CPU_MIPSR5
index 397e353..1c29c5e 100644 (file)
@@ -16,6 +16,7 @@ obj-$(CONFIG_FSL_MC_BUS)      += fsl-mc/
 obj-$(CONFIG_BT1_APB)          += bt1-apb.o
 obj-$(CONFIG_BT1_AXI)          += bt1-axi.o
 obj-$(CONFIG_IMX_WEIM)         += imx-weim.o
+obj-$(CONFIG_INTEL_IXP4XX_EB)  += intel-ixp4xx-eb.o
 obj-$(CONFIG_MIPS_CDMM)                += mips_cdmm.o
 obj-$(CONFIG_MVEBU_MBUS)       += mvebu-mbus.o
 
diff --git a/drivers/bus/intel-ixp4xx-eb.c b/drivers/bus/intel-ixp4xx-eb.c
new file mode 100644 (file)
index 0000000..a438844
--- /dev/null
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel IXP4xx Expansion Bus Controller
+ * Copyright (C) 2021 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define IXP4XX_EXP_NUM_CS              8
+
+#define IXP4XX_EXP_TIMING_CS0          0x00
+#define IXP4XX_EXP_TIMING_CS1          0x04
+#define IXP4XX_EXP_TIMING_CS2          0x08
+#define IXP4XX_EXP_TIMING_CS3          0x0c
+#define IXP4XX_EXP_TIMING_CS4          0x10
+#define IXP4XX_EXP_TIMING_CS5          0x14
+#define IXP4XX_EXP_TIMING_CS6          0x18
+#define IXP4XX_EXP_TIMING_CS7          0x1c
+
+/* Bits inside each CS timing register */
+#define IXP4XX_EXP_TIMING_STRIDE       0x04
+#define IXP4XX_EXP_CS_EN               BIT(31)
+#define IXP456_EXP_PAR_EN              BIT(30) /* Only on IXP45x and IXP46x */
+#define IXP4XX_EXP_T1_MASK             GENMASK(29, 28)
+#define IXP4XX_EXP_T1_SHIFT            28
+#define IXP4XX_EXP_T2_MASK             GENMASK(27, 26)
+#define IXP4XX_EXP_T2_SHIFT            26
+#define IXP4XX_EXP_T3_MASK             GENMASK(25, 22)
+#define IXP4XX_EXP_T3_SHIFT            22
+#define IXP4XX_EXP_T4_MASK             GENMASK(21, 20)
+#define IXP4XX_EXP_T4_SHIFT            20
+#define IXP4XX_EXP_T5_MASK             GENMASK(19, 16)
+#define IXP4XX_EXP_T5_SHIFT            16
+#define IXP4XX_EXP_CYC_TYPE_MASK       GENMASK(15, 14)
+#define IXP4XX_EXP_CYC_TYPE_SHIFT      14
+#define IXP4XX_EXP_SIZE_MASK           GENMASK(13, 10)
+#define IXP4XX_EXP_SIZE_SHIFT          10
+#define IXP4XX_EXP_CNFG_0              BIT(9) /* Always zero */
+#define IXP43X_EXP_SYNC_INTEL          BIT(8) /* Only on IXP43x */
+#define IXP43X_EXP_EXP_CHIP            BIT(7) /* Only on IXP43x */
+#define IXP4XX_EXP_BYTE_RD16           BIT(6)
+#define IXP4XX_EXP_HRDY_POL            BIT(5) /* Only on IXP42x */
+#define IXP4XX_EXP_MUX_EN              BIT(4)
+#define IXP4XX_EXP_SPLT_EN             BIT(3)
+#define IXP4XX_EXP_WORD                        BIT(2) /* Always zero */
+#define IXP4XX_EXP_WR_EN               BIT(1)
+#define IXP4XX_EXP_BYTE_EN             BIT(0)
+#define IXP42X_RESERVED                        (BIT(30)|IXP4XX_EXP_CNFG_0|BIT(8)|BIT(7)|IXP4XX_EXP_WORD)
+#define IXP43X_RESERVED                        (BIT(30)|IXP4XX_EXP_CNFG_0|BIT(5)|IXP4XX_EXP_WORD)
+
+#define IXP4XX_EXP_CNFG0               0x20
+#define IXP4XX_EXP_CNFG0_MEM_MAP       BIT(31)
+#define IXP4XX_EXP_CNFG1               0x24
+
+#define IXP4XX_EXP_BOOT_BASE           0x00000000
+#define IXP4XX_EXP_NORMAL_BASE         0x50000000
+#define IXP4XX_EXP_STRIDE              0x01000000
+
+/* Fuses on the IXP43x */
+#define IXP43X_EXP_UNIT_FUSE_RESET     0x28
+#define IXP43X_EXP_FUSE_SPEED_MASK     GENMASK(23, 22)
+
+/* Number of device tree values in "reg" */
+#define IXP4XX_OF_REG_SIZE             3
+
+struct ixp4xx_eb {
+       struct device *dev;
+       struct regmap *rmap;
+       u32 bus_base;
+       bool is_42x;
+       bool is_43x;
+};
+
+struct ixp4xx_exp_tim_prop {
+       const char *prop;
+       u32 max;
+       u32 mask;
+       u16 shift;
+};
+
+static const struct ixp4xx_exp_tim_prop ixp4xx_exp_tim_props[] = {
+       {
+               .prop = "intel,ixp4xx-eb-t1",
+               .max = 3,
+               .mask = IXP4XX_EXP_T1_MASK,
+               .shift = IXP4XX_EXP_T1_SHIFT,
+       },
+       {
+               .prop = "intel,ixp4xx-eb-t2",
+               .max = 3,
+               .mask = IXP4XX_EXP_T2_MASK,
+               .shift = IXP4XX_EXP_T2_SHIFT,
+       },
+       {
+               .prop = "intel,ixp4xx-eb-t3",
+               .max = 15,
+               .mask = IXP4XX_EXP_T3_MASK,
+               .shift = IXP4XX_EXP_T3_SHIFT,
+       },
+       {
+               .prop = "intel,ixp4xx-eb-t4",
+               .max = 3,
+               .mask = IXP4XX_EXP_T4_MASK,
+               .shift = IXP4XX_EXP_T4_SHIFT,
+       },
+       {
+               .prop = "intel,ixp4xx-eb-t5",
+               .max = 15,
+               .mask = IXP4XX_EXP_T5_MASK,
+               .shift = IXP4XX_EXP_T5_SHIFT,
+       },
+       {
+               .prop = "intel,ixp4xx-eb-byte-access-on-halfword",
+               .max = 1,
+               .mask = IXP4XX_EXP_BYTE_RD16,
+       },
+       {
+               .prop = "intel,ixp4xx-eb-hpi-hrdy-pol-high",
+               .max = 1,
+               .mask = IXP4XX_EXP_HRDY_POL,
+       },
+       {
+               .prop = "intel,ixp4xx-eb-mux-address-and-data",
+               .max = 1,
+               .mask = IXP4XX_EXP_MUX_EN,
+       },
+       {
+               .prop = "intel,ixp4xx-eb-ahb-split-transfers",
+               .max = 1,
+               .mask = IXP4XX_EXP_SPLT_EN,
+       },
+       {
+               .prop = "intel,ixp4xx-eb-write-enable",
+               .max = 1,
+               .mask = IXP4XX_EXP_WR_EN,
+       },
+       {
+               .prop = "intel,ixp4xx-eb-byte-access",
+               .max = 1,
+               .mask = IXP4XX_EXP_BYTE_EN,
+       },
+};
+
+static void ixp4xx_exp_setup_chipselect(struct ixp4xx_eb *eb,
+                                       struct device_node *np,
+                                       u32 cs_index,
+                                       u32 cs_size)
+{
+       u32 cs_cfg;
+       u32 val;
+       u32 cur_cssize;
+       u32 cs_order;
+       int ret;
+       int i;
+
+       if (eb->is_42x && (cs_index > 7)) {
+               dev_err(eb->dev,
+                       "invalid chipselect %u, we only support 0-7\n",
+                       cs_index);
+               return;
+       }
+       if (eb->is_43x && (cs_index > 3)) {
+               dev_err(eb->dev,
+                       "invalid chipselect %u, we only support 0-3\n",
+                       cs_index);
+               return;
+       }
+
+       /* Several chip selects can be joined into one device */
+       if (cs_size > IXP4XX_EXP_STRIDE)
+               cur_cssize = IXP4XX_EXP_STRIDE;
+       else
+               cur_cssize = cs_size;
+
+       /*
+        * The following will read/modify/write the configuration for one
+        * chipselect, attempting to leave the boot defaults in place unless
+        * something is explicitly defined.
+        */
+       regmap_read(eb->rmap, IXP4XX_EXP_TIMING_CS0 +
+                   IXP4XX_EXP_TIMING_STRIDE * cs_index, &cs_cfg);
+       dev_info(eb->dev, "CS%d at %#08x, size %#08x, config before: %#08x\n",
+                cs_index, eb->bus_base + IXP4XX_EXP_STRIDE * cs_index,
+                cur_cssize, cs_cfg);
+
+       /* Set up the size first, aligned to 2^9 .. 2^24 */
+       cur_cssize = roundup_pow_of_two(cur_cssize);
+       if (cur_cssize < 512)
+               cur_cssize = 512;
+       cs_order = ilog2(cur_cssize);
+       if (cs_order < 9 || cs_order > 24) {
+               dev_err(eb->dev, "illegal size order %d\n", cs_order);
+               return;
+       }
+       dev_dbg(eb->dev, "CS%d size order: %d\n", cs_index, cs_order);
+       cs_cfg &= ~(IXP4XX_EXP_SIZE_MASK);
+       cs_cfg |= ((cs_order - 9) << IXP4XX_EXP_SIZE_SHIFT);
+
+       for (i = 0; i < ARRAY_SIZE(ixp4xx_exp_tim_props); i++) {
+               const struct ixp4xx_exp_tim_prop *ip = &ixp4xx_exp_tim_props[i];
+
+               /* All are regular u32 values */
+               ret = of_property_read_u32(np, ip->prop, &val);
+               if (ret)
+                       continue;
+
+               /* Handle bools (single bits) first */
+               if (ip->max == 1) {
+                       if (val)
+                               cs_cfg |= ip->mask;
+                       else
+                               cs_cfg &= ~ip->mask;
+                       dev_info(eb->dev, "CS%d %s %s\n", cs_index,
+                                val ? "enabled" : "disabled",
+                                ip->prop);
+                       continue;
+               }
+
+               if (val > ip->max) {
+                       dev_err(eb->dev,
+                               "CS%d value too high for %s: %u, capped at %u\n",
+                               cs_index, ip->prop, val, ip->max);
+                       val = ip->max;
+               }
+               /* This assumes max value fills all the assigned bits (and it does) */
+               cs_cfg &= ~ip->mask;
+               cs_cfg |= (val << ip->shift);
+               dev_info(eb->dev, "CS%d set %s to %u\n", cs_index, ip->prop, val);
+       }
+
+       ret = of_property_read_u32(np, "intel,ixp4xx-eb-cycle-type", &val);
+       if (!ret) {
+               if (val > 3) {
+                       dev_err(eb->dev, "illegal cycle type %d\n", val);
+                       return;
+               }
+               dev_info(eb->dev, "CS%d set cycle type %d\n", cs_index, val);
+               cs_cfg &= ~IXP4XX_EXP_CYC_TYPE_MASK;
+               cs_cfg |= val << IXP4XX_EXP_CYC_TYPE_SHIFT;
+       }
+
+       if (eb->is_42x)
+               cs_cfg &= ~IXP42X_RESERVED;
+       if (eb->is_43x) {
+               cs_cfg &= ~IXP43X_RESERVED;
+               /*
+                * This bit for Intel strata flash is currently unused, but let's
+                * report it if we find one.
+                */
+               if (cs_cfg & IXP43X_EXP_SYNC_INTEL)
+                       dev_info(eb->dev, "claims to be Intel strata flash\n");
+       }
+       cs_cfg |= IXP4XX_EXP_CS_EN;
+
+       regmap_write(eb->rmap,
+                    IXP4XX_EXP_TIMING_CS0 + IXP4XX_EXP_TIMING_STRIDE * cs_index,
+                    cs_cfg);
+       dev_info(eb->dev, "CS%d wrote %#08x into CS config\n", cs_index, cs_cfg);
+
+       /*
+        * If several chip selects are joined together into one big
+        * device area, we call ourselves recursively for each successive
+        * chip select. For a 32MB flash chip this results in two calls
+        * for example.
+        */
+       if (cs_size > IXP4XX_EXP_STRIDE)
+               ixp4xx_exp_setup_chipselect(eb, np,
+                                           cs_index + 1,
+                                           cs_size - IXP4XX_EXP_STRIDE);
+}
+
+static void ixp4xx_exp_setup_child(struct ixp4xx_eb *eb,
+                                  struct device_node *np)
+{
+       u32 cs_sizes[IXP4XX_EXP_NUM_CS];
+       int num_regs;
+       u32 csindex;
+       u32 cssize;
+       int ret;
+       int i;
+
+       num_regs = of_property_count_elems_of_size(np, "reg", IXP4XX_OF_REG_SIZE);
+       if (num_regs <= 0)
+               return;
+       dev_dbg(eb->dev, "child %s has %d register sets\n",
+               of_node_full_name(np), num_regs);
+
+       for (csindex = 0; csindex < IXP4XX_EXP_NUM_CS; csindex++)
+               cs_sizes[csindex] = 0;
+
+       for (i = 0; i < num_regs; i++) {
+               u32 rbase, rsize;
+
+               ret = of_property_read_u32_index(np, "reg",
+                                                i * IXP4XX_OF_REG_SIZE, &csindex);
+               if (ret)
+                       break;
+               ret = of_property_read_u32_index(np, "reg",
+                                                i * IXP4XX_OF_REG_SIZE + 1, &rbase);
+               if (ret)
+                       break;
+               ret = of_property_read_u32_index(np, "reg",
+                                                i * IXP4XX_OF_REG_SIZE + 2, &rsize);
+               if (ret)
+                       break;
+
+               if (csindex >= IXP4XX_EXP_NUM_CS) {
+                       dev_err(eb->dev, "illegal CS %d\n", csindex);
+                       continue;
+               }
+               /*
+                * The memory window always starts at the CS base, so the
+                * window size is the child's offset plus its size. For example
+                * if CS0 is at 0x50000000 and the reg is <0 0xe40000 0x40000>,
+                * the size is 0xe80000.
+                *
+                * Take the maximum if several regs set the same CS.
+                */
+               cssize = rbase + rsize;
+               dev_dbg(eb->dev, "CS%d size %#08x\n", csindex, cssize);
+               if (cs_sizes[csindex] < cssize)
+                       cs_sizes[csindex] = cssize;
+       }
+
+       for (csindex = 0; csindex < IXP4XX_EXP_NUM_CS; csindex++) {
+               cssize = cs_sizes[csindex];
+               if (!cssize)
+                       continue;
+               /* Set up this chipselect */
+               ixp4xx_exp_setup_chipselect(eb, np, csindex, cssize);
+       }
+}
+
+static int ixp4xx_exp_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct ixp4xx_eb *eb;
+       struct device_node *child;
+       bool have_children = false;
+       u32 val;
+       int ret;
+
+       eb = devm_kzalloc(dev, sizeof(*eb), GFP_KERNEL);
+       if (!eb)
+               return -ENOMEM;
+
+       eb->dev = dev;
+       eb->is_42x = of_device_is_compatible(np, "intel,ixp42x-expansion-bus-controller");
+       eb->is_43x = of_device_is_compatible(np, "intel,ixp43x-expansion-bus-controller");
+
+       eb->rmap = syscon_node_to_regmap(np);
+       if (IS_ERR(eb->rmap))
+               return dev_err_probe(dev, PTR_ERR(eb->rmap), "no regmap\n");
+
+       /* We only check that the regmap works on this first read */
+       ret = regmap_read(eb->rmap, IXP4XX_EXP_CNFG0, &val);
+       if (ret)
+               return dev_err_probe(dev, ret, "cannot read regmap\n");
+       if (val & IXP4XX_EXP_CNFG0_MEM_MAP)
+               eb->bus_base = IXP4XX_EXP_BOOT_BASE;
+       else
+               eb->bus_base = IXP4XX_EXP_NORMAL_BASE;
+       dev_info(dev, "expansion bus at %08x\n", eb->bus_base);
+
+       if (eb->is_43x) {
+               /* Check some fuses */
+               regmap_read(eb->rmap, IXP43X_EXP_UNIT_FUSE_RESET, &val);
+               switch (FIELD_GET(IXP43X_EXP_FUSE_SPEED_MASK, val)) {
+               case 0:
+                       dev_info(dev, "IXP43x at 533 MHz\n");
+                       break;
+               case 1:
+                       dev_info(dev, "IXP43x at 400 MHz\n");
+                       break;
+               case 2:
+                       dev_info(dev, "IXP43x at 667 MHz\n");
+                       break;
+               default:
+                       dev_info(dev, "IXP43x unknown speed\n");
+                       break;
+               }
+       }
+
+       /* Walk over the child nodes and see what chipselects we use */
+       for_each_available_child_of_node(np, child) {
+               ixp4xx_exp_setup_child(eb, child);
+               /* We have at least one child */
+               have_children = true;
+       }
+
+       if (have_children)
+               return of_platform_default_populate(np, NULL, dev);
+
+       return 0;
+}
+
+static const struct of_device_id ixp4xx_exp_of_match[] = {
+       { .compatible = "intel,ixp42x-expansion-bus-controller", },
+       { .compatible = "intel,ixp43x-expansion-bus-controller", },
+       { .compatible = "intel,ixp45x-expansion-bus-controller", },
+       { .compatible = "intel,ixp46x-expansion-bus-controller", },
+       { }
+};
+
+static struct platform_driver ixp4xx_exp_driver = {
+       .probe = ixp4xx_exp_probe,
+       .driver = {
+               .name = "intel-extbus",
+               .of_match_table = ixp4xx_exp_of_match,
+       },
+};
+module_platform_driver(ixp4xx_exp_driver);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Intel IXP4xx external bus driver");
+MODULE_LICENSE("GPL");
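
ixp4xx_exp_setup_chipselect() above clears each field with its mask and ORs in the new value with an explicit shift, so a single table entry can describe both single-bit flags and multi-bit fields. For a pure multi-bit field, FIELD_PREP() from <linux/bitfield.h> (which the driver already includes) expresses the same update without a separate shift constant; a sketch for the 4-bit T3 field, with an invented helper name:

/* Both forms place the value 5 into bits 25:22 of cs_cfg */
static u32 demo_set_t3(u32 cs_cfg)
{
        cs_cfg &= ~IXP4XX_EXP_T3_MASK;
        cs_cfg |= FIELD_PREP(IXP4XX_EXP_T3_MASK, 5); /* == 5 << IXP4XX_EXP_T3_SHIFT */
        return cs_cfg;
}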
index 22acde1..fc9196f 100644 (file)
@@ -773,11 +773,18 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
        cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
 
        chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
-       mhi_chan = &mhi_cntrl->mhi_chan[chan];
-       write_lock_bh(&mhi_chan->lock);
-       mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
-       complete(&mhi_chan->completion);
-       write_unlock_bh(&mhi_chan->lock);
+
+       if (chan < mhi_cntrl->max_chan &&
+           mhi_cntrl->mhi_chan[chan].configured) {
+               mhi_chan = &mhi_cntrl->mhi_chan[chan];
+               write_lock_bh(&mhi_chan->lock);
+               mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
+               complete(&mhi_chan->completion);
+               write_unlock_bh(&mhi_chan->lock);
+       } else {
+               dev_err(&mhi_cntrl->mhi_dev->dev,
+                       "Completion packet for invalid channel ID: %d\n", chan);
+       }
 
        mhi_del_ring_element(mhi_cntrl, mhi_ring);
 }
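
The hunk above stops trusting the channel ID carried in a command-completion packet: the index comes from the device side, so it is bounds-checked against max_chan and the channel's configured flag before the host-side array is dereferenced. The same defensive lookup in isolation, as a sketch with an invented helper name:

static struct mhi_chan *demo_get_chan(struct mhi_controller *mhi_cntrl, u32 chan)
{
        /* Reject device-supplied indexes outside the configured range */
        if (chan >= mhi_cntrl->max_chan || !mhi_cntrl->mhi_chan[chan].configured)
                return NULL;
        return &mhi_cntrl->mhi_chan[chan];
}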
index ca3bc40..4dd1077 100644 (file)
@@ -32,6 +32,8 @@
  * @edl: emergency download mode firmware path (if any)
  * @bar_num: PCI base address register to use for MHI MMIO register space
  * @dma_data_width: DMA transfer word size (32 or 64 bits)
+ * @sideband_wake: Devices using a dedicated sideband GPIO for wakeup instead
+ *                of inband wake support (such as sdx24)
  */
 struct mhi_pci_dev_info {
        const struct mhi_controller_config *config;
@@ -40,6 +42,7 @@ struct mhi_pci_dev_info {
        const char *edl;
        unsigned int bar_num;
        unsigned int dma_data_width;
+       bool sideband_wake;
 };
 
 #define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
@@ -72,6 +75,22 @@ struct mhi_pci_dev_info {
                .doorbell_mode_switch = false,          \
        }
 
+#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
+       {                                               \
+               .num = ch_num,                          \
+               .name = ch_name,                        \
+               .num_elements = el_count,               \
+               .event_ring = ev_ring,                  \
+               .dir = DMA_FROM_DEVICE,                 \
+               .ee_mask = BIT(MHI_EE_AMSS),            \
+               .pollcfg = 0,                           \
+               .doorbell = MHI_DB_BRST_DISABLE,        \
+               .lpm_notify = false,                    \
+               .offload_channel = false,               \
+               .doorbell_mode_switch = false,          \
+               .auto_queue = true,                     \
+       }
+
 #define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
        {                                       \
                .num_elements = el_count,       \
@@ -210,7 +229,7 @@ static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
        MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
        MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
        MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
-       MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0),
+       MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
        MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
        MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
        MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
@@ -242,7 +261,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
        .edl = "qcom/sdx65m/edl.mbn",
        .config = &modem_qcom_v1_mhiv_config,
        .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
-       .dma_data_width = 32
+       .dma_data_width = 32,
+       .sideband_wake = false,
 };
 
 static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
@@ -251,7 +271,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
        .edl = "qcom/sdx55m/edl.mbn",
        .config = &modem_qcom_v1_mhiv_config,
        .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
-       .dma_data_width = 32
+       .dma_data_width = 32,
+       .sideband_wake = false,
 };
 
 static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
@@ -259,7 +280,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
        .edl = "qcom/prog_firehose_sdx24.mbn",
        .config = &modem_qcom_v1_mhiv_config,
        .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
-       .dma_data_width = 32
+       .dma_data_width = 32,
+       .sideband_wake = true,
 };
 
 static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
@@ -301,7 +323,8 @@ static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
        .edl = "qcom/prog_firehose_sdx24.mbn",
        .config = &modem_quectel_em1xx_config,
        .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
-       .dma_data_width = 32
+       .dma_data_width = 32,
+       .sideband_wake = true,
 };
 
 static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
@@ -339,7 +362,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
        .edl = "qcom/sdx55m/edl.mbn",
        .config = &modem_foxconn_sdx55_config,
        .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
-       .dma_data_width = 32
+       .dma_data_width = 32,
+       .sideband_wake = false,
 };
 
 static const struct pci_device_id mhi_pci_id_table[] = {
@@ -640,9 +664,12 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        mhi_cntrl->status_cb = mhi_pci_status_cb;
        mhi_cntrl->runtime_get = mhi_pci_runtime_get;
        mhi_cntrl->runtime_put = mhi_pci_runtime_put;
-       mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
-       mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
-       mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
+
+       if (info->sideband_wake) {
+               mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
+               mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
+               mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
+       }
 
        err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
        if (err)
index 38cb116..8b718e5 100644 (file)
@@ -100,6 +100,7 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
  * @cookie: data used by legacy platform callbacks
  * @name: name if available
  * @revision: interconnect target module revision
+ * @reserved: target module is reserved and already in use
  * @enabled: sysc runtime enabled status
  * @needs_resume: runtime resume needed on resume from suspend
  * @child_needs_resume: runtime resume needed for child on resume from suspend
@@ -130,6 +131,7 @@ struct sysc {
        struct ti_sysc_cookie cookie;
        const char *name;
        u32 revision;
+       unsigned int reserved:1;
        unsigned int enabled:1;
        unsigned int needs_resume:1;
        unsigned int child_needs_resume:1;
@@ -853,7 +855,7 @@ static int sysc_check_registers(struct sysc *ddata)
 }
 
 /**
- * syc_ioremap - ioremap register space for the interconnect target module
+ * sysc_ioremap - ioremap register space for the interconnect target module
  * @ddata: device driver data
  *
  * Note that the interconnect target module registers can be anywhere
@@ -1444,10 +1446,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
                   SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET),
        SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff,
                   SYSC_QUIRK_LEGACY_IDLE),
-       SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff,
-                  SYSC_QUIRK_LEGACY_IDLE),
-       SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff,
-                  SYSC_QUIRK_LEGACY_IDLE),
        SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
                   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
        SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
@@ -1499,6 +1497,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
                   SYSC_MODULE_QUIRK_SGX),
        SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff,
                   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+       SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff,
+                  SYSC_QUIRK_SWSUP_SIDLE),
        SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0,
                   SYSC_MODULE_QUIRK_RTC_UNLOCK),
        SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff,
@@ -1555,7 +1555,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
        SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
        SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0),
        SYSC_QUIRK("keypad", 0x4a31c000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 0),
-       SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff, 0),
        SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0),
        SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0),
        SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0),
@@ -1583,6 +1582,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
        SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0),
        SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0),
        SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
+       SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff, 0),
+       SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff, 0),
        SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0),
        SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0),
        SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000013, 0xffffffff, 0),
@@ -3093,8 +3094,8 @@ static int sysc_probe(struct platform_device *pdev)
                return error;
 
        error = sysc_check_active_timer(ddata);
-       if (error)
-               return error;
+       if (error == -EBUSY)
+               ddata->reserved = true;
 
        error = sysc_get_clocks(ddata);
        if (error)
@@ -3109,9 +3110,8 @@ static int sysc_probe(struct platform_device *pdev)
                goto unprepare;
 
        pm_runtime_enable(ddata->dev);
-       error = pm_runtime_get_sync(ddata->dev);
+       error = pm_runtime_resume_and_get(ddata->dev);
        if (error < 0) {
-               pm_runtime_put_noidle(ddata->dev);
                pm_runtime_disable(ddata->dev);
                goto unprepare;
        }
@@ -3130,11 +3130,15 @@ static int sysc_probe(struct platform_device *pdev)
        sysc_show_registers(ddata);
 
        ddata->dev->type = &sysc_device_type;
-       error = of_platform_populate(ddata->dev->of_node, sysc_match_table,
-                                    pdata ? pdata->auxdata : NULL,
-                                    ddata->dev);
-       if (error)
-               goto err;
+
+       if (!ddata->reserved) {
+               error = of_platform_populate(ddata->dev->of_node,
+                                            sysc_match_table,
+                                            pdata ? pdata->auxdata : NULL,
+                                            ddata->dev);
+               if (error)
+                       goto err;
+       }
 
        INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);
 
@@ -3165,9 +3169,8 @@ static int sysc_remove(struct platform_device *pdev)
 
        cancel_delayed_work_sync(&ddata->idle_work);
 
-       error = pm_runtime_get_sync(ddata->dev);
+       error = pm_runtime_resume_and_get(ddata->dev);
        if (error < 0) {
-               pm_runtime_put_noidle(ddata->dev);
                pm_runtime_disable(ddata->dev);
                goto unprepare;
        }
index 027484e..3c99696 100644 (file)
@@ -75,6 +75,7 @@ static int __op_panel_update_display(void)
                                rc);
                        break;
                }
+               break;
        case OPAL_SUCCESS:
                break;
        default:
index 50b5269..ae24e03 100644 (file)
@@ -30,8 +30,9 @@ enum clk_ids {
        CLK_PLL2_DIV20,
        CLK_PLL3,
        CLK_PLL3_DIV2,
+       CLK_PLL3_DIV2_4,
+       CLK_PLL3_DIV2_4_2,
        CLK_PLL3_DIV4,
-       CLK_PLL3_DIV8,
        CLK_PLL4,
        CLK_PLL5,
        CLK_PLL5_DIV2,
@@ -42,12 +43,13 @@ enum clk_ids {
 };
 
 /* Divider tables */
-static const struct clk_div_table dtable_3b[] = {
+static const struct clk_div_table dtable_1_32[] = {
        {0, 1},
        {1, 2},
        {2, 4},
        {3, 8},
        {4, 32},
+       {0, 0},
 };
 
 static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
@@ -66,47 +68,56 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
        DEF_FIXED(".pll2_div20", CLK_PLL2_DIV20, CLK_PLL2, 1, 20),
 
        DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 1, 2),
+       DEF_FIXED(".pll3_div2_4", CLK_PLL3_DIV2_4, CLK_PLL3_DIV2, 1, 4),
+       DEF_FIXED(".pll3_div2_4_2", CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV2_4, 1, 2),
        DEF_FIXED(".pll3_div4", CLK_PLL3_DIV4, CLK_PLL3, 1, 4),
-       DEF_FIXED(".pll3_div8", CLK_PLL3_DIV8, CLK_PLL3, 1, 8),
 
        /* Core output clk */
        DEF_FIXED("I", R9A07G044_CLK_I, CLK_PLL1, 1, 1),
        DEF_DIV("P0", R9A07G044_CLK_P0, CLK_PLL2_DIV16, DIVPL2A,
-               dtable_3b, CLK_DIVIDER_HIWORD_MASK),
+               dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
        DEF_FIXED("TSU", R9A07G044_CLK_TSU, CLK_PLL2_DIV20, 1, 1),
-       DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV8,
-               DIVPL3B, dtable_3b, CLK_DIVIDER_HIWORD_MASK),
+       DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV2_4,
+               DIVPL3B, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
+       DEF_DIV("P2", R9A07G044_CLK_P2, CLK_PLL3_DIV2_4_2,
+               DIVPL3A, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
 };
 
 static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
-       DEF_MOD("gic",          R9A07G044_CLK_GIC600,
-                               R9A07G044_CLK_P1,
-                               0x514, BIT(0), (BIT(0) | BIT(1))),
-       DEF_MOD("ia55",         R9A07G044_CLK_IA55,
-                               R9A07G044_CLK_P1,
-                               0x518, (BIT(0) | BIT(1)), BIT(0)),
-       DEF_MOD("scif0",        R9A07G044_CLK_SCIF0,
-                               R9A07G044_CLK_P0,
-                               0x584, BIT(0), BIT(0)),
-       DEF_MOD("scif1",        R9A07G044_CLK_SCIF1,
-                               R9A07G044_CLK_P0,
-                               0x584, BIT(1), BIT(1)),
-       DEF_MOD("scif2",        R9A07G044_CLK_SCIF2,
-                               R9A07G044_CLK_P0,
-                               0x584, BIT(2), BIT(2)),
-       DEF_MOD("scif3",        R9A07G044_CLK_SCIF3,
-                               R9A07G044_CLK_P0,
-                               0x584, BIT(3), BIT(3)),
-       DEF_MOD("scif4",        R9A07G044_CLK_SCIF4,
-                               R9A07G044_CLK_P0,
-                               0x584, BIT(4), BIT(4)),
-       DEF_MOD("sci0",         R9A07G044_CLK_SCI0,
-                               R9A07G044_CLK_P0,
-                               0x588, BIT(0), BIT(0)),
+       DEF_MOD("gic",          R9A07G044_GIC600_GICCLK, R9A07G044_CLK_P1,
+                               0x514, 0),
+       DEF_MOD("ia55_pclk",    R9A07G044_IA55_PCLK, R9A07G044_CLK_P2,
+                               0x518, 0),
+       DEF_MOD("ia55_clk",     R9A07G044_IA55_CLK, R9A07G044_CLK_P1,
+                               0x518, 1),
+       DEF_MOD("scif0",        R9A07G044_SCIF0_CLK_PCK, R9A07G044_CLK_P0,
+                               0x584, 0),
+       DEF_MOD("scif1",        R9A07G044_SCIF1_CLK_PCK, R9A07G044_CLK_P0,
+                               0x584, 1),
+       DEF_MOD("scif2",        R9A07G044_SCIF2_CLK_PCK, R9A07G044_CLK_P0,
+                               0x584, 2),
+       DEF_MOD("scif3",        R9A07G044_SCIF3_CLK_PCK, R9A07G044_CLK_P0,
+                               0x584, 3),
+       DEF_MOD("scif4",        R9A07G044_SCIF4_CLK_PCK, R9A07G044_CLK_P0,
+                               0x584, 4),
+       DEF_MOD("sci0",         R9A07G044_SCI0_CLKP, R9A07G044_CLK_P0,
+                               0x588, 0),
+};
+
+static struct rzg2l_reset r9a07g044_resets[] = {
+       DEF_RST(R9A07G044_GIC600_GICRESET_N, 0x814, 0),
+       DEF_RST(R9A07G044_GIC600_DBG_GICRESET_N, 0x814, 1),
+       DEF_RST(R9A07G044_IA55_RESETN, 0x818, 0),
+       DEF_RST(R9A07G044_SCIF0_RST_SYSTEM_N, 0x884, 0),
+       DEF_RST(R9A07G044_SCIF1_RST_SYSTEM_N, 0x884, 1),
+       DEF_RST(R9A07G044_SCIF2_RST_SYSTEM_N, 0x884, 2),
+       DEF_RST(R9A07G044_SCIF3_RST_SYSTEM_N, 0x884, 3),
+       DEF_RST(R9A07G044_SCIF4_RST_SYSTEM_N, 0x884, 4),
+       DEF_RST(R9A07G044_SCI0_RST, 0x888, 0),
 };
 
 static const unsigned int r9a07g044_crit_mod_clks[] __initconst = {
-       MOD_CLK_BASE + R9A07G044_CLK_GIC600,
+       MOD_CLK_BASE + R9A07G044_GIC600_GICCLK,
 };
 
 const struct rzg2l_cpg_info r9a07g044_cpg_info = {
@@ -123,5 +134,9 @@ const struct rzg2l_cpg_info r9a07g044_cpg_info = {
        /* Module Clocks */
        .mod_clks = r9a07g044_mod_clks,
        .num_mod_clks = ARRAY_SIZE(r9a07g044_mod_clks),
-       .num_hw_mod_clks = R9A07G044_CLK_MIPI_DSI_PIN + 1,
+       .num_hw_mod_clks = R9A07G044_TSU_PCLK + 1,
+
+       /* Resets */
+       .resets = r9a07g044_resets,
+       .num_resets = ARRAY_SIZE(r9a07g044_resets),
 };
index 5009b9e..e7c59af 100644 (file)
@@ -47,9 +47,9 @@
 #define SDIV(val)              DIV_RSMASK(val, 0, 0x7)
 
 #define CLK_ON_R(reg)          (reg)
-#define CLK_MON_R(reg)         (0x680 - 0x500 + (reg))
-#define CLK_RST_R(reg)         (0x800 - 0x500 + (reg))
-#define CLK_MRST_R(reg)                (0x980 - 0x500 + (reg))
+#define CLK_MON_R(reg)         (0x180 + (reg))
+#define CLK_RST_R(reg)         (reg)
+#define CLK_MRST_R(reg)                (0x180 + (reg))
 
 #define GET_REG_OFFSET(val)            ((val >> 20) & 0xfff)
 #define GET_REG_SAMPLL_CLK1(val)       ((val >> 22) & 0xfff)
@@ -78,6 +78,7 @@ struct rzg2l_cpg_priv {
        struct clk **clks;
        unsigned int num_core_clks;
        unsigned int num_mod_clks;
+       unsigned int num_resets;
        unsigned int last_dt_core_clk;
 
        struct raw_notifier_head notifiers;
@@ -315,15 +316,13 @@ fail:
  *
  * @hw: handle between common and hardware-specific interfaces
  * @off: register offset
- * @onoff: ON/MON bits
- * @reset: reset bits
+ * @bit: ON/MON bit
  * @priv: CPG/MSTP private data
  */
 struct mstp_clock {
        struct clk_hw hw;
        u16 off;
-       u8 onoff;
-       u8 reset;
+       u8 bit;
        struct rzg2l_cpg_priv *priv;
 };
 
@@ -337,6 +336,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
        struct device *dev = priv->dev;
        unsigned long flags;
        unsigned int i;
+       u32 bitmask = BIT(clock->bit);
        u32 value;
 
        if (!clock->off) {
@@ -349,9 +349,9 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
        spin_lock_irqsave(&priv->rmw_lock, flags);
 
        if (enable)
-               value = (clock->onoff << 16) | clock->onoff;
+               value = (bitmask << 16) | bitmask;
        else
-               value = clock->onoff << 16;
+               value = bitmask << 16;
        writel(value, priv->base + CLK_ON_R(reg));
 
        spin_unlock_irqrestore(&priv->rmw_lock, flags);
@@ -360,7 +360,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
                return 0;
 
        for (i = 1000; i > 0; --i) {
-               if (((readl(priv->base + CLK_MON_R(reg))) & clock->onoff))
+               if (((readl(priv->base + CLK_MON_R(reg))) & bitmask))
                        break;
                cpu_relax();
        }
@@ -388,6 +388,7 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
 {
        struct mstp_clock *clock = to_mod_clock(hw);
        struct rzg2l_cpg_priv *priv = clock->priv;
+       u32 bitmask = BIT(clock->bit);
        u32 value;
 
        if (!clock->off) {
@@ -397,7 +398,7 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
 
        value = readl(priv->base + CLK_MON_R(clock->off));
 
-       return !(value & clock->onoff);
+       return !(value & bitmask);
 }
 
 static const struct clk_ops rzg2l_mod_clock_ops = {
@@ -457,8 +458,7 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
        init.num_parents = 1;
 
        clock->off = mod->off;
-       clock->onoff = mod->onoff;
-       clock->reset = mod->reset;
+       clock->bit = mod->bit;
        clock->priv = priv;
        clock->hw.init = &init;
 
@@ -483,12 +483,11 @@ static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
 {
        struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
        const struct rzg2l_cpg_info *info = priv->info;
-       unsigned int reg = info->mod_clks[id].off;
-       u32 dis = info->mod_clks[id].reset;
+       unsigned int reg = info->resets[id].off;
+       u32 dis = BIT(info->resets[id].bit);
        u32 we = dis << 16;
 
-       dev_dbg(rcdev->dev, "reset name:%s id:%ld offset:0x%x\n",
-               info->mod_clks[id].name, id, CLK_RST_R(reg));
+       dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
 
        /* Reset module */
        writel(we, priv->base + CLK_RST_R(reg));
@@ -507,11 +506,10 @@ static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
 {
        struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
        const struct rzg2l_cpg_info *info = priv->info;
-       unsigned int reg = info->mod_clks[id].off;
-       u32 value = info->mod_clks[id].reset << 16;
+       unsigned int reg = info->resets[id].off;
+       u32 value = BIT(info->resets[id].bit) << 16;
 
-       dev_dbg(rcdev->dev, "assert name:%s id:%ld offset:0x%x\n",
-               info->mod_clks[id].name, id, CLK_RST_R(reg));
+       dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
 
        writel(value, priv->base + CLK_RST_R(reg));
        return 0;
@@ -522,12 +520,12 @@ static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
 {
        struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
        const struct rzg2l_cpg_info *info = priv->info;
-       unsigned int reg = info->mod_clks[id].off;
-       u32 dis = info->mod_clks[id].reset;
+       unsigned int reg = info->resets[id].off;
+       u32 dis = BIT(info->resets[id].bit);
        u32 value = (dis << 16) | dis;
 
-       dev_dbg(rcdev->dev, "deassert name:%s id:%ld offset:0x%x\n",
-               info->mod_clks[id].name, id, CLK_RST_R(reg));
+       dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
+               CLK_RST_R(reg));
 
        writel(value, priv->base + CLK_RST_R(reg));
        return 0;
@@ -538,8 +536,8 @@ static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
 {
        struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
        const struct rzg2l_cpg_info *info = priv->info;
-       unsigned int reg = info->mod_clks[id].off;
-       u32 bitmask = info->mod_clks[id].reset;
+       unsigned int reg = info->resets[id].off;
+       u32 bitmask = BIT(info->resets[id].bit);
 
        return !(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
 }
@@ -554,9 +552,11 @@ static const struct reset_control_ops rzg2l_cpg_reset_ops = {
 static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
                                 const struct of_phandle_args *reset_spec)
 {
+       struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
+       const struct rzg2l_cpg_info *info = priv->info;
        unsigned int id = reset_spec->args[0];
 
-       if (id >= rcdev->nr_resets) {
+       if (id >= rcdev->nr_resets || !info->resets[id].off) {
                dev_err(rcdev->dev, "Invalid reset index %u\n", id);
                return -EINVAL;
        }
@@ -571,7 +571,7 @@ static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
        priv->rcdev.dev = priv->dev;
        priv->rcdev.of_reset_n_cells = 1;
        priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
-       priv->rcdev.nr_resets = priv->num_mod_clks;
+       priv->rcdev.nr_resets = priv->num_resets;
 
        return devm_reset_controller_register(priv->dev, &priv->rcdev);
 }
@@ -594,42 +594,49 @@ static int rzg2l_cpg_attach_dev(struct generic_pm_domain *unused, struct device
 {
        struct device_node *np = dev->of_node;
        struct of_phandle_args clkspec;
+       bool once = true;
        struct clk *clk;
        int error;
        int i = 0;
 
        while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
                                           &clkspec)) {
-               if (rzg2l_cpg_is_pm_clk(&clkspec))
-                       goto found;
-
-               of_node_put(clkspec.np);
+               if (rzg2l_cpg_is_pm_clk(&clkspec)) {
+                       if (once) {
+                               once = false;
+                               error = pm_clk_create(dev);
+                               if (error) {
+                                       of_node_put(clkspec.np);
+                                       goto err;
+                               }
+                       }
+                       clk = of_clk_get_from_provider(&clkspec);
+                       of_node_put(clkspec.np);
+                       if (IS_ERR(clk)) {
+                               error = PTR_ERR(clk);
+                               goto fail_destroy;
+                       }
+
+                       error = pm_clk_add_clk(dev, clk);
+                       if (error) {
+                               dev_err(dev, "pm_clk_add_clk failed %d\n",
+                                       error);
+                               goto fail_put;
+                       }
+               } else {
+                       of_node_put(clkspec.np);
+               }
                i++;
        }
 
        return 0;
 
-found:
-       clk = of_clk_get_from_provider(&clkspec);
-       of_node_put(clkspec.np);
-
-       if (IS_ERR(clk))
-               return PTR_ERR(clk);
-
-       error = pm_clk_create(dev);
-       if (error)
-               goto fail_put;
-
-       error = pm_clk_add_clk(dev, clk);
-       if (error)
-               goto fail_destroy;
-
-       return 0;
+fail_put:
+       clk_put(clk);
 
 fail_destroy:
        pm_clk_destroy(dev);
-fail_put:
-       clk_put(clk);
+err:
        return error;
 }
 
@@ -692,6 +699,7 @@ static int __init rzg2l_cpg_probe(struct platform_device *pdev)
        priv->clks = clks;
        priv->num_core_clks = info->num_total_core_clks;
        priv->num_mod_clks = info->num_hw_mod_clks;
+       priv->num_resets = info->num_resets;
        priv->last_dt_core_clk = info->last_dt_core_clk;
 
        for (i = 0; i < nclks; i++)
index 3948bdd..6369528 100644 (file)
@@ -21,6 +21,7 @@
 #define DDIV_PACK(offset, bitpos, size) \
                (((offset) << 20) | ((bitpos) << 12) | ((size) << 8))
 #define DIVPL2A                DDIV_PACK(CPG_PL2_DDIV, 0, 3)
+#define DIVPL3A                DDIV_PACK(CPG_PL3A_DDIV, 0, 3)
 #define DIVPL3B                DDIV_PACK(CPG_PL3A_DDIV, 4, 3)
 
 /**
@@ -76,26 +77,40 @@ enum clk_types {
  * @id: clock index in array containing all Core and Module Clocks
  * @parent: id of parent clock
  * @off: register offset
- * @onoff: ON/MON bits
- * @reset: reset bits
+ * @bit: ON/MON bit
  */
 struct rzg2l_mod_clk {
        const char *name;
        unsigned int id;
        unsigned int parent;
        u16 off;
-       u8 onoff;
-       u8 reset;
+       u8 bit;
 };
 
-#define DEF_MOD(_name, _id, _parent, _off, _onoff, _reset)     \
-       [_id] = { \
+#define DEF_MOD(_name, _id, _parent, _off, _bit)       \
+       { \
                .name = _name, \
-               .id = MOD_CLK_BASE + _id, \
+               .id = MOD_CLK_BASE + (_id), \
                .parent = (_parent), \
                .off = (_off), \
-               .onoff = (_onoff), \
-               .reset = (_reset) \
+               .bit = (_bit), \
+       }
+
+/**
+ * struct rzg2l_reset - Reset definitions
+ *
+ * @off: register offset
+ * @bit: reset bit
+ */
+struct rzg2l_reset {
+       u16 off;
+       u8 bit;
+};
+
+#define DEF_RST(_id, _off, _bit)       \
+       [_id] = { \
+               .off = (_off), \
+               .bit = (_bit) \
        }
 
 /**
@@ -126,6 +141,10 @@ struct rzg2l_cpg_info {
        unsigned int num_mod_clks;
        unsigned int num_hw_mod_clks;
 
+       /* Resets */
+       const struct rzg2l_reset *resets;
+       unsigned int num_resets;
+
        /* Critical Module Clocks that should not be disabled */
        const unsigned int *crit_mod_clks;
        unsigned int num_crit_mod_clks;
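
A sketch of the register protocol the new reset ops implement, derived from the rzg2l-cpg.c hunks above (base and rst are illustrative locals): the upper 16 bits of each CLK_RST register appear to act as per-bit write enables, so assert writes only the enable bit while deassert writes the enable bit together with the value bit.

    u32 mask = BIT(rst->bit);

    writel(mask << 16, base + CLK_RST_R(rst->off));          /* assert (value 0) */
    writel((mask << 16) | mask, base + CLK_RST_R(rst->off)); /* deassert (value 1) */
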
index 9396745..cbb1849 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/delay.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/platform_device.h>
 /* Goes away with OF conversion */
 #include <linux/platform_data/timer-ixp4xx.h>
 
@@ -29,9 +30,6 @@
 #define IXP4XX_OSRT1_OFFSET    0x08  /* Timer 1 Reload */
 #define IXP4XX_OST2_OFFSET     0x0C  /* Timer 2 Timestamp */
 #define IXP4XX_OSRT2_OFFSET    0x10  /* Timer 2 Reload */
-#define IXP4XX_OSWT_OFFSET     0x14  /* Watchdog Timer */
-#define IXP4XX_OSWE_OFFSET     0x18  /* Watchdog Enable */
-#define IXP4XX_OSWK_OFFSET     0x1C  /* Watchdog Key */
 #define IXP4XX_OSST_OFFSET     0x20  /* Timer Status */
 
 /*
 #define IXP4XX_OSST_TIMER_1_PEND       0x00000001
 #define IXP4XX_OSST_TIMER_2_PEND       0x00000002
 #define IXP4XX_OSST_TIMER_TS_PEND      0x00000004
-#define IXP4XX_OSST_TIMER_WDOG_PEND    0x00000008
-#define IXP4XX_OSST_TIMER_WARM_RESET   0x00000010
-
-#define        IXP4XX_WDT_KEY                  0x0000482E
-#define        IXP4XX_WDT_RESET_ENABLE         0x00000001
-#define        IXP4XX_WDT_IRQ_ENABLE           0x00000002
-#define        IXP4XX_WDT_COUNT_ENABLE         0x00000004
+/* Remaining registers are for the watchdog and defined in the watchdog driver */
 
 struct ixp4xx_timer {
        void __iomem *base;
-       unsigned int tick_rate;
        u32 latch;
        struct clock_event_device clkevt;
 #ifdef CONFIG_ARM
@@ -181,7 +172,6 @@ static __init int ixp4xx_timer_register(void __iomem *base,
        if (!tmr)
                return -ENOMEM;
        tmr->base = base;
-       tmr->tick_rate = timer_freq;
 
        /*
         * The timer register doesn't allow to specify the two least
@@ -239,6 +229,40 @@ static __init int ixp4xx_timer_register(void __iomem *base,
        return 0;
 }
 
+static struct platform_device ixp4xx_watchdog_device = {
+       .name = "ixp4xx-watchdog",
+       .id = -1,
+};
+
+/*
+ * This probe gets called after the timer is already up and running. Its main
+ * job on this platform is to spawn the watchdog device as a child.
+ */
+static int ixp4xx_timer_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+
+       /* Pass the base address as platform data and nothing else */
+       ixp4xx_watchdog_device.dev.platform_data = local_ixp4xx_timer->base;
+       ixp4xx_watchdog_device.dev.parent = dev;
+       return platform_device_register(&ixp4xx_watchdog_device);
+}
+
+static const struct of_device_id ixp4xx_timer_dt_id[] = {
+       { .compatible = "intel,ixp4xx-timer", },
+       { /* sentinel */ },
+};
+
+static struct platform_driver ixp4xx_timer_driver = {
+       .probe  = ixp4xx_timer_probe,
+       .driver = {
+               .name = "ixp4xx-timer",
+               .of_match_table = ixp4xx_timer_dt_id,
+               .suppress_bind_attrs = true,
+       },
+};
+builtin_platform_driver(ixp4xx_timer_driver);
+
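
On the consumer side, the watchdog driver would presumably read the base pointer back out of platform data. A hypothetical sketch (only the platform-data handoff is shown by this patch; the probe body is an assumption):

    static int ixp4xx_wdt_probe(struct platform_device *pdev)
    {
            /* base address handed over by the timer driver above */
            void __iomem *base = dev_get_platdata(&pdev->dev);

            if (!base)
                    return -ENODEV;
            /* ... register the watchdog using base ... */
            return 0;
    }
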
 /**
  * ixp4xx_timer_setup() - Timer setup function to be called from boardfiles
  * @timerbase: physical base of timer block
index 182a4db..c538a15 100644 (file)
@@ -942,8 +942,6 @@ static int __init longhaul_init(void)
                return cpufreq_register_driver(&longhaul_driver);
        case 10:
                pr_err("Use acpi-cpufreq driver for VIA C7\n");
-       default:
-               ;
        }
 
        return -ENODEV;
index 20d9bdd..394e6e1 100644 (file)
@@ -211,8 +211,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
                                         struct sync_file *b)
 {
        struct sync_file *sync_file;
-       struct dma_fence **fences, **nfences, **a_fences, **b_fences;
-       int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
+       struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences;
+       int i = 0, i_a, i_b, num_fences, a_num_fences, b_num_fences;
 
        sync_file = sync_file_alloc();
        if (!sync_file)
@@ -236,7 +236,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
         * If a sync_file can only be created with sync_file_merge
         * and sync_file_create, this is a reasonable assumption.
         */
-       for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
+       for (i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
                struct dma_fence *pt_a = a_fences[i_a];
                struct dma_fence *pt_b = b_fences[i_b];
 
@@ -277,15 +277,16 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
                fences = nfences;
        }
 
-       if (sync_file_set_fence(sync_file, fences, i) < 0) {
-               kfree(fences);
+       if (sync_file_set_fence(sync_file, fences, i) < 0)
                goto err;
-       }
 
        strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
        return sync_file;
 
 err:
+       while (i)
+               dma_fence_put(fences[--i]);
+       kfree(fences);
        fput(sync_file->file);
        return NULL;
 
index 8070fd6..cacc725 100644 (file)
@@ -198,12 +198,12 @@ struct sdma_script_start_addrs {
        s32 per_2_firi_addr;
        s32 mcu_2_firi_addr;
        s32 uart_2_per_addr;
-       s32 uart_2_mcu_addr;
+       s32 uart_2_mcu_ram_addr;
        s32 per_2_app_addr;
        s32 mcu_2_app_addr;
        s32 per_2_per_addr;
        s32 uartsh_2_per_addr;
-       s32 uartsh_2_mcu_addr;
+       s32 uartsh_2_mcu_ram_addr;
        s32 per_2_shp_addr;
        s32 mcu_2_shp_addr;
        s32 ata_2_mcu_addr;
@@ -230,6 +230,10 @@ struct sdma_script_start_addrs {
        s32 zcanfd_2_mcu_addr;
        s32 zqspi_2_mcu_addr;
        s32 mcu_2_ecspi_addr;
+       s32 mcu_2_sai_addr;
+       s32 sai_2_mcu_addr;
+       s32 uart_2_mcu_addr;
+       s32 uartsh_2_mcu_addr;
        /* End of v3 array */
        s32 mcu_2_zqspi_addr;
        /* End of v4 array */
@@ -433,9 +437,10 @@ struct sdma_channel {
        unsigned long                   watermark_level;
        u32                             shp_addr, per_addr;
        enum dma_status                 status;
-       bool                            context_loaded;
        struct imx_dma_data             data;
        struct work_struct              terminate_worker;
+       struct list_head                terminated;
+       bool                            is_ram_script;
 };
 
 #define IMX_DMA_SG_LOOP                BIT(0)
@@ -476,6 +481,13 @@ struct sdma_driver_data {
        int num_events;
        struct sdma_script_start_addrs  *script_addrs;
        bool check_ratio;
+       /*
+        * The ecspi ERR009165 fix has to be done in the SDMA script,
+        * and the erratum is fixed in hardware from i.MX6UL onwards.
+        * For more information, see the link below:
+        * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
+        */
+       bool ecspi_fixed;
 };
 
 struct sdma_engine {
@@ -499,6 +511,7 @@ struct sdma_engine {
        struct sdma_buffer_descriptor   *bd0;
        /* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/
        bool                            clk_ratio;
+       bool                            fw_loaded;
 };
 
 static int sdma_config_write(struct dma_chan *chan,
@@ -595,6 +608,13 @@ static struct sdma_driver_data sdma_imx6q = {
        .script_addrs = &sdma_script_imx6q,
 };
 
+static struct sdma_driver_data sdma_imx6ul = {
+       .chnenbl0 = SDMA_CHNENBL0_IMX35,
+       .num_events = 48,
+       .script_addrs = &sdma_script_imx6q,
+       .ecspi_fixed = true,
+};
+
 static struct sdma_script_start_addrs sdma_script_imx7d = {
        .ap_2_ap_addr = 644,
        .uart_2_mcu_addr = 819,
@@ -628,6 +648,7 @@ static const struct of_device_id sdma_dt_ids[] = {
        { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
        { .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
        { .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
+       { .compatible = "fsl,imx6ul-sdma", .data = &sdma_imx6ul, },
        { .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
        { /* sentinel */ }
 };
@@ -919,6 +940,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
        sdmac->pc_to_device = 0;
        sdmac->device_to_device = 0;
        sdmac->pc_to_pc = 0;
+       sdmac->is_ram_script = false;
 
        switch (peripheral_type) {
        case IMX_DMATYPE_MEMORY:
@@ -945,6 +967,17 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
                emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
                break;
        case IMX_DMATYPE_CSPI:
+               per_2_emi = sdma->script_addrs->app_2_mcu_addr;
+
+               /* Use rom script mcu_2_app if ERR009165 fixed */
+               if (sdmac->sdma->drvdata->ecspi_fixed) {
+                       emi_2_per = sdma->script_addrs->mcu_2_app_addr;
+               } else {
+                       emi_2_per = sdma->script_addrs->mcu_2_ecspi_addr;
+                       sdmac->is_ram_script = true;
+               }
+
+               break;
        case IMX_DMATYPE_EXT:
        case IMX_DMATYPE_SSI:
        case IMX_DMATYPE_SAI:
@@ -954,6 +987,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
        case IMX_DMATYPE_SSI_DUAL:
                per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
                emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
+               sdmac->is_ram_script = true;
                break;
        case IMX_DMATYPE_SSI_SP:
        case IMX_DMATYPE_MMC:
@@ -968,6 +1002,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
                per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
                emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
                per_2_per = sdma->script_addrs->per_2_per_addr;
+               sdmac->is_ram_script = true;
                break;
        case IMX_DMATYPE_ASRC_SP:
                per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
@@ -1008,9 +1043,6 @@ static int sdma_load_context(struct sdma_channel *sdmac)
        int ret;
        unsigned long flags;
 
-       if (sdmac->context_loaded)
-               return 0;
-
        if (sdmac->direction == DMA_DEV_TO_MEM)
                load_address = sdmac->pc_from_device;
        else if (sdmac->direction == DMA_DEV_TO_DEV)
@@ -1053,8 +1085,6 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 
        spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 
-       sdmac->context_loaded = true;
-
        return ret;
 }
 
@@ -1078,9 +1108,6 @@ static void sdma_channel_terminate_work(struct work_struct *work)
 {
        struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
                                                  terminate_worker);
-       unsigned long flags;
-       LIST_HEAD(head);
-
        /*
         * According to NXP R&D team a delay of one BD SDMA cost time
         * (maximum is 1ms) should be added after disable of the channel
@@ -1089,11 +1116,7 @@ static void sdma_channel_terminate_work(struct work_struct *work)
         */
        usleep_range(1000, 2000);
 
-       spin_lock_irqsave(&sdmac->vc.lock, flags);
-       vchan_get_all_descriptors(&sdmac->vc, &head);
-       spin_unlock_irqrestore(&sdmac->vc.lock, flags);
-       vchan_dma_desc_free_list(&sdmac->vc, &head);
-       sdmac->context_loaded = false;
+       vchan_dma_desc_free_list(&sdmac->vc, &sdmac->terminated);
 }
 
 static int sdma_terminate_all(struct dma_chan *chan)
@@ -1107,6 +1130,13 @@ static int sdma_terminate_all(struct dma_chan *chan)
 
        if (sdmac->desc) {
                vchan_terminate_vdesc(&sdmac->desc->vd);
+               /*
+                * Move the current descriptor onto the terminated list so
+                * that it can be freed later in sdma_channel_terminate_work
+                * alone, without a subsequently queued descriptor getting
+                * involved before the terminated one has been freed.
+                */
+               vchan_get_all_descriptors(&sdmac->vc, &sdmac->terminated);
                sdmac->desc = NULL;
                schedule_work(&sdmac->terminate_worker);
        }
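
Taken together with the worker hunk above, the descriptor lifecycle after this change is (a paraphrase of the hunks, not new driver code):

    /* sdma_terminate_all(): park in-flight descriptors on a private list */
    vchan_get_all_descriptors(&sdmac->vc, &sdmac->terminated);
    schedule_work(&sdmac->terminate_worker);

    /* sdma_channel_terminate_work(): free them after the ~1 ms settle time */
    usleep_range(1000, 2000);
    vchan_dma_desc_free_list(&sdmac->vc, &sdmac->terminated);
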
@@ -1168,7 +1198,6 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
 static int sdma_config_channel(struct dma_chan *chan)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
-       int ret;
 
        sdma_disable_channel(chan);
 
@@ -1208,9 +1237,7 @@ static int sdma_config_channel(struct dma_chan *chan)
                sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
        }
 
-       ret = sdma_load_context(sdmac);
-
-       return ret;
+       return 0;
 }
 
 static int sdma_set_channel_priority(struct sdma_channel *sdmac,
@@ -1361,7 +1388,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 
        sdmac->event_id0 = 0;
        sdmac->event_id1 = 0;
-       sdmac->context_loaded = false;
 
        sdma_set_channel_priority(sdmac, 0);
 
@@ -1374,6 +1400,11 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
 {
        struct sdma_desc *desc;
 
+       if (!sdmac->sdma->fw_loaded && sdmac->is_ram_script) {
+               dev_warn_once(sdmac->sdma->dev, "sdma firmware not ready!\n");
+               goto err_out;
+       }
+
        desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
        if (!desc)
                goto err_out;
@@ -1722,8 +1753,8 @@ static void sdma_issue_pending(struct dma_chan *chan)
 
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1        34
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2        38
-#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3        41
-#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4        42
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3        45
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4        46
 
 static void sdma_add_scripts(struct sdma_engine *sdma,
                const struct sdma_script_start_addrs *addr)
@@ -1747,6 +1778,19 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
        for (i = 0; i < sdma->script_number; i++)
                if (addr_arr[i] > 0)
                        saddr_arr[i] = addr_arr[i];
+
+       /*
+        * Fetch the uart_2_mcu_addr/uartsh_2_mcu_addr ROM scripts specially:
+        * their slots were renamed to uart_2_mcu_ram_addr/uartsh_2_mcu_ram_addr
+        * to stay compatible with legacy Freescale/NXP SDMA firmware, and the
+        * ROM variants live in the bottom part of sdma_script_start_addrs,
+        * beyond SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1.
+        */
+       if (addr->uart_2_mcu_addr)
+               sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_addr;
+       if (addr->uartsh_2_mcu_addr)
+               sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_addr;
+
 }
 
 static void sdma_load_firmware(const struct firmware *fw, void *context)
@@ -1803,6 +1847,8 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
 
        sdma_add_scripts(sdma, addr);
 
+       sdma->fw_loaded = true;
+
        dev_info(sdma->dev, "loaded firmware %d.%d\n",
                        header->version_major,
                        header->version_minor);
@@ -2086,6 +2132,7 @@ static int sdma_probe(struct platform_device *pdev)
 
                sdmac->channel = i;
                sdmac->vc.desc_free = sdma_desc_free;
+               INIT_LIST_HEAD(&sdmac->terminated);
                INIT_WORK(&sdmac->terminate_worker,
                                sdma_channel_terminate_work);
                /*
index 104ad42..baab1ca 100644 (file)
@@ -618,6 +618,7 @@ static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
        case IDMAC_SDC_1:
        case IDMAC_IC_7:
                ipu_channel_set_priority(ipu, channel, true);
+               break;
        default:
                break;
        }
@@ -978,6 +979,7 @@ static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan)
        case IDMAC_SDC_0:
        case IDMAC_SDC_1:
                n_desc = 4;
+               break;
        default:
                break;
        }
index c1a6914..4a51fdb 100644 (file)
@@ -813,6 +813,7 @@ inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308)
        case 16:
                if (is_mpc8308)
                        return false;
+               break;
        case 1:
        case 2:
        case 4:
index 96ad218..a358586 100644 (file)
@@ -4948,6 +4948,7 @@ static int setup_resources(struct udma_dev *ud)
                                                       ud->tchan_cnt),
                         ud->rchan_cnt - bitmap_weight(ud->rchan_map,
                                                       ud->rchan_cnt));
+               break;
        default:
                break;
        }
index 91164c5..2fc4c3f 100644 (file)
@@ -271,7 +271,7 @@ config EDAC_PND2
 config EDAC_IGEN6
        tristate "Intel client SoC Integrated MC"
        depends on PCI && PCI_MMCONFIG && ARCH_HAVE_NMI_SAFE_CMPXCHG
-       depends on X64_64 && X86_MCE_INTEL
+       depends on X86_64 && X86_MCE_INTEL
        help
          Support for error detection and correction on the Intel
          client SoC Integrated Memory Controller using In-Band ECC IP.
index aadb720..77fe6d0 100644 (file)
@@ -6,39 +6,7 @@
 
 menu "Firmware Drivers"
 
-config ARM_SCMI_PROTOCOL
-       tristate "ARM System Control and Management Interface (SCMI) Message Protocol"
-       depends on ARM || ARM64 || COMPILE_TEST
-       depends on MAILBOX || HAVE_ARM_SMCCC_DISCOVERY
-       help
-         ARM System Control and Management Interface (SCMI) protocol is a
-         set of operating system-independent software interfaces that are
-         used in system management. SCMI is extensible and currently provides
-         interfaces for: Discovery and self-description of the interfaces
-         it supports, Power domain management which is the ability to place
-         a given device or domain into the various power-saving states that
-         it supports, Performance management which is the ability to control
-         the performance of a domain that is composed of compute engines
-         such as application processors and other accelerators, Clock
-         management which is the ability to set and inquire rates on platform
-         managed clocks and Sensor management which is the ability to read
-         sensor data, and be notified of sensor value.
-
-         This protocol library provides interface for all the client drivers
-         making use of the features offered by the SCMI.
-
-config ARM_SCMI_POWER_DOMAIN
-       tristate "SCMI power domain driver"
-       depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
-       default y
-       select PM_GENERIC_DOMAINS if PM
-       help
-         This enables support for the SCMI power domains which can be
-         enabled or disabled via the SCP firmware
-
-         This driver can also be built as a module.  If so, the module
-         will be called scmi_pm_domain. Note this may needed early in boot
-         before rootfs may be available.
+source "drivers/firmware/arm_scmi/Kconfig"
 
 config ARM_SCPI_PROTOCOL
        tristate "ARM System Control and Power Interface (SCPI) Message Protocol"
index 83166e0..00fe595 100644 (file)
@@ -46,9 +46,6 @@ static int ffa_device_probe(struct device *dev)
        struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
        struct ffa_device *ffa_dev = to_ffa_dev(dev);
 
-       if (!ffa_device_match(dev, dev->driver))
-               return -ENODEV;
-
        return ffa_drv->probe(ffa_dev);
 }
 
@@ -99,6 +96,9 @@ int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
 {
        int ret;
 
+       if (!driver->probe)
+               return -EINVAL;
+
        driver->driver.bus = &ffa_bus_type;
        driver->driver.name = driver->name;
        driver->driver.owner = owner;
index b1edb4b..c9fb56a 100644 (file)
 #define PACK_TARGET_INFO(s, r)         \
        (FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r)))
 
-/**
+/*
  * FF-A specification mentions explicitly about '4K pages'. This should
  * not be confused with the kernel PAGE_SIZE, which is the translation
  * granule kernel is configured and may be one among 4K, 16K and 64K.
@@ -149,8 +149,10 @@ static const int ffa_linux_errmap[] = {
 
 static inline int ffa_to_linux_errno(int errno)
 {
-       if (errno < FFA_RET_SUCCESS && errno >= -ARRAY_SIZE(ffa_linux_errmap))
-               return ffa_linux_errmap[-errno];
+       int err_idx = -errno;
+
+       if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap))
+               return ffa_linux_errmap[err_idx];
        return -EINVAL;
 }
 
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
new file mode 100644 (file)
index 0000000..7f4d243
--- /dev/null
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "ARM System Control and Management Interface Protocol"
+
+config ARM_SCMI_PROTOCOL
+       tristate "ARM System Control and Management Interface (SCMI) Message Protocol"
+       depends on ARM || ARM64 || COMPILE_TEST
+       help
+         ARM System Control and Management Interface (SCMI) protocol is a
+         set of operating system-independent software interfaces that are
+         used in system management. SCMI is extensible and currently provides
+         interfaces for: Discovery and self-description of the interfaces
+         it supports, Power domain management which is the ability to place
+         a given device or domain into the various power-saving states that
+         it supports, Performance management which is the ability to control
+         the performance of a domain that is composed of compute engines
+         such as application processors and other accelerators, Clock
+         management which is the ability to set and inquire rates on platform
+         managed clocks, and Sensor management which is the ability to read
+         sensor data and be notified of sensor values.
+
+         This protocol library provides an interface for all the client
+         drivers making use of the features offered by SCMI.
+
+if ARM_SCMI_PROTOCOL
+
+config ARM_SCMI_HAVE_TRANSPORT
+       bool
+       help
+         This declares whether at least one SCMI transport has been configured.
+         Used to trigger a build bug when trying to build SCMI without any
+         configured transport.
+
+config ARM_SCMI_HAVE_SHMEM
+       bool
+       help
+         This declares whether a shared memory based transport for SCMI is
+         available.
+
+config ARM_SCMI_HAVE_MSG
+       bool
+       help
+         This declares whether a message passing based transport for SCMI is
+         available.
+
+config ARM_SCMI_TRANSPORT_MAILBOX
+       bool "SCMI transport based on Mailbox"
+       depends on MAILBOX
+       select ARM_SCMI_HAVE_TRANSPORT
+       select ARM_SCMI_HAVE_SHMEM
+       default y
+       help
+         Enable mailbox based transport for SCMI.
+
+         If you want the ARM SCMI PROTOCOL stack to include support for a
+         transport based on mailboxes, answer Y.
+
+config ARM_SCMI_TRANSPORT_SMC
+       bool "SCMI transport based on SMC"
+       depends on HAVE_ARM_SMCCC_DISCOVERY
+       select ARM_SCMI_HAVE_TRANSPORT
+       select ARM_SCMI_HAVE_SHMEM
+       default y
+       help
+         Enable SMC based transport for SCMI.
+
+         If you want the ARM SCMI PROTOCOL stack to include support for a
+         transport based on SMC, answer Y.
+
+config ARM_SCMI_TRANSPORT_VIRTIO
+       bool "SCMI transport based on VirtIO"
+       depends on VIRTIO
+       select ARM_SCMI_HAVE_TRANSPORT
+       select ARM_SCMI_HAVE_MSG
+       help
+         This enables the virtio based transport for SCMI.
+
+         If you want the ARM SCMI PROTOCOL stack to include support for a
+         transport based on VirtIO, answer Y.
+
+endif #ARM_SCMI_PROTOCOL
+
+config ARM_SCMI_POWER_DOMAIN
+       tristate "SCMI power domain driver"
+       depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+       default y
+       select PM_GENERIC_DOMAINS if PM
+       help
+         This enables support for the SCMI power domains which can be
+         enabled or disabled via the SCP firmware
+
+         This driver can also be built as a module.  If so, the module
+         will be called scmi_pm_domain. Note this may be needed early in
+         boot, before the rootfs is available.
+
+endmenu
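
As an illustration of the new structure, a hypothetical configuration fragment built from the options above (values chosen arbitrarily): the transport symbols are bool and select the HAVE_* helpers, while the protocol itself can still be modular.

    CONFIG_ARM_SCMI_PROTOCOL=m
    CONFIG_ARM_SCMI_TRANSPORT_MAILBOX=y
    CONFIG_ARM_SCMI_TRANSPORT_SMC=y
    # CONFIG_ARM_SCMI_TRANSPORT_VIRTIO is not set
    CONFIG_ARM_SCMI_POWER_DOMAIN=m
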
index 6a2ef63..1dcf123 100644 (file)
@@ -1,9 +1,11 @@
 # SPDX-License-Identifier: GPL-2.0-only
 scmi-bus-y = bus.o
 scmi-driver-y = driver.o notify.o
-scmi-transport-y = shmem.o
-scmi-transport-$(CONFIG_MAILBOX) += mailbox.o
-scmi-transport-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smc.o
+scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o
+scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o
+scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o
+scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
+scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o
 scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o
 scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \
                    $(scmi-transport-y)
index 784cf00..6c7e249 100644 (file)
@@ -104,11 +104,6 @@ static int scmi_dev_probe(struct device *dev)
 {
        struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
        struct scmi_device *scmi_dev = to_scmi_dev(dev);
-       const struct scmi_device_id *id;
-
-       id = scmi_dev_match_id(scmi_dev, scmi_drv);
-       if (!id)
-               return -ENODEV;
 
        if (!scmi_dev->handle)
                return -EPROBE_DEFER;
@@ -139,6 +134,9 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
 {
        int retval;
 
+       if (!driver->probe)
+               return -EINVAL;
+
        retval = scmi_protocol_device_request(driver->id_table);
        if (retval)
                return retval;
index 8685619..dea1bfb 100644 (file)
 #include <linux/device.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
+#include <linux/hashtable.h>
+#include <linux/list.h>
 #include <linux/module.h>
+#include <linux/refcount.h>
 #include <linux/scmi_protocol.h>
+#include <linux/spinlock.h>
 #include <linux/types.h>
 
 #include <asm/unaligned.h>
@@ -65,11 +69,22 @@ struct scmi_msg_resp_prot_version {
 #define MSG_XTRACT_TOKEN(hdr)  FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
 #define MSG_TOKEN_MAX          (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
 
+/*
+ * Size of the @pending_xfers hashtable included in @scmi_xfers_info; ideally,
+ * to minimize space and collisions, this should equal max_msg, i.e. the
+ * maximum number of in-flight messages on a specific platform, but that value
+ * is only known at runtime while kernel hashtables are statically sized:
+ * instead pick, as a fixed static size, the maximum number of entries that
+ * lets the whole table fit into one 4k page.
+ */
+#define SCMI_PENDING_XFERS_HT_ORDER_SZ         9
+
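
A quick check of the sizing comment's arithmetic, assuming a 64-bit build where sizeof(struct hlist_head) is 8 bytes: DECLARE_HASHTABLE(pending_xfers, 9) expands to an array of 1 << 9 = 512 buckets, and 512 * 8 = 4096 bytes, i.e. exactly one 4k page.
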
 /**
  * struct scmi_msg_hdr - Message(Tx/Rx) header
  *
  * @id: The identifier of the message being sent
  * @protocol_id: The identifier of the protocol used to send @id message
+ * @type: The SCMI type for this message
  * @seq: The token to identify the message. When a message returns, the
  *     platform returns the whole message header unmodified including the
  *     token
@@ -80,6 +95,7 @@ struct scmi_msg_resp_prot_version {
 struct scmi_msg_hdr {
        u8 id;
        u8 protocol_id;
+       u8 type;
        u16 seq;
        u32 status;
        bool poll_completion;
@@ -89,13 +105,14 @@ struct scmi_msg_hdr {
  * pack_scmi_header() - packs and returns 32-bit header
  *
  * @hdr: pointer to header containing all the information on message id,
- *     protocol id and sequence id.
+ *     protocol id, sequence id and type.
  *
  * Return: 32-bit packed message header to be sent to the platform.
  */
 static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
 {
        return FIELD_PREP(MSG_ID_MASK, hdr->id) |
+               FIELD_PREP(MSG_TYPE_MASK, hdr->type) |
                FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
                FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
 }
@@ -110,6 +127,7 @@ static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
 {
        hdr->id = MSG_XTRACT_ID(msg_hdr);
        hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
+       hdr->type = MSG_XTRACT_TYPE(msg_hdr);
 }
 
 /**
@@ -134,6 +152,27 @@ struct scmi_msg {
  *     buffer for the rx path as we use for the tx path.
  * @done: command message transmit completion event
  * @async_done: pointer to delayed response message received event completion
+ * @pending: True for xfers added to the @pending_xfers hashtable
+ * @node: An hlist_node used to store this xfer either on the free list
+ *       @free_xfers or in the @pending_xfers hashtable
+ * @users: A refcount to track the active users for this xfer.
+ *        This is meant to protect against the possibility that, when a command
+ *        transaction times out concurrently with the reception of a valid
+ *        response message, the xfer could be finally put on the TX path, and
+ *        so vanish, while on the RX path scmi_rx_callback() is still
+ *        processing it: in such a case this refcounting will ensure that, even
+ *        though the timed-out transaction will anyway cause the command
+ *        request to be reported as failed by time-out, the underlying xfer
+ *        cannot be discarded and possibly reused until the last user on
+ *        the RX path has released it.
+ * @busy: An atomic flag to ensure exclusive write access to this xfer
+ * @state: The current state of this transfer, with states transitions deemed
+ *        valid being:
+ *         - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ]
+ *         - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK
+ *           (Missing synchronous response is assumed OK and ignored)
+ * @lock: A spinlock to protect state and busy fields.
+ * @priv: A pointer for transport private usage.
  */
 struct scmi_xfer {
        int transfer_id;
@@ -142,8 +181,36 @@ struct scmi_xfer {
        struct scmi_msg rx;
        struct completion done;
        struct completion *async_done;
+       bool pending;
+       struct hlist_node node;
+       refcount_t users;
+#define SCMI_XFER_FREE         0
+#define SCMI_XFER_BUSY         1
+       atomic_t busy;
+#define SCMI_XFER_SENT_OK      0
+#define SCMI_XFER_RESP_OK      1
+#define SCMI_XFER_DRESP_OK     2
+       int state;
+       /* A lock to protect state and busy fields */
+       spinlock_t lock;
+       void *priv;
 };
 
+/*
+ * A helper macro to look up an xfer in the @pending_xfers hashtable
+ * using the message sequence number token as a key.
+ */
+#define XFER_FIND(__ht, __k)                                   \
+({                                                             \
+       typeof(__k) k_ = __k;                                   \
+       struct scmi_xfer *xfer_ = NULL;                         \
+                                                               \
+       hash_for_each_possible((__ht), xfer_, node, k_)         \
+               if (xfer_->hdr.seq == k_)                       \
+                       break;                                  \
+       xfer_;                                                  \
+})
+
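
A hedged usage sketch for XFER_FIND (a hypothetical caller; minfo, msg_hdr and the locking discipline are assumptions based on the @xfer_lock and @users documentation above):

    struct scmi_xfer *xfer;
    unsigned long flags;

    spin_lock_irqsave(&minfo->xfer_lock, flags);
    xfer = XFER_FIND(minfo->pending_xfers, MSG_XTRACT_TOKEN(msg_hdr));
    if (xfer)
            refcount_inc(&xfer->users); /* keep it alive on the RX path */
    spin_unlock_irqrestore(&minfo->xfer_lock, flags);
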
 struct scmi_xfer_ops;
 
 /**
@@ -283,9 +350,13 @@ struct scmi_chan_info {
 /**
  * struct scmi_transport_ops - Structure representing a SCMI transport ops
  *
+ * @link_supplier: Optional callback to add link to a supplier device
  * @chan_available: Callback to check if channel is available or not
  * @chan_setup: Callback to allocate and setup a channel
  * @chan_free: Callback to free a channel
+ * @get_max_msg: Optional callback to provide max_msg dynamically.
+ *              Returns the maximum number of messages for the channel type
+ *              (tx or rx) that can be pending simultaneously in the system.
  * @send_message: Callback to send a message
  * @mark_txdone: Callback to mark tx as done
  * @fetch_response: Callback to fetch response
@@ -294,10 +365,12 @@ struct scmi_chan_info {
  * @poll_done: Callback to poll transfer status
  */
 struct scmi_transport_ops {
+       int (*link_supplier)(struct device *dev);
        bool (*chan_available)(struct device *dev, int idx);
        int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
                          bool tx);
        int (*chan_free)(int id, void *p, void *data);
+       unsigned int (*get_max_msg)(struct scmi_chan_info *base_cinfo);
        int (*send_message)(struct scmi_chan_info *cinfo,
                            struct scmi_xfer *xfer);
        void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret);
@@ -317,25 +390,39 @@ struct scmi_device *scmi_child_dev_find(struct device *parent,
 /**
  * struct scmi_desc - Description of SoC integration
  *
+ * @transport_init: An optional function that a transport can provide to
+ *                 perform some transport-specific setup during SCMI core
+ *                 initialization, i.e. ahead of SCMI core probing.
+ * @transport_exit: An optional function that a transport can provide to
+ *                 undo any transport-specific setup during SCMI core
+ *                 de-initialization, i.e. after SCMI core removal.
  * @ops: Pointer to the transport specific ops structure
  * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
- * @max_msg: Maximum number of messages that can be pending
- *     simultaneously in the system
+ * @max_msg: Maximum number of messages for a channel type (tx or rx) that can
+ *     be pending simultaneously in the system. May be overridden by the
+ *     get_max_msg op.
  * @max_msg_size: Maximum size of data per message that can be handled.
  */
 struct scmi_desc {
+       int (*transport_init)(void);
+       void (*transport_exit)(void);
        const struct scmi_transport_ops *ops;
        int max_rx_timeout_ms;
        int max_msg;
        int max_msg_size;
 };
 
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
 extern const struct scmi_desc scmi_mailbox_desc;
-#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
+#endif
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
 extern const struct scmi_desc scmi_smc_desc;
 #endif
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
+extern const struct scmi_desc scmi_virtio_desc;
+#endif
 
-void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr);
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv);
 void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
 
 /* shmem related declarations */
@@ -352,8 +439,22 @@ void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
 bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
                     struct scmi_xfer *xfer);
 
+/* declarations for message passing transports */
+struct scmi_msg_payld;
+
+/* Maximum overhead of message w.r.t. struct scmi_desc.max_msg_size */
+#define SCMI_MSG_MAX_PROT_OVERHEAD (2 * sizeof(__le32))
+
+size_t msg_response_size(struct scmi_xfer *xfer);
+size_t msg_command_size(struct scmi_xfer *xfer);
+void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer);
+u32 msg_read_header(struct scmi_msg_payld *msg);
+void msg_fetch_response(struct scmi_msg_payld *msg, size_t len,
+                       struct scmi_xfer *xfer);
+void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len,
+                           size_t max_len, struct scmi_xfer *xfer);
+
 void scmi_notification_instance_data_set(const struct scmi_handle *handle,
                                         void *priv);
 void *scmi_notification_instance_data_get(const struct scmi_handle *handle);
-
 #endif /* _SCMI_COMMON_H */
index 66e5e69..b406b3f 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/ktime.h>
+#include <linux/hashtable.h>
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
@@ -47,7 +48,6 @@ enum scmi_error_codes {
        SCMI_ERR_GENERIC = -8,  /* Generic Error */
        SCMI_ERR_HARDWARE = -9, /* Hardware Error */
        SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
-       SCMI_ERR_MAX
 };
 
 /* List of all SCMI devices active in system */
@@ -68,16 +68,23 @@ struct scmi_requested_dev {
 /**
  * struct scmi_xfers_info - Structure to manage transfer information
  *
- * @xfer_block: Preallocated Message array
  * @xfer_alloc_table: Bitmap table for allocated messages.
  *     Index of this bitmap table is also used for message
  *     sequence identifier.
  * @xfer_lock: Protection for message allocation
+ * @max_msg: Maximum number of messages that can be pending
+ * @free_xfers: A free list of xfers available for use. It is initialized with
+ *             a number of xfers equal to the maximum allowed in-flight
+ *             messages.
+ * @pending_xfers: A hashtable, indexed by msg_hdr.seq, used to keep all the
+ *                currently in-flight messages.
  */
 struct scmi_xfers_info {
-       struct scmi_xfer *xfer_block;
        unsigned long *xfer_alloc_table;
        spinlock_t xfer_lock;
+       int max_msg;
+       struct hlist_head free_xfers;
+       DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
 };
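
For reference, a minimal userspace model of this free-list plus
pending-hashtable scheme, assuming a tiny fixed-bucket chained hashtable
keyed by the message sequence number (names and sizes here are
illustrative; the kernel uses hlist_head and DECLARE_HASHTABLE):

#include <stdio.h>

#define NBUCKETS 8

struct xfer {
	unsigned int seq;
	struct xfer *next;	/* chains both the free list and hash buckets */
};

static struct xfer pool[4];
static struct xfer *free_list;
static struct xfer *pending[NBUCKETS];

static struct xfer *xfer_get(unsigned int seq)
{
	struct xfer *x = free_list;

	if (!x)
		return NULL;		/* no free xfers: maps to -ENOMEM */
	free_list = x->next;

	x->seq = seq;
	x->next = pending[seq % NBUCKETS];	/* hash_add() equivalent */
	pending[seq % NBUCKETS] = x;
	return x;
}

static struct xfer *xfer_lookup(unsigned int seq)
{
	struct xfer *x;

	for (x = pending[seq % NBUCKETS]; x; x = x->next)
		if (x->seq == seq)
			return x;
	return NULL;
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++) {	/* preallocate, all initially free */
		pool[i].next = free_list;
		free_list = &pool[i];
	}
	xfer_get(42);
	printf("%s\n", xfer_lookup(42) ? "in flight" : "missing");
	return 0;
}
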
 
 /**
@@ -166,22 +173,11 @@ static const int scmi_linux_errmap[] = {
 
 static inline int scmi_to_linux_errno(int errno)
 {
-       if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
-               return scmi_linux_errmap[-errno];
-       return -EIO;
-}
+       int err_idx = -errno;
 
-/**
- * scmi_dump_header_dbg() - Helper to dump a message header.
- *
- * @dev: Device pointer corresponding to the SCMI entity
- * @hdr: pointer to header.
- */
-static inline void scmi_dump_header_dbg(struct device *dev,
-                                       struct scmi_msg_hdr *hdr)
-{
-       dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
-               hdr->id, hdr->seq, hdr->protocol_id);
+       if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
+               return scmi_linux_errmap[err_idx];
+       return -EIO;
 }
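
A standalone sketch of the bounds-checked error mapping above; the table
below is an illustrative subset of the kernel's scmi_linux_errmap, and
any out-of-range code collapses to -EIO:

#include <stdio.h>
#include <errno.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int errmap[] = {
	0,		/* SCMI_SUCCESS */
	-EOPNOTSUPP,	/* SCMI_ERR_SUPPORT */
	-EINVAL,	/* SCMI_ERR_PARAMS */
	-EACCES,	/* SCMI_ERR_ACCESS */
	-ENOENT,	/* SCMI_ERR_ENTRY */
};

static int to_linux_errno(int scmi_errno)
{
	int err_idx = -scmi_errno;

	/* In-range negative SCMI codes index the map; all else is -EIO */
	if (err_idx >= 0 && err_idx < (int)ARRAY_SIZE(errmap))
		return errmap[err_idx];
	return -EIO;
}

int main(void)
{
	/* prints: 0 -22 -5 */
	printf("%d %d %d\n", to_linux_errno(0), to_linux_errno(-2),
	       to_linux_errno(-99));
	return 0;
}
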
 
 void scmi_notification_instance_data_set(const struct scmi_handle *handle,
@@ -204,45 +200,189 @@ void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
 }
 
 /**
+ * scmi_xfer_token_set  - Reserve and set new token for the xfer at hand
+ *
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ * @xfer: The xfer to act upon
+ *
+ * Pick the next unused monotonically increasing token and set it into
+ * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
+ * reuse of freshly completed or timed-out xfers, thus mitigating the risk
+ * of incorrect association of a late and expired xfer with a live in-flight
+ * transaction, both happening to re-use the same token identifier.
+ *
+ * Since the platform is NOT required to answer our requests in order, we
+ * should account for a few rare but possible scenarios:
+ *
+ *  - exactly 'next_token' may NOT be available, so pick xfer_id >= next_token
+ *    using find_next_zero_bit() starting from the candidate next_token bit
+ *
+ *  - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but
+ *    there are plenty of free tokens at the start, so try a second pass using
+ *    find_next_zero_bit() and starting from 0.
+ *
+ *  X = used in-flight
+ *
+ * Normal
+ * ------
+ *
+ *             |- xfer_id picked
+ *   -----------+----------------------------------------------------------
+ *   | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
+ *   ----------------------------------------------------------------------
+ *             ^
+ *             |- next_token
+ *
+ * Out-of-order pending at start
+ * -----------------------------
+ *
+ *       |- xfer_id picked, last_token fixed
+ *   -----+----------------------------------------------------------------
+ *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
+ *   ----------------------------------------------------------------------
+ *    ^
+ *    |- next_token
+ *
+ *
+ * Out-of-order pending at end
+ * ---------------------------
+ *
+ *       |- xfer_id picked, last_token fixed
+ *   -----+----------------------------------------------------------------
+ *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
+ *   ----------------------------------------------------------------------
+ *                                                             ^
+ *                                                             |- next_token
+ *
+ * Context: Assumes to be called with @xfer_lock already acquired.
+ *
+ * Return: 0 on Success or a negative error code otherwise
+ */
+static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
+                              struct scmi_xfer *xfer)
+{
+       unsigned long xfer_id, next_token;
+
+       /*
+        * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
+        * using the pre-allocated transfer_id as a base.
+        * Note that the global transfer_id is shared across all message types
+        * so there could be holes in the allocated set of monotonic sequence
+        * numbers, but that is going to limit the effectiveness of the
+        * mitigation only in very rare limit conditions.
+        */
+       next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
+
+       /* Pick the next available xfer_id >= next_token */
+       xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
+                                    MSG_TOKEN_MAX, next_token);
+       if (xfer_id == MSG_TOKEN_MAX) {
+               /*
+                * After heavily out-of-order responses, there are no free
+                * tokens ahead, but only at start of xfer_alloc_table so
+                * try again from the beginning.
+                */
+               xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
+                                            MSG_TOKEN_MAX, 0);
+               /*
+                * Something is wrong if we got here since there can be at
+                * most (MSG_TOKEN_MAX - 1) in-flight messages, yet we have
+                * not found any free token in [0, MSG_TOKEN_MAX - 1].
+                */
+               if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
+                       return -ENOMEM;
+       }
+
+       /* Advance transfer_last_id accordingly if we skipped over some holes */
+       if (xfer_id != next_token)
+               atomic_add((int)(xfer_id - next_token), &transfer_last_id);
+
+       /* Set in-flight */
+       set_bit(xfer_id, minfo->xfer_alloc_table);
+       xfer->hdr.seq = (u16)xfer_id;
+
+       return 0;
+}
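
A userspace sketch of the two-pass token search described above, assuming
a plain boolean bitmap and a simplified find_next_zero_bit(); the
MSG_TOKEN_MAX value here is illustrative (the kernel derives it from the
message token field width):

#include <stdio.h>
#include <stdbool.h>

#define MSG_TOKEN_MAX 16

static bool token_used[MSG_TOKEN_MAX];

static unsigned int find_next_zero(const bool *map, unsigned int size,
				   unsigned int start)
{
	unsigned int i;

	for (i = start; i < size; i++)
		if (!map[i])
			return i;
	return size;	/* like find_next_zero_bit(): size means not found */
}

static int token_set(unsigned int next_token, unsigned int *picked)
{
	unsigned int id;

	/* First pass: look for a free token at or above next_token */
	id = find_next_zero(token_used, MSG_TOKEN_MAX, next_token);
	if (id == MSG_TOKEN_MAX)	/* none ahead: retry from the start */
		id = find_next_zero(token_used, MSG_TOKEN_MAX, 0);
	if (id == MSG_TOKEN_MAX)
		return -1;		/* all tokens in flight */

	token_used[id] = true;		/* mark in-flight */
	*picked = id;
	return 0;
}

int main(void)
{
	unsigned int id;

	token_used[3] = token_used[4] = true;
	if (!token_set(3, &id))
		printf("picked token %u\n", id);	/* prints 5 */
	return 0;
}
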
+
+/**
+ * scmi_xfer_token_clear  - Release the token
+ *
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ * @xfer: The xfer to act upon
+ */
+static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
+                                        struct scmi_xfer *xfer)
+{
+       clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
+}
+
+/**
  * scmi_xfer_get() - Allocate one message
  *
  * @handle: Pointer to SCMI entity handle
  * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ * @set_pending: If true, a monotonic token is picked and the xfer is added to
+ *              the pending hash table.
  *
  * Helper function which is used by various message functions that are
  * exposed to clients of this driver for allocating a message traffic event.
  *
- * This function can sleep depending on pending requests already in the system
- * for the SCMI entity. Further, this also holds a spinlock to maintain
- * integrity of internal data structures.
+ * Picks an xfer from the free list @free_xfers (if any available) and, if
+ * required, sets a monotonically increasing token and stores the in-flight
+ * xfer into the @pending_xfers hashtable for later retrieval.
+ *
+ * The successfully initialized xfer is refcounted.
+ *
+ * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and
+ *         @free_xfers.
  *
  * Return: A valid xfer on success, or an error pointer otherwise.
  */
 static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
-                                      struct scmi_xfers_info *minfo)
+                                      struct scmi_xfers_info *minfo,
+                                      bool set_pending)
 {
-       u16 xfer_id;
+       int ret;
+       unsigned long flags;
        struct scmi_xfer *xfer;
-       unsigned long flags, bit_pos;
-       struct scmi_info *info = handle_to_scmi_info(handle);
 
-       /* Keep the locked section as small as possible */
        spin_lock_irqsave(&minfo->xfer_lock, flags);
-       bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
-                                     info->desc->max_msg);
-       if (bit_pos == info->desc->max_msg) {
+       if (hlist_empty(&minfo->free_xfers)) {
                spin_unlock_irqrestore(&minfo->xfer_lock, flags);
                return ERR_PTR(-ENOMEM);
        }
-       set_bit(bit_pos, minfo->xfer_alloc_table);
-       spin_unlock_irqrestore(&minfo->xfer_lock, flags);
 
-       xfer_id = bit_pos;
+       /* grab an xfer from the free_list */
+       xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
+       hlist_del_init(&xfer->node);
 
-       xfer = &minfo->xfer_block[xfer_id];
-       xfer->hdr.seq = xfer_id;
+       /*
+        * Allocate transfer_id early so that it can also be used as the base
+        * for monotonic sequence number generation if needed.
+        */
        xfer->transfer_id = atomic_inc_return(&transfer_last_id);
 
+       if (set_pending) {
+               /* Pick and set monotonic token */
+               ret = scmi_xfer_token_set(minfo, xfer);
+               if (!ret) {
+                       hash_add(minfo->pending_xfers, &xfer->node,
+                                xfer->hdr.seq);
+                       xfer->pending = true;
+               } else {
+                       dev_err(handle->dev,
+                               "Failed to get monotonic token %d\n", ret);
+                       hlist_add_head(&xfer->node, &minfo->free_xfers);
+                       xfer = ERR_PTR(ret);
+               }
+       }
+
+       if (!IS_ERR(xfer)) {
+               refcount_set(&xfer->users, 1);
+               atomic_set(&xfer->busy, SCMI_XFER_FREE);
+       }
+       spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
        return xfer;
 }
 
@@ -252,6 +392,9 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
  * @minfo: Pointer to Tx/Rx Message management info based on channel type
  * @xfer: message that was reserved by scmi_xfer_get
  *
+ * If the refcount drops to zero, release the xfer: clear the token slot,
+ * remove the xfer from @pending_xfers and put it back into @free_xfers.
+ *
  * This holds a spinlock to maintain integrity of internal data structures.
  */
 static void
@@ -259,17 +402,215 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
 {
        unsigned long flags;
 
+       spin_lock_irqsave(&minfo->xfer_lock, flags);
+       if (refcount_dec_and_test(&xfer->users)) {
+               if (xfer->pending) {
+                       scmi_xfer_token_clear(minfo, xfer);
+                       hash_del(&xfer->node);
+                       xfer->pending = false;
+               }
+               hlist_add_head(&xfer->node, &minfo->free_xfers);
+       }
+       spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+}
+
+/**
+ * scmi_xfer_lookup_unlocked  -  Helper to lookup an xfer_id
+ *
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ * @xfer_id: Token ID to lookup in @pending_xfers
+ *
+ * Refcounting is untouched.
+ *
+ * Context: Assumes to be called with @xfer_lock already acquired.
+ *
+ * Return: A valid xfer on Success or error otherwise
+ */
+static struct scmi_xfer *
+scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
+{
+       struct scmi_xfer *xfer = NULL;
+
+       if (test_bit(xfer_id, minfo->xfer_alloc_table))
+               xfer = XFER_FIND(minfo->pending_xfers, xfer_id);
+
+       return xfer ?: ERR_PTR(-EINVAL);
+}
+
+/**
+ * scmi_msg_response_validate  - Validate message type against state of related
+ * xfer
+ *
+ * @cinfo: A reference to the channel descriptor.
+ * @msg_type: Message type to check
+ * @xfer: A reference to the xfer to validate against @msg_type
+ *
+ * This function checks if @msg_type is congruent with the current state of
+ * a pending @xfer; if an asynchronous delayed response is received before the
+ * related synchronous response (Out-of-Order Delayed Response) the missing
+ * synchronous response is assumed to be OK and completed, carrying on with the
+ * Delayed Response: this is done to address the case in which the underlying
+ * SCMI transport can deliver such out-of-order responses.
+ *
+ * Context: Assumes to be called with xfer->lock already acquired.
+ *
+ * Return: 0 on Success, error otherwise
+ */
+static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
+                                            u8 msg_type,
+                                            struct scmi_xfer *xfer)
+{
        /*
-        * Keep the locked section as small as possible
-        * NOTE: we might escape with smp_mb and no lock here..
-        * but just be conservative and symmetric.
+        * Even if a response was indeed expected on this slot at this point,
+        * a buggy platform could wrongly reply feeding us an unexpected
+        * delayed response we're not prepared to handle: bail-out safely
+        * blaming firmware.
         */
+       if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
+               dev_err(cinfo->dev,
+                       "Delayed Response for %d not expected! Buggy F/W ?\n",
+                       xfer->hdr.seq);
+               return -EINVAL;
+       }
+
+       switch (xfer->state) {
+       case SCMI_XFER_SENT_OK:
+               if (msg_type == MSG_TYPE_DELAYED_RESP) {
+                       /*
+                        * Delayed Response expected but delivered earlier.
+                        * Assume message RESPONSE was OK and skip state.
+                        */
+                       xfer->hdr.status = SCMI_SUCCESS;
+                       xfer->state = SCMI_XFER_RESP_OK;
+                       complete(&xfer->done);
+                       dev_warn(cinfo->dev,
+                                "Received valid OoO Delayed Response for %d\n",
+                                xfer->hdr.seq);
+               }
+               break;
+       case SCMI_XFER_RESP_OK:
+               if (msg_type != MSG_TYPE_DELAYED_RESP)
+                       return -EINVAL;
+               break;
+       case SCMI_XFER_DRESP_OK:
+               /* No further message expected once in SCMI_XFER_DRESP_OK */
+               return -EINVAL;
+       }
+
+       return 0;
+}
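
A minimal model of this RESP/DRESP ordering check; the states and message
types below are illustrative mirrors of the kernel enums, not the real
definitions:

#include <stdio.h>

enum msg_type { MSG_RESP, MSG_DRESP };
enum xfer_state { XFER_SENT_OK, XFER_RESP_OK, XFER_DRESP_OK };

struct xfer {
	enum xfer_state state;
	int async;	/* is a delayed response expected at all? */
};

static int response_validate(struct xfer *x, enum msg_type t)
{
	if (t == MSG_DRESP && !x->async)
		return -1;	/* unexpected delayed response: buggy F/W */

	switch (x->state) {
	case XFER_SENT_OK:
		if (t == MSG_DRESP)	/* OoO DRESP: assume RESP was fine */
			x->state = XFER_RESP_OK;
		return 0;
	case XFER_RESP_OK:
		return t == MSG_DRESP ? 0 : -1;	/* only a DRESP may follow */
	case XFER_DRESP_OK:
	default:
		return -1;	/* nothing may follow a delayed response */
	}
}

int main(void)
{
	struct xfer x = { XFER_SENT_OK, 1 };

	printf("%d\n", response_validate(&x, MSG_DRESP));	/* 0: OoO ok */
	return 0;
}
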
+
+/**
+ * scmi_xfer_state_update  - Update xfer state
+ *
+ * @xfer: A reference to the xfer to update
+ * @msg_type: Type of message being processed.
+ *
+ * Note that this message is assumed to have already been successfully
+ * validated by @scmi_msg_response_validate(), so here we just update the
+ * state.
+ *
+ * Context: Assumes to be called on an xfer exclusively acquired using the
+ *         busy flag.
+ */
+static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
+{
+       xfer->hdr.type = msg_type;
+
+       /* Unknown command types were already discarded earlier */
+       if (xfer->hdr.type == MSG_TYPE_COMMAND)
+               xfer->state = SCMI_XFER_RESP_OK;
+       else
+               xfer->state = SCMI_XFER_DRESP_OK;
+}
+
+static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
+{
+       int ret;
+
+       ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);
+
+       return ret == SCMI_XFER_FREE;
+}
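
The same acquire/release handoff can be sketched with C11 atomics; this is
only a model of the busy flag, assuming <stdatomic.h> rather than the
kernel's atomic_cmpxchg():

#include <stdatomic.h>
#include <stdio.h>

enum { XFER_FREE = 0, XFER_BUSY = 1 };

static atomic_int busy = XFER_FREE;

static int xfer_acquire(void)
{
	int expected = XFER_FREE;

	/* Succeeds for exactly one caller; concurrent callers see BUSY */
	return atomic_compare_exchange_strong(&busy, &expected, XFER_BUSY);
}

int main(void)
{
	printf("%d %d\n", xfer_acquire(), xfer_acquire());	/* 1 0 */
	atomic_store(&busy, XFER_FREE);	/* release, as in command_release */
	return 0;
}
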
+
+/**
+ * scmi_xfer_command_acquire  -  Helper to lookup and acquire a command xfer
+ *
+ * @cinfo: A reference to the channel descriptor.
+ * @msg_hdr: A message header to use as lookup key
+ *
+ * When a valid xfer is found for the sequence number embedded in the provided
+ * msg_hdr, reference counting is properly updated and exclusive access to this
+ * xfer is granted until released with @scmi_xfer_command_release.
+ *
+ * Return: A valid @xfer on Success or error otherwise.
+ */
+static inline struct scmi_xfer *
+scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
+{
+       int ret;
+       unsigned long flags;
+       struct scmi_xfer *xfer;
+       struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+       struct scmi_xfers_info *minfo = &info->tx_minfo;
+       u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
+       u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
+
+       /* Are we even expecting this? */
        spin_lock_irqsave(&minfo->xfer_lock, flags);
-       clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
+       xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
+       if (IS_ERR(xfer)) {
+               dev_err(cinfo->dev,
+                       "Message for %d type %d is not expected!\n",
+                       xfer_id, msg_type);
+               spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+               return xfer;
+       }
+       refcount_inc(&xfer->users);
        spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+       spin_lock_irqsave(&xfer->lock, flags);
+       ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
+       /*
+        * If a pending xfer was found which was also in a congruent state with
+        * the received message, acquire exclusive access to it setting the busy
+        * flag.
+        * Spins only on the rare limit condition of concurrent reception of
+        * RESP and DRESP for the same xfer.
+        */
+       if (!ret) {
+               spin_until_cond(scmi_xfer_acquired(xfer));
+               scmi_xfer_state_update(xfer, msg_type);
+       }
+       spin_unlock_irqrestore(&xfer->lock, flags);
+
+       if (ret) {
+               dev_err(cinfo->dev,
+                       "Invalid message type:%d for %d - HDR:0x%X  state:%d\n",
+                       msg_type, xfer_id, msg_hdr, xfer->state);
+               /* On error the refcount incremented above has to be dropped */
+               __scmi_xfer_put(minfo, xfer);
+               xfer = ERR_PTR(-EINVAL);
+       }
+
+       return xfer;
 }
 
-static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
+static inline void scmi_xfer_command_release(struct scmi_info *info,
+                                            struct scmi_xfer *xfer)
+{
+       atomic_set(&xfer->busy, SCMI_XFER_FREE);
+       __scmi_xfer_put(&info->tx_minfo, xfer);
+}
+
+static inline void scmi_clear_channel(struct scmi_info *info,
+                                     struct scmi_chan_info *cinfo)
+{
+       if (info->desc->ops->clear_channel)
+               info->desc->ops->clear_channel(cinfo);
+}
+
+static void scmi_handle_notification(struct scmi_chan_info *cinfo,
+                                    u32 msg_hdr, void *priv)
 {
        struct scmi_xfer *xfer;
        struct device *dev = cinfo->dev;
@@ -278,16 +619,17 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
        ktime_t ts;
 
        ts = ktime_get_boottime();
-       xfer = scmi_xfer_get(cinfo->handle, minfo);
+       xfer = scmi_xfer_get(cinfo->handle, minfo, false);
        if (IS_ERR(xfer)) {
                dev_err(dev, "failed to get free message slot (%ld)\n",
                        PTR_ERR(xfer));
-               info->desc->ops->clear_channel(cinfo);
+               scmi_clear_channel(info, cinfo);
                return;
        }
 
        unpack_scmi_header(msg_hdr, &xfer->hdr);
-       scmi_dump_header_dbg(dev, &xfer->hdr);
+       if (priv)
+               xfer->priv = priv;
        info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
                                            xfer);
        scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
@@ -299,59 +641,41 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
 
        __scmi_xfer_put(minfo, xfer);
 
-       info->desc->ops->clear_channel(cinfo);
+       scmi_clear_channel(info, cinfo);
 }
 
 static void scmi_handle_response(struct scmi_chan_info *cinfo,
-                                u16 xfer_id, u8 msg_type)
+                                u32 msg_hdr, void *priv)
 {
        struct scmi_xfer *xfer;
-       struct device *dev = cinfo->dev;
        struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
-       struct scmi_xfers_info *minfo = &info->tx_minfo;
 
-       /* Are we even expecting this? */
-       if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
-               dev_err(dev, "message for %d is not expected!\n", xfer_id);
-               info->desc->ops->clear_channel(cinfo);
-               return;
-       }
-
-       xfer = &minfo->xfer_block[xfer_id];
-       /*
-        * Even if a response was indeed expected on this slot at this point,
-        * a buggy platform could wrongly reply feeding us an unexpected
-        * delayed response we're not prepared to handle: bail-out safely
-        * blaming firmware.
-        */
-       if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
-               dev_err(dev,
-                       "Delayed Response for %d not expected! Buggy F/W ?\n",
-                       xfer_id);
-               info->desc->ops->clear_channel(cinfo);
-               /* It was unexpected, so nobody will clear the xfer if not us */
-               __scmi_xfer_put(minfo, xfer);
+       xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
+       if (IS_ERR(xfer)) {
+               scmi_clear_channel(info, cinfo);
                return;
        }
 
        /* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
-       if (msg_type == MSG_TYPE_DELAYED_RESP)
+       if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
                xfer->rx.len = info->desc->max_msg_size;
 
-       scmi_dump_header_dbg(dev, &xfer->hdr);
-
+       if (priv)
+               xfer->priv = priv;
        info->desc->ops->fetch_response(cinfo, xfer);
 
        trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
                           xfer->hdr.protocol_id, xfer->hdr.seq,
-                          msg_type);
+                          xfer->hdr.type);
 
-       if (msg_type == MSG_TYPE_DELAYED_RESP) {
-               info->desc->ops->clear_channel(cinfo);
+       if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
+               scmi_clear_channel(info, cinfo);
                complete(xfer->async_done);
        } else {
                complete(&xfer->done);
        }
+
+       scmi_xfer_command_release(info, xfer);
 }
 
 /**
@@ -359,6 +683,7 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
  *
  * @cinfo: SCMI channel info
  * @msg_hdr: Message header
+ * @priv: Transport specific private data.
  *
  * Processes one received message to appropriate transfer information and
  * signals completion of the transfer.
@@ -366,18 +691,17 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
  * NOTE: This function will be invoked in IRQ context, hence should be
  * as optimal as possible.
  */
-void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
 {
-       u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
        u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
 
        switch (msg_type) {
        case MSG_TYPE_NOTIFICATION:
-               scmi_handle_notification(cinfo, msg_hdr);
+               scmi_handle_notification(cinfo, msg_hdr, priv);
                break;
        case MSG_TYPE_COMMAND:
        case MSG_TYPE_DELAYED_RESP:
-               scmi_handle_response(cinfo, xfer_id, msg_type);
+               scmi_handle_response(cinfo, msg_hdr, priv);
                break;
        default:
                WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
@@ -389,7 +713,7 @@ void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
  * xfer_put() - Release a transmit message
  *
  * @ph: Pointer to SCMI protocol handle
- * @xfer: message that was reserved by scmi_xfer_get
+ * @xfer: message that was reserved by xfer_get_init
  */
 static void xfer_put(const struct scmi_protocol_handle *ph,
                     struct scmi_xfer *xfer)
@@ -407,7 +731,12 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
 {
        struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
 
+       /*
+        * Poll also on xfer->done so that polling can be forcibly terminated
+        * in case of out-of-order receptions of delayed responses
+        */
        return info->desc->ops->poll_done(cinfo, xfer) ||
+              try_wait_for_completion(&xfer->done) ||
               ktime_after(ktime_get(), stop);
 }
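
A sketch of this poll-until-done-or-deadline pattern in userspace, assuming
POSIX clock_gettime(); the kernel uses ktime helpers and spin_until_cond()
instead, and the stubbed predicates stand in for the transport's
->poll_done() and try_wait_for_completion():

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static bool poll_done(void)	  { return false; }	/* transport stub */
static bool completion_done(void) { return false; }	/* xfer->done stub */

int main(void)
{
	long long stop = now_ns() + 1000000;	/* 1 ms polling budget */

	/* Exit on transport completion, on an out-of-order delayed
	 * response having already completed the xfer, or on timeout. */
	while (!poll_done() && !completion_done() && now_ns() < stop)
		;
	printf("%s\n", now_ns() < stop ? "completed" : "timed out");
	return 0;
}
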
 
@@ -431,6 +760,12 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
        struct device *dev = info->dev;
        struct scmi_chan_info *cinfo;
 
+       if (xfer->hdr.poll_completion && !info->desc->ops->poll_done) {
+               dev_warn_once(dev,
+                             "Polling mode is not supported by transport.\n");
+               return -EINVAL;
+       }
+
        /*
         * Initialise protocol id now from protocol handle to avoid it being
         * overridden by mistake (or malice) by the protocol code mangling with
@@ -447,6 +782,16 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
                              xfer->hdr.protocol_id, xfer->hdr.seq,
                              xfer->hdr.poll_completion);
 
+       xfer->state = SCMI_XFER_SENT_OK;
+       /*
+        * Even though spinlocking is not needed here since no race is possible
+        * on xfer->state due to the monotonically increasing tokens allocation,
+        * we must anyway ensure xfer->state initialization is not re-ordered
+        * after the .send_message() to be sure that on the RX path an early
+        * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
+        */
+       smp_mb();
+
        ret = info->desc->ops->send_message(cinfo, xfer);
        if (ret < 0) {
                dev_dbg(dev, "Failed to send message %d\n", ret);
@@ -457,11 +802,22 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
                ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
 
                spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));
-
-               if (ktime_before(ktime_get(), stop))
-                       info->desc->ops->fetch_response(cinfo, xfer);
-               else
+               if (ktime_before(ktime_get(), stop)) {
+                       unsigned long flags;
+
+                       /*
+                        * Do not fetch_response if an out-of-order delayed
+                        * response is being processed.
+                        */
+                       spin_lock_irqsave(&xfer->lock, flags);
+                       if (xfer->state == SCMI_XFER_SENT_OK) {
+                               info->desc->ops->fetch_response(cinfo, xfer);
+                               xfer->state = SCMI_XFER_RESP_OK;
+                       }
+                       spin_unlock_irqrestore(&xfer->lock, flags);
+               } else {
                        ret = -ETIMEDOUT;
+               }
        } else {
                /* And we wait for the response. */
                timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
@@ -556,7 +912,7 @@ static int xfer_get_init(const struct scmi_protocol_handle *ph,
            tx_size > info->desc->max_msg_size)
                return -ERANGE;
 
-       xfer = scmi_xfer_get(pi->handle, minfo);
+       xfer = scmi_xfer_get(pi->handle, minfo, true);
        if (IS_ERR(xfer)) {
                ret = PTR_ERR(xfer);
                dev_err(dev, "failed to get free message slot (%d)\n", ret);
@@ -565,6 +921,7 @@ static int xfer_get_init(const struct scmi_protocol_handle *ph,
 
        xfer->tx.len = tx_size;
        xfer->rx.len = rx_size ? : info->desc->max_msg_size;
+       xfer->hdr.type = MSG_TYPE_COMMAND;
        xfer->hdr.id = msg_id;
        xfer->hdr.poll_completion = false;
 
@@ -1025,24 +1382,32 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
        const struct scmi_desc *desc = sinfo->desc;
 
        /* Pre-allocated messages, no more than what hdr.seq can support */
-       if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
-               dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
-                       desc->max_msg, MSG_TOKEN_MAX);
+       if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
+               dev_err(dev,
+                       "Invalid maximum messages %d, not in range [1 - %lu]\n",
+                       info->max_msg, MSG_TOKEN_MAX);
                return -EINVAL;
        }
 
-       info->xfer_block = devm_kcalloc(dev, desc->max_msg,
-                                       sizeof(*info->xfer_block), GFP_KERNEL);
-       if (!info->xfer_block)
-               return -ENOMEM;
+       hash_init(info->pending_xfers);
 
-       info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
+       /* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
+       info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX),
                                              sizeof(long), GFP_KERNEL);
        if (!info->xfer_alloc_table)
                return -ENOMEM;
 
-       /* Pre-initialize the buffer pointer to pre-allocated buffers */
-       for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
+       /*
+        * Preallocate a number of xfers equal to the maximum allowed in-flight
+        * messages, pre-initialize the buffer pointers to the pre-allocated
+        * buffers and attach all of them to the free list.
+        */
+       INIT_HLIST_HEAD(&info->free_xfers);
+       for (i = 0; i < info->max_msg; i++) {
+               xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
+               if (!xfer)
+                       return -ENOMEM;
+
                xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
                                            GFP_KERNEL);
                if (!xfer->rx.buf)
@@ -1050,6 +1415,10 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
 
                xfer->tx.buf = xfer->rx.buf;
                init_completion(&xfer->done);
+               spin_lock_init(&xfer->lock);
+
+               /* Add initialized xfer to the free list */
+               hlist_add_head(&xfer->node, &info->free_xfers);
        }
 
        spin_lock_init(&info->xfer_lock);
@@ -1057,10 +1426,40 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
        return 0;
 }
 
+static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
+{
+       const struct scmi_desc *desc = sinfo->desc;
+
+       if (!desc->ops->get_max_msg) {
+               sinfo->tx_minfo.max_msg = desc->max_msg;
+               sinfo->rx_minfo.max_msg = desc->max_msg;
+       } else {
+               struct scmi_chan_info *base_cinfo;
+
+               base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
+               if (!base_cinfo)
+                       return -EINVAL;
+               sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
+
+               /* RX channel is optional so can be skipped */
+               base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
+               if (base_cinfo)
+                       sinfo->rx_minfo.max_msg =
+                               desc->ops->get_max_msg(base_cinfo);
+       }
+
+       return 0;
+}
+
 static int scmi_xfer_info_init(struct scmi_info *sinfo)
 {
-       int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
+       int ret;
+
+       ret = scmi_channels_max_msg_configure(sinfo);
+       if (ret)
+               return ret;
 
+       ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
        if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
                ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
 
@@ -1137,6 +1536,8 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
  * @proto_id and @name: if device was still not existent it is created as a
  * child of the specified SCMI instance @info and its transport properly
  * initialized as usual.
+ *
+ * Return: A properly initialized scmi device, NULL otherwise.
  */
 static inline struct scmi_device *
 scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
@@ -1386,6 +1787,21 @@ void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
        mutex_unlock(&scmi_requested_devices_mtx);
 }
 
+static int scmi_cleanup_txrx_channels(struct scmi_info *info)
+{
+       int ret;
+       struct idr *idr = &info->tx_idr;
+
+       ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
+       idr_destroy(&info->tx_idr);
+
+       idr = &info->rx_idr;
+       ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
+       idr_destroy(&info->rx_idr);
+
+       return ret;
+}
+
 static int scmi_probe(struct platform_device *pdev)
 {
        int ret;
@@ -1420,13 +1836,19 @@ static int scmi_probe(struct platform_device *pdev)
        handle->devm_protocol_get = scmi_devm_protocol_get;
        handle->devm_protocol_put = scmi_devm_protocol_put;
 
+       if (desc->ops->link_supplier) {
+               ret = desc->ops->link_supplier(dev);
+               if (ret)
+                       return ret;
+       }
+
        ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
        if (ret)
                return ret;
 
        ret = scmi_xfer_info_init(info);
        if (ret)
-               return ret;
+               goto clear_txrx_setup;
 
        if (scmi_notification_init(handle))
                dev_err(dev, "SCMI Notifications NOT available.\n");
@@ -1439,7 +1861,7 @@ static int scmi_probe(struct platform_device *pdev)
        ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
        if (ret) {
                dev_err(dev, "unable to communicate with SCMI\n");
-               return ret;
+               goto notification_exit;
        }
 
        mutex_lock(&scmi_list_mutex);
@@ -1478,6 +1900,12 @@ static int scmi_probe(struct platform_device *pdev)
        }
 
        return 0;
+
+notification_exit:
+       scmi_notification_exit(&info->handle);
+clear_txrx_setup:
+       scmi_cleanup_txrx_channels(info);
+       return ret;
 }
 
 void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
@@ -1489,7 +1917,6 @@ static int scmi_remove(struct platform_device *pdev)
 {
        int ret = 0, id;
        struct scmi_info *info = platform_get_drvdata(pdev);
-       struct idr *idr = &info->tx_idr;
        struct device_node *child;
 
        mutex_lock(&scmi_list_mutex);
@@ -1513,14 +1940,7 @@ static int scmi_remove(struct platform_device *pdev)
        idr_destroy(&info->active_protocols);
 
        /* Safe to free channels since no more users */
-       ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
-       idr_destroy(&info->tx_idr);
-
-       idr = &info->rx_idr;
-       ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
-       idr_destroy(&info->rx_idr);
-
-       return ret;
+       return scmi_cleanup_txrx_channels(info);
 }
 
 static ssize_t protocol_version_show(struct device *dev,
@@ -1571,12 +1991,15 @@ ATTRIBUTE_GROUPS(versions);
 
 /* Each compatible listed below must have descriptor associated with it */
 static const struct of_device_id scmi_of_match[] = {
-#ifdef CONFIG_MAILBOX
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
        { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
 #endif
-#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
        { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
 #endif
+#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
+       { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
+#endif
        { /* Sentinel */ },
 };
 
@@ -1592,10 +2015,69 @@ static struct platform_driver scmi_driver = {
        .remove = scmi_remove,
 };
 
+/**
+ * __scmi_transports_setup  - Common helper to call transport-specific
+ * .init/.exit code if provided.
+ *
+ * @init: A flag to distinguish between init and exit.
+ *
+ * Note that, if provided, we invoke .init/.exit functions for all the
+ * transports currently compiled in.
+ *
+ * Return: 0 on Success.
+ */
+static inline int __scmi_transports_setup(bool init)
+{
+       int ret = 0;
+       const struct of_device_id *trans;
+
+       for (trans = scmi_of_match; trans->data; trans++) {
+               const struct scmi_desc *tdesc = trans->data;
+
+               if ((init && !tdesc->transport_init) ||
+                   (!init && !tdesc->transport_exit))
+                       continue;
+
+               if (init)
+                       ret = tdesc->transport_init();
+               else
+                       tdesc->transport_exit();
+
+               if (ret) {
+                       pr_err("SCMI transport %s FAILED initialization!\n",
+                              trans->compatible);
+                       break;
+               }
+       }
+
+       return ret;
+}
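
A standalone sketch of walking a descriptor table and invoking optional
init/exit hooks, mirroring the loop above; the transport names and hook
bodies are purely illustrative:

#include <stdio.h>

struct transport_desc {
	const char *name;
	int (*transport_init)(void);
	void (*transport_exit)(void);
};

static int vio_init(void) { puts("virtio init"); return 0; }
static void vio_exit(void) { puts("virtio exit"); }

static const struct transport_desc transports[] = {
	{ "mailbox", NULL, NULL },		/* no hooks: skipped */
	{ "virtio", vio_init, vio_exit },
	{ NULL, NULL, NULL },			/* sentinel */
};

static int transports_setup(int init)
{
	const struct transport_desc *t;

	for (t = transports; t->name; t++) {
		if (init && t->transport_init) {
			int ret = t->transport_init();

			if (ret) {
				printf("%s FAILED initialization\n", t->name);
				return ret;
			}
		} else if (!init && t->transport_exit) {
			t->transport_exit();
		}
	}
	return 0;
}

int main(void)
{
	transports_setup(1);	/* init all compiled-in transports */
	transports_setup(0);	/* exit them again */
	return 0;
}
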
+
+static int __init scmi_transports_init(void)
+{
+       return __scmi_transports_setup(true);
+}
+
+static void __exit scmi_transports_exit(void)
+{
+       __scmi_transports_setup(false);
+}
+
 static int __init scmi_driver_init(void)
 {
+       int ret;
+
+       /* Bail out if no SCMI transport was configured */
+       if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
+               return -EINVAL;
+
        scmi_bus_init();
 
+       /* Initialize any compiled-in transport which provided an init/exit */
+       ret = scmi_transports_init();
+       if (ret)
+               return ret;
+
        scmi_base_register();
 
        scmi_clock_register();
@@ -1624,6 +2106,8 @@ static void __exit scmi_driver_exit(void)
 
        scmi_bus_exit();
 
+       scmi_transports_exit();
+
        platform_driver_unregister(&scmi_driver);
 }
 module_exit(scmi_driver_exit);
index e3dcb58..e09eb12 100644 (file)
@@ -43,7 +43,7 @@ static void rx_callback(struct mbox_client *cl, void *m)
 {
        struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
 
-       scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem));
+       scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
 }
 
 static bool mailbox_chan_available(struct device *dev, int idx)
diff --git a/drivers/firmware/arm_scmi/msg.c b/drivers/firmware/arm_scmi/msg.c
new file mode 100644 (file)
index 0000000..d33a704
--- /dev/null
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * For transports using message passing.
+ *
+ * Derived from shm.c.
+ *
+ * Copyright (C) 2019-2021 ARM Ltd.
+ * Copyright (C) 2020-2021 OpenSynergy GmbH
+ */
+
+#include <linux/types.h>
+
+#include "common.h"
+
+/*
+ * struct scmi_msg_payld - Transport SDU layout
+ *
+ * The SCMI specification requires all parameters, message headers, return
+ * arguments or any protocol data to be expressed in little endian format only.
+ */
+struct scmi_msg_payld {
+       __le32 msg_header;
+       __le32 msg_payload[];
+};
+
+/**
+ * msg_command_size() - Actual size of transport SDU for command.
+ *
+ * @xfer: message which core has prepared for sending
+ *
+ * Return: transport SDU size.
+ */
+size_t msg_command_size(struct scmi_xfer *xfer)
+{
+       return sizeof(struct scmi_msg_payld) + xfer->tx.len;
+}
+
+/**
+ * msg_response_size() - Maximum size of transport SDU for response.
+ *
+ * @xfer: message which core has prepared for sending
+ *
+ * Return: transport SDU size.
+ */
+size_t msg_response_size(struct scmi_xfer *xfer)
+{
+       return sizeof(struct scmi_msg_payld) + sizeof(__le32) + xfer->rx.len;
+}
+
+/**
+ * msg_tx_prepare() - Set up transport SDU for command.
+ *
+ * @msg: transport SDU for command
+ * @xfer: message which is being sent
+ */
+void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer)
+{
+       msg->msg_header = cpu_to_le32(pack_scmi_header(&xfer->hdr));
+       if (xfer->tx.buf)
+               memcpy(msg->msg_payload, xfer->tx.buf, xfer->tx.len);
+}
+
+/**
+ * msg_read_header() - Read SCMI header from transport SDU.
+ *
+ * @msg: transport SDU
+ *
+ * Return: SCMI header
+ */
+u32 msg_read_header(struct scmi_msg_payld *msg)
+{
+       return le32_to_cpu(msg->msg_header);
+}
+
+/**
+ * msg_fetch_response() - Fetch response SCMI payload from transport SDU.
+ *
+ * @msg: transport SDU with response
+ * @len: transport SDU size
+ * @xfer: message being responded to
+ */
+void msg_fetch_response(struct scmi_msg_payld *msg, size_t len,
+                       struct scmi_xfer *xfer)
+{
+       size_t prefix_len = sizeof(*msg) + sizeof(msg->msg_payload[0]);
+
+       xfer->hdr.status = le32_to_cpu(msg->msg_payload[0]);
+       xfer->rx.len = min_t(size_t, xfer->rx.len,
+                            len >= prefix_len ? len - prefix_len : 0);
+
+       /* Take a copy to the rx buffer. */
+       memcpy(xfer->rx.buf, &msg->msg_payload[1], xfer->rx.len);
+}
+
+/**
+ * msg_fetch_notification() - Fetch notification payload from transport SDU.
+ *
+ * @msg: transport SDU with notification
+ * @len: transport SDU size
+ * @max_len: maximum SCMI payload size to fetch
+ * @xfer: notification message
+ */
+void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len,
+                           size_t max_len, struct scmi_xfer *xfer)
+{
+       xfer->rx.len = min_t(size_t, max_len,
+                            len >= sizeof(*msg) ? len - sizeof(*msg) : 0);
+
+       /* Take a copy to the rx buffer. */
+       memcpy(xfer->rx.buf, msg->msg_payload, xfer->rx.len);
+}
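
The length arithmetic in the two fetch helpers above reduces to "SDU length
minus a fixed prefix, clamped to the rx buffer"; a small sketch, with the
4-byte header and status sizes written out as assumptions matching
SCMI_MSG_MAX_PROT_OVERHEAD:

#include <stdio.h>

#define HDR_SZ 4U	/* sizeof(__le32) msg_header */
#define STATUS_SZ 4U	/* sizeof(__le32) leading status in responses */

static size_t response_payload_len(size_t sdu_len, size_t rx_buf_len)
{
	size_t prefix = HDR_SZ + STATUS_SZ;
	size_t avail = sdu_len >= prefix ? sdu_len - prefix : 0;

	return avail < rx_buf_len ? avail : rx_buf_len;	/* min_t() */
}

int main(void)
{
	/* 20-byte SDU leaves 12 payload bytes, clamped to an 8-byte buffer */
	printf("%zu\n", response_payload_len(20, 8));	/* 8 */
	printf("%zu\n", response_payload_len(6, 8));	/* 0: short SDU */
	return 0;
}
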
index d860beb..0efd20c 100644 (file)
@@ -1457,6 +1457,8 @@ static void scmi_devm_release_notifier(struct device *dev, void *res)
  *
  * Generic devres managed helper to register a notifier_block against a
  * protocol event.
+ *
+ * Return: 0 on Success
  */
 static int scmi_devm_notifier_register(struct scmi_device *sdev,
                                       u8 proto_id, u8 evt_id,
@@ -1523,6 +1525,8 @@ static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
  * Generic devres managed helper to explicitly un-register a notifier_block
  * against a protocol event, which was previously registered using the above
  * @scmi_devm_notifier_register.
+ *
+ * Return: 0 on Success
  */
 static int scmi_devm_notifier_unregister(struct scmi_device *sdev,
                                         u8 proto_id, u8 evt_id,
index 2c88aa2..3084715 100644 (file)
@@ -166,7 +166,8 @@ struct scmi_msg_sensor_reading_get {
 
 struct scmi_resp_sensor_reading_complete {
        __le32 id;
-       __le64 readings;
+       __le32 readings_low;
+       __le32 readings_high;
 };
 
 struct scmi_sensor_reading_resp {
@@ -717,7 +718,8 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
 
                        resp = t->rx.buf;
                        if (le32_to_cpu(resp->id) == sensor_id)
-                               *value = get_unaligned_le64(&resp->readings);
+                               *value =
+                                       get_unaligned_le64(&resp->readings_low);
                        else
                                ret = -EPROTO;
                }
index bed5596..4effecc 100644 (file)
@@ -154,7 +154,8 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
        if (scmi_info->irq)
                wait_for_completion(&scmi_info->tx_complete);
 
-       scmi_rx_callback(scmi_info->cinfo, shmem_read_header(scmi_info->shmem));
+       scmi_rx_callback(scmi_info->cinfo,
+                        shmem_read_header(scmi_info->shmem), NULL);
 
        mutex_unlock(&scmi_info->shmem_lock);
 
diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
new file mode 100644 (file)
index 0000000..224577f
--- /dev/null
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Virtio Transport driver for Arm System Control and Management Interface
+ * (SCMI).
+ *
+ * Copyright (C) 2020-2021 OpenSynergy.
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+/**
+ * DOC: Theory of Operation
+ *
+ * The scmi-virtio transport implements a driver for the virtio SCMI device.
+ *
+ * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx
+ * channel (virtio eventq, P2A channel). Each channel is implemented through a
+ * virtqueue. Access to each virtqueue is protected by spinlocks.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+
+#include <uapi/linux/virtio_ids.h>
+#include <uapi/linux/virtio_scmi.h>
+
+#include "common.h"
+
+#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
+#define VIRTIO_SCMI_MAX_PDU_SIZE \
+       (VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
+#define DESCRIPTORS_PER_TX_MSG 2
+
+/**
+ * struct scmi_vio_channel - Transport channel information
+ *
+ * @vqueue: Associated virtqueue
+ * @cinfo: SCMI Tx or Rx channel
+ * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
+ * @is_rx: Whether channel is an Rx channel
+ * @ready: Whether the transport user is ready to hear about the channel
+ * @max_msg: Maximum number of pending messages for this channel.
+ * @lock: Protects access to all members except ready.
+ * @ready_lock: Protects access to ready. If required, it must be taken before
+ *              lock.
+ */
+struct scmi_vio_channel {
+       struct virtqueue *vqueue;
+       struct scmi_chan_info *cinfo;
+       struct list_head free_list;
+       bool is_rx;
+       bool ready;
+       unsigned int max_msg;
+       /* lock to protect access to all members except ready. */
+       spinlock_t lock;
+       /* lock to protect access to the ready flag. */
+       spinlock_t ready_lock;
+};
+
+/**
+ * struct scmi_vio_msg - Transport PDU information
+ *
+ * @request: SDU used for commands
+ * @input: SDU used for (delayed) responses and notifications
+ * @list: List which scmi_vio_msg may be part of
+ * @rx_len: Input SDU size in bytes, once input has been received
+ */
+struct scmi_vio_msg {
+       struct scmi_msg_payld *request;
+       struct scmi_msg_payld *input;
+       struct list_head list;
+       unsigned int rx_len;
+};
+
+/* Only one SCMI VirtIO device can possibly exist */
+static struct virtio_device *scmi_vdev;
+
+static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
+{
+       return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
+}
+
+static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
+                              struct scmi_vio_msg *msg)
+{
+       struct scatterlist sg_in;
+       int rc;
+       unsigned long flags;
+
+       sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);
+
+       spin_lock_irqsave(&vioch->lock, flags);
+
+       rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
+       if (rc)
+               dev_err_once(vioch->cinfo->dev,
+                            "failed to add to virtqueue (%d)\n", rc);
+       else
+               virtqueue_kick(vioch->vqueue);
+
+       spin_unlock_irqrestore(&vioch->lock, flags);
+
+       return rc;
+}
+
+static void scmi_finalize_message(struct scmi_vio_channel *vioch,
+                                 struct scmi_vio_msg *msg)
+{
+       if (vioch->is_rx) {
+               scmi_vio_feed_vq_rx(vioch, msg);
+       } else {
+               unsigned long flags;
+
+               spin_lock_irqsave(&vioch->lock, flags);
+               list_add(&msg->list, &vioch->free_list);
+               spin_unlock_irqrestore(&vioch->lock, flags);
+       }
+}
+
+static void scmi_vio_complete_cb(struct virtqueue *vqueue)
+{
+       unsigned long ready_flags;
+       unsigned long flags;
+       unsigned int length;
+       struct scmi_vio_channel *vioch;
+       struct scmi_vio_msg *msg;
+       bool cb_enabled = true;
+
+       if (WARN_ON_ONCE(!vqueue->vdev->priv))
+               return;
+       vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];
+
+       for (;;) {
+               spin_lock_irqsave(&vioch->ready_lock, ready_flags);
+
+               if (!vioch->ready) {
+                       if (!cb_enabled)
+                               (void)virtqueue_enable_cb(vqueue);
+                       goto unlock_ready_out;
+               }
+
+               spin_lock_irqsave(&vioch->lock, flags);
+               if (cb_enabled) {
+                       virtqueue_disable_cb(vqueue);
+                       cb_enabled = false;
+               }
+               msg = virtqueue_get_buf(vqueue, &length);
+               if (!msg) {
+                       if (virtqueue_enable_cb(vqueue))
+                               goto unlock_out;
+                       cb_enabled = true;
+               }
+               spin_unlock_irqrestore(&vioch->lock, flags);
+
+               if (msg) {
+                       msg->rx_len = length;
+                       scmi_rx_callback(vioch->cinfo,
+                                        msg_read_header(msg->input), msg);
+
+                       scmi_finalize_message(vioch, msg);
+               }
+
+               spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
+       }
+
+unlock_out:
+       spin_unlock_irqrestore(&vioch->lock, flags);
+unlock_ready_out:
+       spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
+}
+
+static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" };
+
+static vq_callback_t *scmi_vio_complete_callbacks[] = {
+       scmi_vio_complete_cb,
+       scmi_vio_complete_cb
+};
+
+static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
+{
+       struct scmi_vio_channel *vioch = base_cinfo->transport_info;
+
+       return vioch->max_msg;
+}
+
+static int virtio_link_supplier(struct device *dev)
+{
+       if (!scmi_vdev) {
+               dev_notice_once(dev,
+                               "Deferring probe after not finding a bound scmi-virtio device\n");
+               return -EPROBE_DEFER;
+       }
+
+       if (!device_link_add(dev, &scmi_vdev->dev,
+                            DL_FLAG_AUTOREMOVE_CONSUMER)) {
+               dev_err(dev, "Adding link to supplier virtio device failed\n");
+               return -ECANCELED;
+       }
+
+       return 0;
+}
+
+static bool virtio_chan_available(struct device *dev, int idx)
+{
+       struct scmi_vio_channel *channels, *vioch = NULL;
+
+       if (WARN_ON_ONCE(!scmi_vdev))
+               return false;
+
+       channels = (struct scmi_vio_channel *)scmi_vdev->priv;
+
+       switch (idx) {
+       case VIRTIO_SCMI_VQ_TX:
+               vioch = &channels[VIRTIO_SCMI_VQ_TX];
+               break;
+       case VIRTIO_SCMI_VQ_RX:
+               if (scmi_vio_have_vq_rx(scmi_vdev))
+                       vioch = &channels[VIRTIO_SCMI_VQ_RX];
+               break;
+       default:
+               return false;
+       }
+
+       return vioch && !vioch->cinfo;
+}
+
+static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+                            bool tx)
+{
+       unsigned long flags;
+       struct scmi_vio_channel *vioch;
+       int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX;
+       int i;
+
+       if (!scmi_vdev)
+               return -EPROBE_DEFER;
+
+       vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];
+
+       for (i = 0; i < vioch->max_msg; i++) {
+               struct scmi_vio_msg *msg;
+
+               msg = devm_kzalloc(cinfo->dev, sizeof(*msg), GFP_KERNEL);
+               if (!msg)
+                       return -ENOMEM;
+
+               if (tx) {
+                       msg->request = devm_kzalloc(cinfo->dev,
+                                                   VIRTIO_SCMI_MAX_PDU_SIZE,
+                                                   GFP_KERNEL);
+                       if (!msg->request)
+                               return -ENOMEM;
+               }
+
+               msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE,
+                                         GFP_KERNEL);
+               if (!msg->input)
+                       return -ENOMEM;
+
+               if (tx) {
+                       spin_lock_irqsave(&vioch->lock, flags);
+                       list_add_tail(&msg->list, &vioch->free_list);
+                       spin_unlock_irqrestore(&vioch->lock, flags);
+               } else {
+                       scmi_vio_feed_vq_rx(vioch, msg);
+               }
+       }
+
+       spin_lock_irqsave(&vioch->lock, flags);
+       cinfo->transport_info = vioch;
+       /* Indirectly mark the channel as not available any more */
+       vioch->cinfo = cinfo;
+       spin_unlock_irqrestore(&vioch->lock, flags);
+
+       spin_lock_irqsave(&vioch->ready_lock, flags);
+       vioch->ready = true;
+       spin_unlock_irqrestore(&vioch->ready_lock, flags);
+
+       return 0;
+}
+
+static int virtio_chan_free(int id, void *p, void *data)
+{
+       unsigned long flags;
+       struct scmi_chan_info *cinfo = p;
+       struct scmi_vio_channel *vioch = cinfo->transport_info;
+
+       spin_lock_irqsave(&vioch->ready_lock, flags);
+       vioch->ready = false;
+       spin_unlock_irqrestore(&vioch->ready_lock, flags);
+
+       scmi_free_channel(cinfo, data, id);
+
+       spin_lock_irqsave(&vioch->lock, flags);
+       vioch->cinfo = NULL;
+       spin_unlock_irqrestore(&vioch->lock, flags);
+
+       return 0;
+}
+
+static int virtio_send_message(struct scmi_chan_info *cinfo,
+                              struct scmi_xfer *xfer)
+{
+       struct scmi_vio_channel *vioch = cinfo->transport_info;
+       struct scatterlist sg_out;
+       struct scatterlist sg_in;
+       struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in };
+       unsigned long flags;
+       int rc;
+       struct scmi_vio_msg *msg;
+
+       spin_lock_irqsave(&vioch->lock, flags);
+
+       if (list_empty(&vioch->free_list)) {
+               spin_unlock_irqrestore(&vioch->lock, flags);
+               return -EBUSY;
+       }
+
+       msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
+       list_del(&msg->list);
+
+       msg_tx_prepare(msg->request, xfer);
+
+       sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
+       sg_init_one(&sg_in, msg->input, msg_response_size(xfer));
+
+       rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
+       if (rc) {
+               list_add(&msg->list, &vioch->free_list);
+               dev_err_once(vioch->cinfo->dev,
+                            "%s() failed to add to virtqueue (%d)\n", __func__,
+                            rc);
+       } else {
+               virtqueue_kick(vioch->vqueue);
+       }
+
+       spin_unlock_irqrestore(&vioch->lock, flags);
+
+       return rc;
+}
+
+static void virtio_fetch_response(struct scmi_chan_info *cinfo,
+                                 struct scmi_xfer *xfer)
+{
+       struct scmi_vio_msg *msg = xfer->priv;
+
+       if (msg) {
+               msg_fetch_response(msg->input, msg->rx_len, xfer);
+               xfer->priv = NULL;
+       }
+}
+
+static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
+                                     size_t max_len, struct scmi_xfer *xfer)
+{
+       struct scmi_vio_msg *msg = xfer->priv;
+
+       if (msg) {
+               msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer);
+               xfer->priv = NULL;
+       }
+}
+
+static const struct scmi_transport_ops scmi_virtio_ops = {
+       .link_supplier = virtio_link_supplier,
+       .chan_available = virtio_chan_available,
+       .chan_setup = virtio_chan_setup,
+       .chan_free = virtio_chan_free,
+       .get_max_msg = virtio_get_max_msg,
+       .send_message = virtio_send_message,
+       .fetch_response = virtio_fetch_response,
+       .fetch_notification = virtio_fetch_notification,
+};
+
+static int scmi_vio_probe(struct virtio_device *vdev)
+{
+       struct device *dev = &vdev->dev;
+       struct scmi_vio_channel *channels;
+       bool have_vq_rx;
+       int vq_cnt;
+       int i;
+       int ret;
+       struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];
+
+       /* Only one SCMI VirtIO device allowed */
+       if (scmi_vdev)
+               return -EINVAL;
+
+       have_vq_rx = scmi_vio_have_vq_rx(vdev);
+       vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;
+
+       channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL);
+       if (!channels)
+               return -ENOMEM;
+
+       if (have_vq_rx)
+               channels[VIRTIO_SCMI_VQ_RX].is_rx = true;
+
+       ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_complete_callbacks,
+                             scmi_vio_vqueue_names, NULL);
+       if (ret) {
+               dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt);
+               return ret;
+       }
+
+       for (i = 0; i < vq_cnt; i++) {
+               unsigned int sz;
+
+               spin_lock_init(&channels[i].lock);
+               spin_lock_init(&channels[i].ready_lock);
+               INIT_LIST_HEAD(&channels[i].free_list);
+               channels[i].vqueue = vqs[i];
+
+               sz = virtqueue_get_vring_size(channels[i].vqueue);
+               /* Tx messages need multiple descriptors. */
+               if (!channels[i].is_rx)
+                       sz /= DESCRIPTORS_PER_TX_MSG;
+
+               if (sz > MSG_TOKEN_MAX) {
+                       dev_info_once(dev,
+                                     "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
+                                     channels[i].is_rx ? "rx" : "tx",
+                                     sz, MSG_TOKEN_MAX);
+                       sz = MSG_TOKEN_MAX;
+               }
+               channels[i].max_msg = sz;
+       }
+
+       vdev->priv = channels;
+       scmi_vdev = vdev;
+
+       return 0;
+}
+
+static void scmi_vio_remove(struct virtio_device *vdev)
+{
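+       /* Reset the device so it stops using the vrings before deleting them. */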
+       vdev->config->reset(vdev);
+       vdev->config->del_vqs(vdev);
+       scmi_vdev = NULL;
+}
+
+static int scmi_vio_validate(struct virtio_device *vdev)
+{
+       if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+               dev_err(&vdev->dev,
+                       "device does not comply with spec version 1.x\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static unsigned int features[] = {
+       VIRTIO_SCMI_F_P2A_CHANNELS,
+};
+
+static const struct virtio_device_id id_table[] = {
+       { VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
+       { 0 }
+};
+
+static struct virtio_driver virtio_scmi_driver = {
+       .driver.name = "scmi-virtio",
+       .driver.owner = THIS_MODULE,
+       .feature_table = features,
+       .feature_table_size = ARRAY_SIZE(features),
+       .id_table = id_table,
+       .probe = scmi_vio_probe,
+       .remove = scmi_vio_remove,
+       .validate = scmi_vio_validate,
+};
+
+static int __init virtio_scmi_init(void)
+{
+       return register_virtio_driver(&virtio_scmi_driver);
+}
+
+static void __exit virtio_scmi_exit(void)
+{
+       unregister_virtio_driver(&virtio_scmi_driver);
+}
+
+const struct scmi_desc scmi_virtio_desc = {
+       .transport_init = virtio_scmi_init,
+       .transport_exit = virtio_scmi_exit,
+       .ops = &scmi_virtio_ops,
+       .max_rx_timeout_ms = 60000, /* for non-realtime virtio devices */
+       .max_msg = 0, /* overridden by virtio_get_max_msg() */
+       .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
+};
index 10d4457..eb9c65f 100644 (file)
@@ -34,7 +34,6 @@ static long __init parse_acpi_path(const struct efi_dev_path *node,
                        break;
                if (!adev->pnp.unique_id && node->acpi.uid == 0)
                        break;
-               acpi_dev_put(adev);
        }
        if (!adev)
                return -ENODEV;
index 4b7ee3f..847f33f 100644 (file)
@@ -896,6 +896,7 @@ static int __init efi_memreserve_map_root(void)
 static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
 {
        struct resource *res, *parent;
+       int ret;
 
        res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
        if (!res)
@@ -908,7 +909,17 @@ static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
 
        /* we expect a conflict with a 'System RAM' region */
        parent = request_resource_conflict(&iomem_resource, res);
-       return parent ? request_resource(parent, res) : 0;
+       ret = parent ? request_resource(parent, res) : 0;
+
+       /*
+        * Given that efi_mem_reserve_iomem() can be called at any
+        * time, only call memblock_reserve() if the architecture
+        * keeps the infrastructure around.
+        */
+       if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
+               memblock_reserve(addr, size);
+
+       return ret;
 }
 
 int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
index aa8da0a..ae87dde 100644 (file)
@@ -630,8 +630,8 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
  * @image:     EFI loaded image protocol
  * @load_addr: pointer to loaded initrd
  * @load_size: size of loaded initrd
- * @soft_limit:        preferred size of allocated memory for loading the initrd
- * @hard_limit:        minimum size of allocated memory
+ * @soft_limit:        preferred address for loading the initrd
+ * @hard_limit:        upper limit address for loading the initrd
  *
  * Return:     status code
  */
index d8bc013..38722d2 100644 (file)
@@ -180,7 +180,10 @@ void __init efi_mokvar_table_init(void)
                pr_err("EFI MOKvar config table is not valid\n");
                return;
        }
-       efi_mem_reserve(efi.mokvar_table, map_size_needed);
+
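+       /*
+        * Boot services data would be reclaimed after boot unless it is
+        * explicitly reserved here.
+        */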
+       if (md.type == EFI_BOOT_SERVICES_DATA)
+               efi_mem_reserve(efi.mokvar_table, map_size_needed);
+
        efi_mokvar_table_size = map_size_needed;
 }
 
index c1955d3..8f66567 100644 (file)
@@ -62,9 +62,11 @@ int __init efi_tpm_eventlog_init(void)
        tbl_size = sizeof(*log_tbl) + log_tbl->size;
        memblock_reserve(efi.tpm_log, tbl_size);
 
-       if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
-           log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
-               pr_warn(FW_BUG "TPM Final Events table missing or invalid\n");
+       if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) {
+               pr_info("TPM Final Events table not present\n");
+               goto out;
+       } else if (log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
+               pr_warn(FW_BUG "TPM Final Events table invalid\n");
                goto out;
        }
 
index 440d99c..3e9fa4b 100644 (file)
@@ -296,25 +296,61 @@ static int bpmp_debug_show(struct seq_file *m, void *p)
        struct file *file = m->private;
        struct inode *inode = file_inode(file);
        struct tegra_bpmp *bpmp = inode->i_private;
-       char *databuf = NULL;
        char fnamebuf[256];
        const char *filename;
-       uint32_t nbytes = 0;
-       size_t len;
-       int err;
-
-       len = seq_get_buf(m, &databuf);
-       if (!databuf)
-               return -ENOMEM;
+       struct mrq_debug_request req = {
+               .cmd = cpu_to_le32(CMD_DEBUG_READ),
+       };
+       struct mrq_debug_response resp;
+       struct tegra_bpmp_message msg = {
+               .mrq = MRQ_DEBUG,
+               .tx = {
+                       .data = &req,
+                       .size = sizeof(req),
+               },
+               .rx = {
+                       .data = &resp,
+                       .size = sizeof(resp),
+               },
+       };
+       uint32_t fd = 0, len = 0;
+       int remaining, err;
 
        filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
        if (!filename)
                return -ENOENT;
 
-       err = mrq_debug_read(bpmp, filename, databuf, len, &nbytes);
-       if (!err)
-               seq_commit(m, nbytes);
+       mutex_lock(&bpmp_debug_lock);
+       err = mrq_debug_open(bpmp, filename, &fd, &len, 0);
+       if (err)
+               goto out;
+
+       req.frd.fd = fd;
+       remaining = len;
+
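+       /*
+        * Data arrives one MRQ response buffer per transfer; keep reading
+        * until the reported file length has been consumed.
+        */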
+       while (remaining > 0) {
+               err = tegra_bpmp_transfer(bpmp, &msg);
+               if (err < 0) {
+                       goto close;
+               } else if (msg.rx.ret < 0) {
+                       err = -EINVAL;
+                       goto close;
+               }
 
+               if (resp.frd.readlen > remaining) {
+                       pr_err("%s: read data length invalid\n", __func__);
+                       err = -EINVAL;
+                       goto close;
+               }
+
+               seq_write(m, resp.frd.data, resp.frd.readlen);
+               remaining -= resp.frd.readlen;
+       }
+
+close:
+       err = mrq_debug_close(bpmp, fd);
+out:
+       mutex_unlock(&bpmp_debug_lock);
        return err;
 }
 
index c0316ea..8ac6eb9 100644 (file)
@@ -619,6 +619,13 @@ struct amdgpu_video_codec_info {
        u32 max_level;
 };
 
+#define codec_info_build(type, width, height, level) \
+                        .codec_type = type,\
+                        .max_width = width,\
+                        .max_height = height,\
+                        .max_pixels_per_frame = height * width,\
+                        .max_level = level,
+
 struct amdgpu_video_codecs {
        const u32 codec_count;
        const struct amdgpu_video_codec_info *codec_array;
index 84a1b4b..6cc0d4f 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include <linux/power_supply.h>
 #include <linux/pm_runtime.h>
+#include <linux/suspend.h>
 #include <acpi/video.h>
 #include <acpi/actbl.h>
 
@@ -1042,7 +1043,7 @@ bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
 #if defined(CONFIG_AMD_PMC) || defined(CONFIG_AMD_PMC_MODULE)
        if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
                if (adev->flags & AMD_IS_APU)
-                       return true;
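+                       /* S0ix entry is only valid when suspending to idle. */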
+                       return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
        }
 #endif
        return false;
index db16b3e..cf62f43 100644 (file)
@@ -269,7 +269,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
                uint64_t *size);
 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-               struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed);
+               struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_sync_memory(
index 3b8e1ee..4fb1575 100644 (file)
@@ -1057,8 +1057,7 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
 
 static int update_gpuvm_pte(struct kgd_mem *mem,
                            struct kfd_mem_attachment *entry,
-                           struct amdgpu_sync *sync,
-                           bool *table_freed)
+                           struct amdgpu_sync *sync)
 {
        struct amdgpu_bo_va *bo_va = entry->bo_va;
        struct amdgpu_device *adev = entry->adev;
@@ -1069,7 +1068,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
                return ret;
 
        /* Update the page tables  */
-       ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
+       ret = amdgpu_vm_bo_update(adev, bo_va, false);
        if (ret) {
                pr_err("amdgpu_vm_bo_update failed\n");
                return ret;
@@ -1081,8 +1080,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
 static int map_bo_to_gpuvm(struct kgd_mem *mem,
                           struct kfd_mem_attachment *entry,
                           struct amdgpu_sync *sync,
-                          bool no_update_pte,
-                          bool *table_freed)
+                          bool no_update_pte)
 {
        int ret;
 
@@ -1099,7 +1097,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
        if (no_update_pte)
                return 0;
 
-       ret = update_gpuvm_pte(mem, entry, sync, table_freed);
+       ret = update_gpuvm_pte(mem, entry, sync);
        if (ret) {
                pr_err("update_gpuvm_pte() failed\n");
                goto update_gpuvm_pte_failed;
@@ -1393,8 +1391,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
                alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
                alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
-                       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
-                       AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+                       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
        } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
                domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
                alloc_flags = 0;
@@ -1597,8 +1594,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 }
 
 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-               struct kgd_dev *kgd, struct kgd_mem *mem,
-               void *drm_priv, bool *table_freed)
+               struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
@@ -1686,7 +1682,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
                         entry->va, entry->va + bo_size, entry);
 
                ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
-                                     is_invalid_userptr, table_freed);
+                                     is_invalid_userptr);
                if (ret) {
                        pr_err("Failed to map bo to gpuvm\n");
                        goto out_unreserve;
@@ -2136,7 +2132,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
                                continue;
 
                        kfd_mem_dmaunmap_attachment(mem, attachment);
-                       ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
+                       ret = update_gpuvm_pte(mem, attachment, &sync);
                        if (ret) {
                                pr_err("%s: update PTE failed\n", __func__);
                                /* make sure this gets validated again */
@@ -2342,7 +2338,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
                                continue;
 
                        kfd_mem_dmaunmap_attachment(mem, attachment);
-                       ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
+                       ret = update_gpuvm_pte(mem, attachment, &sync_obj);
                        if (ret) {
                                pr_debug("Memory eviction: update PTE failed. Try again\n");
                                goto validate_map_fail;
index 76fe5b7..30fa1f6 100644 (file)
@@ -781,7 +781,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
        if (r)
                return r;
 
-       r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL);
+       r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
        if (r)
                return r;
 
@@ -792,7 +792,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
        if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
                bo_va = fpriv->csa_va;
                BUG_ON(!bo_va);
-               r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+               r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        return r;
 
@@ -811,7 +811,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
                if (bo_va == NULL)
                        continue;
 
-               r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+               r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        return r;
 
index d303e88..f3fd5ec 100644 (file)
@@ -3504,13 +3504,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        r = amdgpu_device_get_job_timeout_settings(adev);
        if (r) {
                dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
-               goto failed_unmap;
+               return r;
        }
 
        /* early init functions */
        r = amdgpu_device_ip_early_init(adev);
        if (r)
-               goto failed_unmap;
+               return r;
 
        /* doorbell bar mapping and doorbell index init*/
        amdgpu_device_doorbell_init(adev);
@@ -3736,10 +3736,6 @@ release_ras_con:
 failed:
        amdgpu_vf_error_trans_all(adev);
 
-failed_unmap:
-       iounmap(adev->rmmio);
-       adev->rmmio = NULL;
-
        return r;
 }
 
index 71beb0d..361b86b 100644 (file)
@@ -1168,6 +1168,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
 
        /* Renoir */
+       {0x1002, 0x15E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
        {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
        {0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
        {0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
@@ -1189,6 +1190,10 @@ static const struct pci_device_id pciidlist[] = {
        /* Van Gogh */
        {0x1002, 0x163F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VANGOGH|AMD_IS_APU},
 
+       /* Yellow Carp */
+       {0x1002, 0x164D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
+       {0x1002, 0x1681, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
+
        /* Navy_Flounder */
        {0x1002, 0x73C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
        {0x1002, 0x73C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
index b3404c4..854fc49 100644 (file)
@@ -255,6 +255,15 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str
        if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                return -EPERM;
 
+       /* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
+        * for debugger access to invisible VRAM. Should have used MAP_SHARED
+        * instead. Clearing VM_MAYWRITE prevents the mapping from ever
+        * becoming writable and makes is_cow_mapping(vm_flags) false.
+        */
+       if (is_cow_mapping(vma->vm_flags) &&
+           !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+               vma->vm_flags &= ~VM_MAYWRITE;
+
        return drm_gem_ttm_mmap(obj, vma);
 }
 
@@ -612,7 +621,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 
        if (operation == AMDGPU_VA_OP_MAP ||
            operation == AMDGPU_VA_OP_REPLACE) {
-               r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+               r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        goto error;
        }
index 32ce0e6..83af307 100644 (file)
@@ -278,6 +278,21 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
        return true;
 }
 
+static void amdgpu_restore_msix(struct amdgpu_device *adev)
+{
+       u16 ctrl;
+
+       pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+       if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
+               return;
+
+       /* MSI-X state may be stale after a VF FLR; toggle enable to restore it. */
+       ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
+       pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+       ctrl |= PCI_MSIX_FLAGS_ENABLE;
+       pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+}
+
 /**
  * amdgpu_irq_init - initialize interrupt handling
  *
@@ -569,6 +584,9 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
 {
        int i, j, k;
 
+       if (amdgpu_sriov_vf(adev))
+               amdgpu_restore_msix(adev);
+
        for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
                if (!adev->irq.client[i].sources)
                        continue;
index c13b02c..fc66aca 100644 (file)
@@ -809,7 +809,7 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
 
 /* query/inject/cure begin */
 int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
-       struct ras_query_if *info)
+                                 struct ras_query_if *info)
 {
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
        struct ras_err_data err_data = {0, 0, 0, NULL};
@@ -1043,17 +1043,32 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
        return ret;
 }
 
-/* get the total error counts on all IPs */
-void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
-                                 unsigned long *ce_count,
-                                 unsigned long *ue_count)
+/**
+ * amdgpu_ras_query_error_count -- Get error counts of all IPs
+ * @adev: pointer to AMD GPU device
+ * @ce_count: pointer to an integer to be set to the count of correctable errors.
+ * @ue_count: pointer to an integer to be set to the count of uncorrectable
+ * errors.
+ *
+ * If @ce_count or @ue_count is set, count the corresponding errors and return
+ * them in those integer pointers. Return 0 if the device supports RAS.
+ * Return -EOPNOTSUPP if the device doesn't support RAS.
+ */
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+                                unsigned long *ce_count,
+                                unsigned long *ue_count)
 {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;
        unsigned long ce, ue;
 
        if (!adev->ras_enabled || !con)
-               return;
+               return -EOPNOTSUPP;
+
+       /* Don't count if the caller isn't asking for any counts. */
+       if (!ce_count && !ue_count)
+               return 0;
 
        ce = 0;
        ue = 0;
@@ -1061,9 +1076,11 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
                struct ras_query_if info = {
                        .head = obj->head,
                };
+               int res;
 
-               if (amdgpu_ras_query_error_status(adev, &info))
-                       return;
+               res = amdgpu_ras_query_error_status(adev, &info);
+               if (res)
+                       return res;
 
                ce += info.ce_count;
                ue += info.ue_count;
@@ -1074,6 +1091,8 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
 
        if (ue_count)
                *ue_count = ue;
+
+       return 0;
 }
 /* query/inject/cure end */
 
@@ -2137,9 +2156,10 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
 
        /* Cache new values.
         */
-       amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
-       atomic_set(&con->ras_ce_count, ce_count);
-       atomic_set(&con->ras_ue_count, ue_count);
+       if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+               atomic_set(&con->ras_ce_count, ce_count);
+               atomic_set(&con->ras_ue_count, ue_count);
+       }
 
        pm_runtime_mark_last_busy(dev->dev);
 Out:
@@ -2312,9 +2332,10 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
 
        /* Those are the cached values at init.
         */
-       amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
-       atomic_set(&con->ras_ce_count, ce_count);
-       atomic_set(&con->ras_ue_count, ue_count);
+       if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+               atomic_set(&con->ras_ce_count, ce_count);
+               atomic_set(&con->ras_ue_count, ue_count);
+       }
 
        return 0;
 cleanup:
index 256cea5..b504ed8 100644 (file)
@@ -490,9 +490,9 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
 void amdgpu_ras_resume(struct amdgpu_device *adev);
 void amdgpu_ras_suspend(struct amdgpu_device *adev);
 
-void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
-                                 unsigned long *ce_count,
-                                 unsigned long *ue_count);
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+                                unsigned long *ce_count,
+                                unsigned long *ue_count);
 
 /* error handling functions */
 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
index 79cfa2d..078c068 100644 (file)
@@ -1758,7 +1758,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
        r = vm->update_funcs->commit(&params, fence);
 
        if (table_freed)
-               *table_freed = *table_freed || params.table_freed;
+               *table_freed = params.table_freed;
 
 error_unlock:
        amdgpu_vm_eviction_unlock(vm);
@@ -1816,7 +1816,6 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
  * @adev: amdgpu_device pointer
  * @bo_va: requested BO and VM object
  * @clear: if true clear the entries
- * @table_freed: return true if page table is freed
  *
  * Fill in the page table entries for @bo_va.
  *
@@ -1824,7 +1823,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
  * 0 for success, -EINVAL for failure.
  */
 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
-                       bool clear, bool *table_freed)
+                       bool clear)
 {
        struct amdgpu_bo *bo = bo_va->base.bo;
        struct amdgpu_vm *vm = bo_va->base.vm;
@@ -1903,7 +1902,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
                                                resv, mapping->start,
                                                mapping->last, update_flags,
                                                mapping->offset, mem,
-                                               pages_addr, last_update, table_freed);
+                                               pages_addr, last_update, NULL);
                if (r)
                        return r;
        }
@@ -2155,7 +2154,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 
        list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
                /* Per VM BOs never need to bo cleared in the page tables */
-               r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+               r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        return r;
        }
@@ -2174,7 +2173,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
                else
                        clear = true;
 
-               r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL);
+               r = amdgpu_vm_bo_update(adev, bo_va, clear);
                if (r)
                        return r;
 
index ddb85a8..f8fa653 100644 (file)
@@ -406,7 +406,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                                struct dma_fence **fence, bool *free_table);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
-                       bool clear, bool *table_freed);
+                       bool clear);
 bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
                             struct amdgpu_bo *bo, bool evicted);
index 3332442..7e0d8c0 100644 (file)
@@ -766,7 +766,7 @@ static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
 
 static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
 {
-       adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VBLANK6 + 1;
+       adev->crtc_irq.num_types = adev->mode_info.num_crtc;
        adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
 }
 
index f5e9c02..a64b2c7 100644 (file)
@@ -3300,6 +3300,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000)
 };
@@ -3379,6 +3380,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000020),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1_Vangogh, 0xffffffff, 0x00070103),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQG_CONFIG, 0x000017ff, 0x00001000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00400000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
@@ -3445,6 +3447,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x01030000, 0x01030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x03a00000, 0x00a00000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG,  0x00000020, 0x00000020)
index 3ee4815..ff2307d 100644 (file)
@@ -252,7 +252,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
         * otherwise the mailbox msg will be ruined/reset by
         * the VF FLR.
         */
-       if (!down_read_trylock(&adev->reset_sem))
+       if (!down_write_trylock(&adev->reset_sem))
                return;
 
        amdgpu_virt_fini_data_exchange(adev);
@@ -268,7 +268,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
 
 flr_done:
        atomic_set(&adev->in_gpu_reset, 0);
-       up_read(&adev->reset_sem);
+       up_write(&adev->reset_sem);
 
        /* Trigger recovery for world switch failure if no TDR */
        if (amdgpu_device_should_recover_gpu(adev)
index 48e588d..9f7aac4 100644 (file)
@@ -273,7 +273,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
         * otherwise the mailbox msg will be ruined/reset by
         * the VF FLR.
         */
-       if (!down_read_trylock(&adev->reset_sem))
+       if (!down_write_trylock(&adev->reset_sem))
                return;
 
        amdgpu_virt_fini_data_exchange(adev);
@@ -289,7 +289,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
 
 flr_done:
        atomic_set(&adev->in_gpu_reset, 0);
-       up_read(&adev->reset_sem);
+       up_write(&adev->reset_sem);
 
        /* Trigger recovery for world switch failure if no TDR */
        if (amdgpu_device_should_recover_gpu(adev)
index 94a2c07..94d029d 100644 (file)
 #include "smuio_v11_0.h"
 #include "smuio_v11_0_6.h"
 
-#define codec_info_build(type, width, height, level) \
-                        .codec_type = type,\
-                        .max_width = width,\
-                        .max_height = height,\
-                        .max_pixels_per_frame = height * width,\
-                        .max_level = level,
-
 static const struct amd_ip_funcs nv_common_ip_funcs;
 
 /* Navi */
 static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] =
 {
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
-               .max_width = 4096,
-               .max_height = 2304,
-               .max_pixels_per_frame = 4096 * 2304,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
-               .max_width = 4096,
-               .max_height = 2304,
-               .max_pixels_per_frame = 4096 * 2304,
-               .max_level = 0,
-       },
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
 };
 
 static const struct amdgpu_video_codecs nv_video_codecs_encode =
@@ -101,55 +82,13 @@ static const struct amdgpu_video_codecs nv_video_codecs_encode =
 /* Navi1x */
 static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
 {
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 3,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 5,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 52,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 4,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 8192 * 4352,
-               .max_level = 186,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 8192 * 4352,
-               .max_level = 0,
-       },
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
 };
 
 static const struct amdgpu_video_codecs nv_video_codecs_decode =
@@ -161,62 +100,14 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode =
 /* Sienna Cichlid */
 static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
 {
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 3,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 5,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 52,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 4,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 8192 * 4352,
-               .max_level = 186,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 8192 * 4352,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 8192 * 4352,
-               .max_level = 0,
-       },
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 };
 
 static const struct amdgpu_video_codecs sc_video_codecs_decode =
@@ -228,80 +119,20 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode =
 /* SRIOV Sienna Cichlid, not const since data is controlled by host */
 static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
 {
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
-               .max_width = 4096,
-               .max_height = 2304,
-               .max_pixels_per_frame = 4096 * 2304,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
-               .max_width = 4096,
-               .max_height = 2304,
-               .max_pixels_per_frame = 4096 * 2304,
-               .max_level = 0,
-       },
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
 };
 
 static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
 {
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 3,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 5,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 52,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 4,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 8192 * 4352,
-               .max_level = 186,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 8192 * 4352,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 8192 * 4352,
-               .max_level = 0,
-       },
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 };
 
 static struct amdgpu_video_codecs sriov_sc_video_codecs_encode =
@@ -333,6 +164,19 @@ static const struct amdgpu_video_codecs bg_video_codecs_encode = {
        .codec_array = NULL,
 };
 
+/* Yellow Carp */
+static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+};
+
+static const struct amdgpu_video_codecs yc_video_codecs_decode = {
+       .codec_count = ARRAY_SIZE(yc_video_codecs_decode_array),
+       .codec_array = yc_video_codecs_decode_array,
+};
+
 static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
                                 const struct amdgpu_video_codecs **codecs)
 {
@@ -353,12 +197,17 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
        case CHIP_NAVY_FLOUNDER:
        case CHIP_DIMGREY_CAVEFISH:
        case CHIP_VANGOGH:
-       case CHIP_YELLOW_CARP:
                if (encode)
                        *codecs = &nv_video_codecs_encode;
                else
                        *codecs = &sc_video_codecs_decode;
                return 0;
+       case CHIP_YELLOW_CARP:
+               if (encode)
+                       *codecs = &nv_video_codecs_encode;
+               else
+                       *codecs = &yc_video_codecs_decode;
+               return 0;
        case CHIP_BEIGE_GOBY:
                if (encode)
                        *codecs = &bg_video_codecs_encode;
@@ -1387,7 +1236,10 @@ static int nv_common_early_init(void *handle)
                        AMD_PG_SUPPORT_VCN |
                        AMD_PG_SUPPORT_VCN_DPG |
                        AMD_PG_SUPPORT_JPEG;
-               adev->external_rev_id = adev->rev_id + 0x01;
+               if (adev->pdev->device == 0x1681)
+                       adev->external_rev_id = adev->rev_id + 0x19;
+               else
+                       adev->external_rev_id = adev->rev_id + 0x01;
                break;
        default:
                /* FIXME: not supported yet */
index 618e5b6..536d41f 100644 (file)
@@ -67,7 +67,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
 
        err = psp_init_asd_microcode(psp, chip_name);
        if (err)
-               goto out;
+               return err;
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
        err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
@@ -80,7 +80,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
        } else {
                err = amdgpu_ucode_validate(adev->psp.ta_fw);
                if (err)
-                       goto out2;
+                       goto out;
 
                ta_hdr = (const struct ta_firmware_header_v1_0 *)
                                 adev->psp.ta_fw->data;
@@ -105,10 +105,9 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
 
        return 0;
 
-out2:
+out:
        release_firmware(adev->psp.ta_fw);
        adev->psp.ta_fw = NULL;
-out:
        if (err) {
                dev_err(adev->dev,
                        "psp v12.0: Failed to load firmware \"%s\"\n",
index b024364..b7d350b 100644 (file)
 /* Vega, Raven, Arcturus */
 static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
 {
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
-               .max_width = 4096,
-               .max_height = 2304,
-               .max_pixels_per_frame = 4096 * 2304,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
-               .max_width = 4096,
-               .max_height = 2304,
-               .max_pixels_per_frame = 4096 * 2304,
-               .max_level = 0,
-       },
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
 };
 
 static const struct amdgpu_video_codecs vega_video_codecs_encode =
@@ -113,48 +101,12 @@ static const struct amdgpu_video_codecs vega_video_codecs_encode =
 /* Vega */
 static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
 {
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 3,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 5,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 52,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 4,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 186,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 0,
-       },
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
 };
 
 static const struct amdgpu_video_codecs vega_video_codecs_decode =
@@ -166,55 +118,13 @@ static const struct amdgpu_video_codecs vega_video_codecs_decode =
 /* Raven */
 static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
 {
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 3,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 5,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 52,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 4,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 186,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 0,
-       },
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
 };
 
 static const struct amdgpu_video_codecs rv_video_codecs_decode =
@@ -226,55 +136,13 @@ static const struct amdgpu_video_codecs rv_video_codecs_decode =
 /* Renoir, Arcturus */
 static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
 {
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 3,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 5,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 52,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 4,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 186,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
-               .max_width = 4096,
-               .max_height = 4096,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 0,
-       },
-       {
-               .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
-               .max_width = 8192,
-               .max_height = 4352,
-               .max_pixels_per_frame = 4096 * 4096,
-               .max_level = 0,
-       },
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+       {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
 };
 
 static const struct amdgpu_video_codecs rn_video_codecs_decode =
index 67541c3..e48acdd 100644 (file)
@@ -1393,7 +1393,6 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
        long err = 0;
        int i;
        uint32_t *devices_arr = NULL;
-       bool table_freed = false;
 
        dev = kfd_device_by_id(GET_GPU_ID(args->handle));
        if (!dev)
@@ -1451,8 +1450,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
                        goto get_mem_obj_from_handle_failed;
                }
                err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-                       peer->kgd, (struct kgd_mem *)mem,
-                       peer_pdd->drm_priv, &table_freed);
+                       peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv);
                if (err) {
                        pr_err("Failed to map to gpu %d/%d\n",
                               i, args->n_devices);
@@ -1470,17 +1468,16 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
        }
 
        /* Flush TLBs after waiting for the page table updates to complete */
-       if (table_freed) {
-               for (i = 0; i < args->n_devices; i++) {
-                       peer = kfd_device_by_id(devices_arr[i]);
-                       if (WARN_ON_ONCE(!peer))
-                               continue;
-                       peer_pdd = kfd_get_process_device_data(peer, p);
-                       if (WARN_ON_ONCE(!peer_pdd))
-                               continue;
-                       kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
-               }
+       for (i = 0; i < args->n_devices; i++) {
+               peer = kfd_device_by_id(devices_arr[i]);
+               if (WARN_ON_ONCE(!peer))
+                       continue;
+               peer_pdd = kfd_get_process_device_data(peer, p);
+               if (WARN_ON_ONCE(!peer_pdd))
+                       continue;
+               kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
        }
+
        kfree(devices_arr);
 
        return err;
@@ -1568,27 +1565,10 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
                }
                args->n_success = i+1;
        }
-       mutex_unlock(&p->mutex);
-
-       err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
-       if (err) {
-               pr_debug("Sync memory failed, wait interrupted by user signal\n");
-               goto sync_memory_failed;
-       }
-
-       /* Flush TLBs after waiting for the page table updates to complete */
-       for (i = 0; i < args->n_devices; i++) {
-               peer = kfd_device_by_id(devices_arr[i]);
-               if (WARN_ON_ONCE(!peer))
-                       continue;
-               peer_pdd = kfd_get_process_device_data(peer, p);
-               if (WARN_ON_ONCE(!peer_pdd))
-                       continue;
-               kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
-       }
-
        kfree(devices_arr);
 
+       mutex_unlock(&p->mutex);
+
        return 0;
 
 bind_process_to_device_failed:
@@ -1596,7 +1576,6 @@ get_mem_obj_from_handle_failed:
 unmap_memory_from_gpu_failed:
        mutex_unlock(&p->mutex);
 copy_from_user_failed:
-sync_memory_failed:
        kfree(devices_arr);
        return err;
 }
index 21ec8a1..8a2c6fc 100644 (file)
@@ -714,8 +714,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
        if (err)
                goto err_alloc_mem;
 
-       err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem,
-                       pdd->drm_priv, NULL);
+       err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->drm_priv);
        if (err)
                goto err_map_mem;
 
index 9a71d89..c7b364e 100644 (file)
@@ -2375,21 +2375,27 @@ static bool svm_range_skip_recover(struct svm_range *prange)
 
 static void
 svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
-                     struct svm_range *prange, int32_t gpuidx)
+                     int32_t gpuidx)
 {
        struct kfd_process_device *pdd;
 
-       if (gpuidx == MAX_GPU_INSTANCE)
-               /* fault is on different page of same range
-                * or fault is skipped to recover later
-                */
-               pdd = svm_range_get_pdd_by_adev(prange, adev);
-       else
-               /* fault recovered
-                * or fault cannot recover because GPU no access on the range
-                */
-               pdd = kfd_process_device_from_gpuidx(p, gpuidx);
+       /* Fault is on a different page of the same range,
+        * or recovery of the fault was deferred,
+        * or the fault is on an invalid virtual address.
+        */
+       if (gpuidx == MAX_GPU_INSTANCE) {
+               uint32_t gpuid;
+               int r;
 
+               r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx);
+               if (r < 0)
+                       return;
+       }
+
+       /* Fault was recovered, or it cannot be recovered because the GPU
+        * has no access to the range.
+        */
+       pdd = kfd_process_device_from_gpuidx(p, gpuidx);
        if (pdd)
                WRITE_ONCE(pdd->faults, pdd->faults + 1);
 }
@@ -2525,7 +2531,7 @@ out_unlock_svms:
        mutex_unlock(&svms->lock);
        mmap_read_unlock(mm);
 
-       svm_range_count_fault(adev, p, prange, gpuidx);
+       svm_range_count_fault(adev, p, gpuidx);
 
        mmput(mm);
 out:
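
pdd->faults is bumped with WRITE_ONCE() so a lockless reader can pair it with READ_ONCE() and observe an untorn value; there is a single writer context here, so the plain read-modify-write on the writer side is sufficient. A hedged sketch of the reader side (the show routine and its signature are hypothetical):

	/* hypothetical lockless reader pairing with the WRITE_ONCE() above */
	static ssize_t faults_show(struct kfd_process_device *pdd, char *buf)
	{
		return sysfs_emit(buf, "%llu\n",
				  (unsigned long long)READ_ONCE(pdd->faults));
	}
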
index 01e1062..b53f49a 100644 (file)
@@ -2429,9 +2429,9 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
        max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
        min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
 
-       if (caps->ext_caps->bits.oled == 1 ||
+       if (caps->ext_caps->bits.oled == 1 /*||
            caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
-           caps->ext_caps->bits.hdr_aux_backlight_control == 1)
+           caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
                caps->aux_support = true;
 
        if (amdgpu_backlight == 0)
@@ -9191,7 +9191,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||          \
        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
        /* restore the backlight level */
-       if (dm->backlight_dev)
+       if (dm->backlight_dev && (amdgpu_dm_backlight_get_level(dm) != dm->brightness[0]))
                amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
 #endif
        /*
index 6e0c5c6..a5331b9 100644 (file)
@@ -197,7 +197,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
 
        REG_UPDATE(DENTIST_DISPCLK_CNTL,
                        DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
-//     REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 5, 100);
+       REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 1000);
        REG_UPDATE(DENTIST_DISPCLK_CNTL,
                        DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
        REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
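
The re-enabled wait turns the DISPCLK divider update into a polled handshake: spin on DENTIST_DISPCLK_CHG_DONE becoming 1, with a 50 us delay between attempts and up to 1000 attempts, a much longer budget than the 5 us x 100 used for the DPPCLK wait below. A generic sketch of that polling contract, assuming REG_WAIT's trailing arguments are (expected field value, delay per try in microseconds, maximum tries):

	/* sketch only; read_field() and udelay() stand in for the real helpers */
	static int poll_field(u32 (*read_field)(void), u32 cond,
			      unsigned int delay_us, unsigned int max_tries)
	{
		unsigned int try;

		for (try = 0; try < max_tries; try++) {
			if (read_field() == cond)
				return 0;
			udelay(delay_us);
		}
		return -ETIMEDOUT;	/* divider change never completed */
	}
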
index 513676a..af7004b 100644 (file)
@@ -190,6 +190,10 @@ void dcn3_init_clocks(struct clk_mgr *clk_mgr_base)
                        &clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
                        &num_levels);
 
+       /* SOCCLK */
+       dcn3_init_single_clock(clk_mgr, PPCLK_SOCCLK,
+                                       &clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
+                                       &num_levels);
        // DPREFCLK ???
 
        /* DISPCLK */
index 7b7d884..4a4894e 100644 (file)
 
 #include "dc_dmub_srv.h"
 
+#include "yellow_carp_offset.h"
+
+#define regCLK1_CLK_PLL_REQ                    0x0237
+#define regCLK1_CLK_PLL_REQ_BASE_IDX           0
+
+#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT    0x0
+#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT   0xc
+#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT   0x10
+#define CLK1_CLK_PLL_REQ__FbMult_int_MASK      0x000001FFL
+#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK     0x0000F000L
+#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK     0xFFFF0000L
+
+#define REG(reg_name) \
+       (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+
 #define TO_CLK_MGR_DCN31(clk_mgr)\
        container_of(clk_mgr, struct clk_mgr_dcn31, base)
 
@@ -124,10 +139,10 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
         * also if safe to lower is false, we just go in the higher state
         */
        if (safe_to_lower) {
-               if (new_clocks->z9_support == DCN_Z9_SUPPORT_ALLOW &&
-                               new_clocks->z9_support != clk_mgr_base->clks.z9_support) {
+               if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_ALLOW &&
+                               new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
                        dcn31_smu_set_Z9_support(clk_mgr, true);
-                       clk_mgr_base->clks.z9_support = new_clocks->z9_support;
+                       clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
                }
 
                if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
@@ -148,10 +163,10 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
                        }
                }
        } else {
-               if (new_clocks->z9_support == DCN_Z9_SUPPORT_DISALLOW &&
-                               new_clocks->z9_support != clk_mgr_base->clks.z9_support) {
+               if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
+                               new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
                        dcn31_smu_set_Z9_support(clk_mgr, false);
-                       clk_mgr_base->clks.z9_support = new_clocks->z9_support;
+                       clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
                }
 
                if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
@@ -229,7 +244,32 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
 
 static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
 {
-       return 0;
+       /* get FbMult value */
+       struct fixed31_32 pll_req;
+       unsigned int fbmult_frac_val = 0;
+       unsigned int fbmult_int_val = 0;
+
+       /*
+        * The register value of fbmult is in 8.16 format; we convert it to 31.32
+        * to leverage the fixed-point operations available in the driver
+        */
+
+       REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part */
+       REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */
+
+       pll_req = dc_fixpt_from_int(fbmult_int_val);
+
+       /*
+        * since the fractional part is only 16 bits in the register definition but 32 bits
+        * in our fixed-point definition, we need to shift left by 16 to obtain the correct value
+        */
+       pll_req.value |= fbmult_frac_val << 16;
+
+       /* multiply by REFCLK period */
+       pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
+
+       /* integer part is now VCO frequency in kHz */
+       return dc_fixpt_floor(pll_req);
 }
 
 static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base)
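
The conversion above can be sanity-checked with plain 64-bit integer math; a self-contained sketch, assuming the 48000 kHz DFS reference that dcn31_clk_mgr_construct programs below:

	#include <stdint.h>

	/* fbmult is 8.16 fixed point: integer part in the low register bits
	 * (FbMult_int), fraction in bits 31:16 (FbMult_frac) */
	static uint32_t vco_khz_from_fbmult(uint32_t fbmult_int,
					    uint32_t fbmult_frac,
					    uint32_t ref_khz)
	{
		uint64_t vco = (uint64_t)ref_khz * fbmult_int;

		vco += ((uint64_t)ref_khz * fbmult_frac) >> 16;
		return (uint32_t)vco;	/* floor, like dc_fixpt_floor() */
	}

	/* e.g. fbmult_int = 60, fbmult_frac = 0x8000 (= 0.5):
	 * 60.5 * 48000 kHz = 2904000 kHz VCO */
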
@@ -246,7 +286,7 @@ static void dcn31_init_clocks(struct clk_mgr *clk_mgr)
        clk_mgr->clks.p_state_change_support = true;
        clk_mgr->clks.prev_p_state_change_support = true;
        clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
-       clk_mgr->clks.z9_support = DCN_Z9_SUPPORT_UNKNOWN;
+       clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
 }
 
 static bool dcn31_are_clock_states_equal(struct dc_clocks *a,
@@ -260,7 +300,7 @@ static bool dcn31_are_clock_states_equal(struct dc_clocks *a,
                return false;
        else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
                return false;
-       else if (a->z9_support != b->z9_support)
+       else if (a->zstate_support != b->zstate_support)
                return false;
        else if (a->dtbclk_en != b->dtbclk_en)
                return false;
@@ -592,6 +632,7 @@ void dcn31_clk_mgr_construct(
        clk_mgr->base.dprefclk_ss_percentage = 0;
        clk_mgr->base.dprefclk_ss_divider = 1000;
        clk_mgr->base.ss_on_dprefclk = false;
+       clk_mgr->base.dfs_ref_freq_khz = 48000;
 
        clk_mgr->smu_wm_set.wm_set = (struct dcn31_watermarks *)dm_helpers_allocate_gpu_mem(
                                clk_mgr->base.base.ctx,
index cc21cf7..f8f1005 100644 (file)
 #define __DCN31_CLK_MGR_H__
 #include "clk_mgr_internal.h"
 
-//CLK1_CLK_PLL_REQ
-#ifndef CLK11_CLK1_CLK_PLL_REQ__FbMult_int__SHIFT
-#define CLK11_CLK1_CLK_PLL_REQ__FbMult_int__SHIFT                                                                   0x0
-#define CLK11_CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT                                                                  0xc
-#define CLK11_CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT                                                                  0x10
-#define CLK11_CLK1_CLK_PLL_REQ__FbMult_int_MASK                                                                     0x000001FFL
-#define CLK11_CLK1_CLK_PLL_REQ__PllSpineDiv_MASK                                                                    0x0000F000L
-#define CLK11_CLK1_CLK_PLL_REQ__FbMult_frac_MASK                                                                    0xFFFF0000L
-//CLK1_CLK0_DFS_CNTL
-#define CLK11_CLK1_CLK0_DFS_CNTL__CLK0_DIVIDER__SHIFT                                                               0x0
-#define CLK11_CLK1_CLK0_DFS_CNTL__CLK0_DIVIDER_MASK                                                                 0x0000007FL
-/*DPREF clock related*/
-#define CLK0_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT                                                               0x0
-#define CLK0_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK                                                                 0x0000007FL
-#define CLK1_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT                                                               0x0
-#define CLK1_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK                                                                 0x0000007FL
-#define CLK2_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT                                                               0x0
-#define CLK2_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK                                                                 0x0000007FL
-#define CLK3_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT                                                               0x0
-#define CLK3_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK                                                                 0x0000007FL
-
-//CLK3_0_CLK3_CLK_PLL_REQ
-#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_int__SHIFT                                                            0x0
-#define CLK3_0_CLK3_CLK_PLL_REQ__PllSpineDiv__SHIFT                                                           0xc
-#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_frac__SHIFT                                                           0x10
-#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_int_MASK                                                              0x000001FFL
-#define CLK3_0_CLK3_CLK_PLL_REQ__PllSpineDiv_MASK                                                             0x0000F000L
-#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_frac_MASK                                                             0xFFFF0000L
-
-#define mmCLK0_CLK3_DFS_CNTL                            0x16C60
-#define mmCLK00_CLK0_CLK3_DFS_CNTL                      0x16C60
-#define mmCLK01_CLK0_CLK3_DFS_CNTL                      0x16E60
-#define mmCLK02_CLK0_CLK3_DFS_CNTL                      0x17060
-#define mmCLK03_CLK0_CLK3_DFS_CNTL                      0x17260
-
-#define mmCLK0_CLK_PLL_REQ                              0x16C10
-#define mmCLK00_CLK0_CLK_PLL_REQ                        0x16C10
-#define mmCLK01_CLK0_CLK_PLL_REQ                        0x16E10
-#define mmCLK02_CLK0_CLK_PLL_REQ                        0x17010
-#define mmCLK03_CLK0_CLK_PLL_REQ                        0x17210
-
-#define mmCLK1_CLK_PLL_REQ                              0x1B00D
-#define mmCLK10_CLK1_CLK_PLL_REQ                        0x1B00D
-#define mmCLK11_CLK1_CLK_PLL_REQ                        0x1B20D
-#define mmCLK12_CLK1_CLK_PLL_REQ                        0x1B40D
-#define mmCLK13_CLK1_CLK_PLL_REQ                        0x1B60D
-
-#define mmCLK2_CLK_PLL_REQ                              0x17E0D
-
-/*AMCLK*/
-#define mmCLK11_CLK1_CLK0_DFS_CNTL                      0x1B23F
-#define mmCLK11_CLK1_CLK_PLL_REQ                        0x1B20D
-#endif
-
 struct dcn31_watermarks;
 
 struct dcn31_smu_watermark_set {
index 66db5e9..dad4a4c 100644 (file)
@@ -31,8 +31,8 @@
 #include "dcn31_smu.h"
 
 #include "yellow_carp_offset.h"
-#include "mp/mp_13_0_1_offset.h"
-#include "mp/mp_13_0_1_sh_mask.h"
+#include "mp/mp_13_0_2_offset.h"
+#include "mp/mp_13_0_2_sh_mask.h"
 
 #define REG(reg_name) \
        (MP0_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
index b8832bd..9fb8c46 100644 (file)
@@ -1620,11 +1620,12 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train
 {
        enum dc_status status = DC_OK;
 
-       if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
-               status = configure_lttpr_mode_non_transparent(link, lt_settings);
-       else
+       if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT)
                status = configure_lttpr_mode_transparent(link);
 
+       else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+               status = configure_lttpr_mode_non_transparent(link, lt_settings);
+
        return status;
 }
 
@@ -1784,7 +1785,6 @@ bool perform_link_training_with_retries(
                link_enc = stream->link_enc;
        else
                link_enc = link->link_enc;
-       ASSERT(link_enc);
 
        /* We need to do this before the link training to ensure the idle pattern in SST
         * mode will be sent right after the link training
@@ -1820,8 +1820,7 @@ bool perform_link_training_with_retries(
                                         */
                                        panel_mode = DP_PANEL_MODE_DEFAULT;
                                }
-                       } else
-                               panel_mode = DP_PANEL_MODE_DEFAULT;
+                       }
                }
 #endif
 
@@ -4650,7 +4649,10 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
                }
        }
 
-       if (link->dpcd_caps.panel_mode_edp) {
+       if (link->dpcd_caps.panel_mode_edp &&
+               (link->connector_signal == SIGNAL_TYPE_EDP ||
+                (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+                 link->is_internal_display))) {
                return DP_PANEL_MODE_EDP;
        }
 
@@ -4914,9 +4916,7 @@ bool dc_link_set_default_brightness_aux(struct dc_link *link)
 {
        uint32_t default_backlight;
 
-       if (link &&
-               (link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
-               link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
+       if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
                if (!dc_link_read_default_bl_aux(link, &default_backlight))
                        default_backlight = 150000;
                // if < 5 nits or > 5000, it might be wrong readback
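
The constants in this function read as millinits: the 150000 fallback is 150 nits, and the plausibility window rejects readbacks under 5 nits or over 5000 nits. A small helper restating that check under the millinit assumption (the name is hypothetical):

	#define MILLINITS_PER_NIT	1000u

	/* assuming backlight levels are expressed in millinits */
	static inline bool bl_readback_plausible(uint32_t level)
	{
		return level >= 5 * MILLINITS_PER_NIT &&	/* >= 5 nits */
		       level <= 5000 * MILLINITS_PER_NIT;	/* <= 5000 nits */
	}
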
index a6a6724..1596f6b 100644 (file)
@@ -1062,7 +1062,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
         * so use only 30 bpp on DCE_VERSION_11_0. Testing with DCE 11.2 and 8.3
         * did not show such problems, so this seems to be the exception.
         */
-       if (plane_state->ctx->dce_version != DCE_VERSION_11_0)
+       if (plane_state->ctx->dce_version > DCE_VERSION_11_0)
                pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
        else
                pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
index 45640f1..8dcea8f 100644 (file)
@@ -354,10 +354,10 @@ enum dcn_pwr_state {
 };
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-enum dcn_z9_support_state {
-       DCN_Z9_SUPPORT_UNKNOWN,
-       DCN_Z9_SUPPORT_ALLOW,
-       DCN_Z9_SUPPORT_DISALLOW,
+enum dcn_zstate_support_state {
+       DCN_ZSTATE_SUPPORT_UNKNOWN,
+       DCN_ZSTATE_SUPPORT_ALLOW,
+       DCN_ZSTATE_SUPPORT_DISALLOW,
 };
 #endif
 /*
@@ -378,7 +378,7 @@ struct dc_clocks {
        int dramclk_khz;
        bool p_state_change_support;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-       enum dcn_z9_support_state z9_support;
+       enum dcn_zstate_support_state zstate_support;
        bool dtbclk_en;
 #endif
        enum dcn_pwr_state pwr_state;
index df6539e..0464a8f 100644 (file)
@@ -636,6 +636,7 @@ struct dce_hwseq_registers {
        uint32_t ODM_MEM_PWR_CTRL3;
        uint32_t DMU_MEM_PWR_CNTL;
        uint32_t MMHUBBUB_MEM_PWR_CNTL;
+       uint32_t DCHUBBUB_ARB_HOSTVM_CNTL;
 };
  /* set field name */
 #define HWS_SF(blk_name, reg_name, field_name, post_fix)\
@@ -1110,7 +1111,8 @@ struct dce_hwseq_registers {
        type DOMAIN_POWER_FORCEON;\
        type DOMAIN_POWER_GATE;\
        type DOMAIN_PGFSM_PWR_STATUS;\
-       type HPO_HDMISTREAMCLK_G_GATE_DIS;
+       type HPO_HDMISTREAMCLK_G_GATE_DIS;\
+       type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE;
 
 struct dce_hwseq_shift {
        HWSEQ_REG_FIELD_LIST(uint8_t)
index 673b93f..cb9767d 100644 (file)
@@ -217,6 +217,8 @@ static void dpp1_dscl_set_lb(
        const struct line_buffer_params *lb_params,
        enum lb_memory_config mem_size_config)
 {
+       uint32_t max_partitions = 63; /* Currently hardcoded on all ASICs before DCN 3.2 */
+
        /* LB */
        if (dpp->base.caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
                /* DSCL caps: pixel data processed in fixed format */
@@ -239,9 +241,12 @@ static void dpp1_dscl_set_lb(
                        LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
        }
 
+       if (dpp->base.caps->max_lb_partitions == 31)
+               max_partitions = 31;
+
        REG_SET_2(LB_MEMORY_CTRL, 0,
                MEMORY_CONFIG, mem_size_config,
-               LB_MAX_PARTITIONS, 63);
+               LB_MAX_PARTITIONS, max_partitions);
 }
 
 static const uint16_t *dpp1_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio)
index 1b05a37..b173fa3 100644 (file)
@@ -2093,8 +2093,10 @@ int dcn20_populate_dml_pipes_from_context(
                                - timing->v_border_bottom;
                pipes[pipe_cnt].pipe.dest.htotal = timing->h_total;
                pipes[pipe_cnt].pipe.dest.vtotal = v_total;
-               pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable;
-               pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable;
+               pipes[pipe_cnt].pipe.dest.hactive =
+                       timing->h_addressable + timing->h_border_left + timing->h_border_right;
+               pipes[pipe_cnt].pipe.dest.vactive =
+                       timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
                pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE;
                pipes[pipe_cnt].pipe.dest.pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
                if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
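
Folding the borders into the active dimensions means DML is sized by what the hardware actually scans out, not just the addressable area. With hypothetical numbers, a 1920x1080 mode carrying 8-pixel side borders and 4-line top/bottom borders now reports:

	hactive = 1920 + 8 + 8;	/* h_addressable + left + right = 1936 */
	vactive = 1080 + 4 + 4;	/* v_addressable + top + bottom = 1088 */
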
@@ -3079,6 +3081,37 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
        return false;
 }
 
+static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struct dc_state *context)
+{
+       int plane_count;
+       int i;
+
+       plane_count = 0;
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               if (context->res_ctx.pipe_ctx[i].plane_state)
+                       plane_count++;
+       }
+
+       /*
+        * Zstate is allowed in the following scenarios:
+        *      1. Single eDP with PSR enabled
+        *      2. 0 planes (No memory requests)
+        *      3. Single eDP without PSR but > 5ms stutter period
+        */
+       if (plane_count == 0)
+               return DCN_ZSTATE_SUPPORT_ALLOW;
+       else if (context->stream_count == 1 &&  context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+               struct dc_link *link = context->streams[0]->sink->link;
+
+               if ((link->link_index == 0 && link->psr_settings.psr_feature_enabled)
+                               || context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
+                       return DCN_ZSTATE_SUPPORT_ALLOW;
+               else
+                       return DCN_ZSTATE_SUPPORT_DISALLOW;
+       } else
+               return DCN_ZSTATE_SUPPORT_DISALLOW;
+}
+
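
The decision collapses to a small predicate; a condensed restatement with simplified, hypothetical parameters, useful for eyeballing the three allow cases:

	static bool zstate_allowed(int plane_count, int stream_count,
				   bool single_edp, bool psr_on_link0,
				   double stutter_period_us)
	{
		if (plane_count == 0)			/* no memory requests */
			return true;
		return stream_count == 1 && single_edp &&
		       (psr_on_link0 || stutter_period_us > 5000.0);
	}
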
 void dcn20_calculate_dlg_params(
                struct dc *dc, struct dc_state *context,
                display_e2e_pipe_params_st *pipes,
@@ -3086,7 +3119,6 @@ void dcn20_calculate_dlg_params(
                int vlevel)
 {
        int i, pipe_idx;
-       int plane_count;
 
        /* Writeback MCIF_WB arbitration parameters */
        dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
@@ -3102,17 +3134,7 @@ void dcn20_calculate_dlg_params(
                                                        != dm_dram_clock_change_unsupported;
        context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
 
-       context->bw_ctx.bw.dcn.clk.z9_support = (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) ?
-                       DCN_Z9_SUPPORT_ALLOW : DCN_Z9_SUPPORT_DISALLOW;
-
-       plane_count = 0;
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               if (context->res_ctx.pipe_ctx[i].plane_state)
-                       plane_count++;
-       }
-
-       if (plane_count == 0)
-               context->bw_ctx.bw.dcn.clk.z9_support = DCN_Z9_SUPPORT_ALLOW;
+       context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context);
 
        context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
 
index f3d98e3..bf0a198 100644 (file)
@@ -109,6 +109,7 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = {
        .max_page_table_levels = 4,
        .pte_chunk_size_kbytes = 2,
        .meta_chunk_size_kbytes = 2,
+       .min_meta_chunk_size_bytes = 256,
        .writeback_chunk_size_kbytes = 2,
        .line_buffer_size_bits = 789504,
        .is_line_buffer_bpp_fixed = 0,
index 2140b75..23a52d4 100644 (file)
@@ -383,13 +383,6 @@ bool dpp3_get_optimal_number_of_taps(
        int min_taps_y, min_taps_c;
        enum lb_memory_config lb_config;
 
-       /* Some ASICs does not support  FP16 scaling, so we reject modes require this*/
-       if (scl_data->viewport.width  != scl_data->h_active &&
-               scl_data->viewport.height != scl_data->v_active &&
-               dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
-               scl_data->format == PIXEL_FORMAT_FP16)
-               return false;
-
        if (scl_data->viewport.width > scl_data->h_active &&
                dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
                scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
@@ -1440,15 +1433,6 @@ bool dpp3_construct(
        dpp->tf_shift = tf_shift;
        dpp->tf_mask = tf_mask;
 
-       dpp->lb_pixel_depth_supported =
-               LB_PIXEL_DEPTH_18BPP |
-               LB_PIXEL_DEPTH_24BPP |
-               LB_PIXEL_DEPTH_30BPP |
-               LB_PIXEL_DEPTH_36BPP;
-
-       dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
-       dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
-
        return true;
 }
 
index 3fa86cd..ac644ae 100644 (file)
        SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \
        SRI(CURSOR_CONTROL, CURSOR0_, id),\
        SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\
+       SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \
        SRI(DSCL_MEM_PWR_CTRL, DSCL, id)
 
 #define DPP_REG_LIST_DCN30(id)\
        SRI(CM_SHAPER_LUT_DATA, CM, id),\
        SRI(CM_MEM_PWR_CTRL2, CM, id), \
        SRI(CM_MEM_PWR_STATUS2, CM, id), \
-       SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \
-       SRI(DSCL_MEM_PWR_CTRL, DSCL, id), \
        SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B, CM, id),\
        SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G, CM, id),\
        SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R, CM, id),\
index 16a75ba..7d3ff5d 100644 (file)
@@ -1398,11 +1398,18 @@ void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
                        dcn3_02_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
                        dcn3_02_soc.clock_limits[i].dppclk_mhz  = max_dppclk_mhz;
                        dcn3_02_soc.clock_limits[i].phyclk_mhz  = max_phyclk_mhz;
-                       dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[0].dtbclk_mhz;
+                       /* Populate from bw_params for DTBCLK, SOCCLK */
+                       if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0)
+                               dcn3_02_soc.clock_limits[i].dtbclk_mhz  = dcn3_02_soc.clock_limits[i-1].dtbclk_mhz;
+                       else
+                               dcn3_02_soc.clock_limits[i].dtbclk_mhz  = bw_params->clk_table.entries[i].dtbclk_mhz;
+                       if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
+                               dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[i-1].socclk_mhz;
+                       else
+                               dcn3_02_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
                        /* These clocks cannot come from bw_params, always fill from dcn3_02_soc[1] */
-                       /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
+                       /* FCLK, PHYCLK_D18, DSCCLK */
                        dcn3_02_soc.clock_limits[i].phyclk_d18_mhz = dcn3_02_soc.clock_limits[0].phyclk_d18_mhz;
-                       dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[0].socclk_mhz;
                        dcn3_02_soc.clock_limits[i].dscclk_mhz = dcn3_02_soc.clock_limits[0].dscclk_mhz;
                }
                /* re-init DML with updated bb */
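
Each level now prefers the firmware-reported clock and only inherits from the previous level when the table entry is zero, rather than pinning every level to entry 0; the dcn303 copy below follows the same pattern. As a hedged sketch over hypothetical arrays:

	/* fall back to level i-1 when the clock table has no value */
	for (i = 0; i < num_levels; i++)
		limits[i] = (clk_table[i] || i == 0) ? clk_table[i]
						     : limits[i - 1];
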
index 34b8946..833ab13 100644 (file)
@@ -1326,11 +1326,18 @@ void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
                        dcn3_03_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
                        dcn3_03_soc.clock_limits[i].dppclk_mhz  = max_dppclk_mhz;
                        dcn3_03_soc.clock_limits[i].phyclk_mhz  = max_phyclk_mhz;
-                       dcn3_03_soc.clock_limits[i].dtbclk_mhz = dcn3_03_soc.clock_limits[0].dtbclk_mhz;
+                       /* Populate from bw_params for DTBCLK, SOCCLK */
+                       if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0)
+                               dcn3_03_soc.clock_limits[i].dtbclk_mhz = dcn3_03_soc.clock_limits[i-1].dtbclk_mhz;
+                       else
+                               dcn3_03_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
+                       if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
+                               dcn3_03_soc.clock_limits[i].socclk_mhz = dcn3_03_soc.clock_limits[i-1].socclk_mhz;
+                       else
+                               dcn3_03_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
                        /* These clocks cannot come from bw_params, always fill from dcn3_03_soc[1] */
-                       /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
+                       /* FCLK, PHYCLK_D18, DSCCLK */
                        dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz;
-                       dcn3_03_soc.clock_limits[i].socclk_mhz = dcn3_03_soc.clock_limits[0].socclk_mhz;
                        dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz;
                }
                /* re-init DML with updated bb */
index fc1fc1a..6ac6faf 100644 (file)
@@ -47,6 +47,7 @@
 #include "dce/dmub_outbox.h"
 #include "dc_link_dp.h"
 #include "inc/link_dpcd.h"
+#include "dcn10/dcn10_hw_sequencer.h"
 
 #define DC_LOGGER_INIT(logger)
 
@@ -390,7 +391,7 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
        is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
        is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
 
-       if (!is_hdmi_tmds)
+       if (!is_hdmi_tmds && !is_dp)
                return;
 
        if (is_hdmi_tmds)
@@ -594,3 +595,20 @@ bool dcn31_is_abm_supported(struct dc *dc,
        }
        return false;
 }
+
+static void apply_riommu_invalidation_wa(struct dc *dc)
+{
+       struct dce_hwseq *hws = dc->hwseq;
+
+       if (!hws->wa.early_riommu_invalidation)
+               return;
+
+       REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, 0);
+}
+
+void dcn31_init_pipes(struct dc *dc, struct dc_state *context)
+{
+       dcn10_init_pipes(dc, context);
+       apply_riommu_invalidation_wa(dc);
+
+}
index ff72f0f..40dfebe 100644 (file)
@@ -52,5 +52,6 @@ void dcn31_reset_hw_ctx_wrap(
                struct dc_state *context);
 bool dcn31_is_abm_supported(struct dc *dc,
                struct dc_state *context, struct dc_stream_state *stream);
+void dcn31_init_pipes(struct dc *dc, struct dc_state *context);
 
 #endif /* __DC_HWSS_DCN31_H__ */
index e3048f8..aaf2dbd 100644 (file)
@@ -93,7 +93,6 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
        .set_flip_control_gsl = dcn20_set_flip_control_gsl,
        .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
        .calc_vupdate_position = dcn10_calc_vupdate_position,
-       .apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations,
        .set_backlight_level = dcn21_set_backlight_level,
        .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
        .set_pipe = dcn21_set_pipe,
@@ -104,7 +103,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
 };
 
 static const struct hwseq_private_funcs dcn31_private_funcs = {
-       .init_pipes = dcn10_init_pipes,
+       .init_pipes = dcn31_init_pipes,
        .update_plane_addr = dcn20_update_plane_addr,
        .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
        .update_mpcc = dcn20_update_mpcc,
index c67bc95..38c010a 100644 (file)
@@ -220,6 +220,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
        .sr_exit_z8_time_us = 402.0,
        .sr_enter_plus_exit_z8_time_us = 520.0,
        .writeback_latency_us = 12.0,
+       .dram_channel_width_bytes = 4,
        .round_trip_ping_latency_dcfclk_cycles = 106,
        .urgent_latency_pixel_data_only_us = 4.0,
        .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -741,6 +742,7 @@ static const struct dccg_mask dccg_mask = {
 
 #define HWSEQ_DCN31_REG_LIST()\
        SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
+       SR(DCHUBBUB_ARB_HOSTVM_CNTL), \
        SR(DIO_MEM_PWR_CTRL), \
        SR(ODM_MEM_PWR_CTRL3), \
        SR(DMU_MEM_PWR_CNTL), \
@@ -801,6 +803,7 @@ static const struct dce_hwseq_registers hwseq_reg = {
 #define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\
        HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
        HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
+       HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \
        HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
        HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \
        HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \
@@ -1299,6 +1302,7 @@ static struct dce_hwseq *dcn31_hwseq_create(
                hws->regs = &hwseq_reg;
                hws->shifts = &hwseq_shift;
                hws->masks = &hwseq_mask;
+               hws->wa.early_riommu_invalidation = true;
        }
        return hws;
 }
index c26e742..6655bb9 100644 (file)
@@ -841,6 +841,9 @@ static bool CalculatePrefetchSchedule(
        else
                *DestinationLinesForPrefetch = dst_y_prefetch_equ;
 
+       // Limit to prevent overflow in DST_Y_PREFETCH register
+       *DestinationLinesForPrefetch = dml_min(*DestinationLinesForPrefetch, 63.75);
+
        dml_print("DML: VStartup: %d\n", VStartup);
        dml_print("DML: TCalc: %f\n", TCalc);
        dml_print("DML: TWait: %f\n", TWait);
@@ -4889,7 +4892,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                                }
                        } while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true)
                                        && (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup[0][0]
-                                               || mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode));
+                                               || mode_lib->vba.NextPrefetchMode <= mode_lib->vba.MaxPrefetchMode));
 
                        if (locals->PrefetchSupported[i][j] == true && locals->VRatioInPrefetchSupported[i][j] == true) {
                                mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i][0];
index 2a0db2b..9ac9d5e 100644 (file)
@@ -289,6 +289,9 @@ struct dpp_caps {
        /* DSCL processing pixel data in fixed or float format */
        enum dscl_data_processing_format dscl_data_proc_format;
 
+       /* max LB partitions */
+       unsigned int max_lb_partitions;
+
        /* Calculates the number of partitions in the line buffer.
         * The implementation of this function is overloaded for
         * different versions of DSCL LB.
index f7f7e4f..082549f 100644 (file)
@@ -41,6 +41,7 @@ struct dce_hwseq_wa {
        bool DEGVIDCN10_254;
        bool DEGVIDCN21;
        bool disallow_self_refresh_during_multi_plane_transition;
+       bool early_riommu_invalidation;
 };
 
 struct hwseq_wa_state {
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h
deleted file mode 100644 (file)
index dfacc6b..0000000
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- */
-#ifndef _mp_13_0_1_OFFSET_HEADER
-#define _mp_13_0_1_OFFSET_HEADER
-
-
-
-// addressBlock: mp_SmuMp0_SmnDec
-// base address: 0x0
-#define regMP0_SMN_C2PMSG_32                                                                            0x0060
-#define regMP0_SMN_C2PMSG_32_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_33                                                                            0x0061
-#define regMP0_SMN_C2PMSG_33_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_34                                                                            0x0062
-#define regMP0_SMN_C2PMSG_34_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_35                                                                            0x0063
-#define regMP0_SMN_C2PMSG_35_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_36                                                                            0x0064
-#define regMP0_SMN_C2PMSG_36_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_37                                                                            0x0065
-#define regMP0_SMN_C2PMSG_37_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_38                                                                            0x0066
-#define regMP0_SMN_C2PMSG_38_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_39                                                                            0x0067
-#define regMP0_SMN_C2PMSG_39_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_40                                                                            0x0068
-#define regMP0_SMN_C2PMSG_40_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_41                                                                            0x0069
-#define regMP0_SMN_C2PMSG_41_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_42                                                                            0x006a
-#define regMP0_SMN_C2PMSG_42_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_43                                                                            0x006b
-#define regMP0_SMN_C2PMSG_43_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_44                                                                            0x006c
-#define regMP0_SMN_C2PMSG_44_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_45                                                                            0x006d
-#define regMP0_SMN_C2PMSG_45_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_46                                                                            0x006e
-#define regMP0_SMN_C2PMSG_46_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_47                                                                            0x006f
-#define regMP0_SMN_C2PMSG_47_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_48                                                                            0x0070
-#define regMP0_SMN_C2PMSG_48_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_49                                                                            0x0071
-#define regMP0_SMN_C2PMSG_49_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_50                                                                            0x0072
-#define regMP0_SMN_C2PMSG_50_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_51                                                                            0x0073
-#define regMP0_SMN_C2PMSG_51_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_52                                                                            0x0074
-#define regMP0_SMN_C2PMSG_52_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_53                                                                            0x0075
-#define regMP0_SMN_C2PMSG_53_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_54                                                                            0x0076
-#define regMP0_SMN_C2PMSG_54_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_55                                                                            0x0077
-#define regMP0_SMN_C2PMSG_55_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_56                                                                            0x0078
-#define regMP0_SMN_C2PMSG_56_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_57                                                                            0x0079
-#define regMP0_SMN_C2PMSG_57_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_58                                                                            0x007a
-#define regMP0_SMN_C2PMSG_58_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_59                                                                            0x007b
-#define regMP0_SMN_C2PMSG_59_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_60                                                                            0x007c
-#define regMP0_SMN_C2PMSG_60_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_61                                                                            0x007d
-#define regMP0_SMN_C2PMSG_61_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_62                                                                            0x007e
-#define regMP0_SMN_C2PMSG_62_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_63                                                                            0x007f
-#define regMP0_SMN_C2PMSG_63_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_64                                                                            0x0080
-#define regMP0_SMN_C2PMSG_64_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_65                                                                            0x0081
-#define regMP0_SMN_C2PMSG_65_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_66                                                                            0x0082
-#define regMP0_SMN_C2PMSG_66_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_67                                                                            0x0083
-#define regMP0_SMN_C2PMSG_67_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_68                                                                            0x0084
-#define regMP0_SMN_C2PMSG_68_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_69                                                                            0x0085
-#define regMP0_SMN_C2PMSG_69_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_70                                                                            0x0086
-#define regMP0_SMN_C2PMSG_70_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_71                                                                            0x0087
-#define regMP0_SMN_C2PMSG_71_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_72                                                                            0x0088
-#define regMP0_SMN_C2PMSG_72_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_73                                                                            0x0089
-#define regMP0_SMN_C2PMSG_73_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_74                                                                            0x008a
-#define regMP0_SMN_C2PMSG_74_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_75                                                                            0x008b
-#define regMP0_SMN_C2PMSG_75_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_76                                                                            0x008c
-#define regMP0_SMN_C2PMSG_76_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_77                                                                            0x008d
-#define regMP0_SMN_C2PMSG_77_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_78                                                                            0x008e
-#define regMP0_SMN_C2PMSG_78_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_79                                                                            0x008f
-#define regMP0_SMN_C2PMSG_79_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_80                                                                            0x0090
-#define regMP0_SMN_C2PMSG_80_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_81                                                                            0x0091
-#define regMP0_SMN_C2PMSG_81_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_82                                                                            0x0092
-#define regMP0_SMN_C2PMSG_82_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_83                                                                            0x0093
-#define regMP0_SMN_C2PMSG_83_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_84                                                                            0x0094
-#define regMP0_SMN_C2PMSG_84_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_85                                                                            0x0095
-#define regMP0_SMN_C2PMSG_85_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_86                                                                            0x0096
-#define regMP0_SMN_C2PMSG_86_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_87                                                                            0x0097
-#define regMP0_SMN_C2PMSG_87_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_88                                                                            0x0098
-#define regMP0_SMN_C2PMSG_88_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_89                                                                            0x0099
-#define regMP0_SMN_C2PMSG_89_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_90                                                                            0x009a
-#define regMP0_SMN_C2PMSG_90_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_91                                                                            0x009b
-#define regMP0_SMN_C2PMSG_91_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_92                                                                            0x009c
-#define regMP0_SMN_C2PMSG_92_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_93                                                                            0x009d
-#define regMP0_SMN_C2PMSG_93_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_94                                                                            0x009e
-#define regMP0_SMN_C2PMSG_94_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_95                                                                            0x009f
-#define regMP0_SMN_C2PMSG_95_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_96                                                                            0x00a0
-#define regMP0_SMN_C2PMSG_96_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_97                                                                            0x00a1
-#define regMP0_SMN_C2PMSG_97_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_98                                                                            0x00a2
-#define regMP0_SMN_C2PMSG_98_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_99                                                                            0x00a3
-#define regMP0_SMN_C2PMSG_99_BASE_IDX                                                                   0
-#define regMP0_SMN_C2PMSG_100                                                                           0x00a4
-#define regMP0_SMN_C2PMSG_100_BASE_IDX                                                                  0
-#define regMP0_SMN_C2PMSG_101                                                                           0x00a5
-#define regMP0_SMN_C2PMSG_101_BASE_IDX                                                                  0
-#define regMP0_SMN_C2PMSG_102                                                                           0x00a6
-#define regMP0_SMN_C2PMSG_102_BASE_IDX                                                                  0
-#define regMP0_SMN_C2PMSG_103                                                                           0x00a7
-#define regMP0_SMN_C2PMSG_103_BASE_IDX                                                                  0
-#define regMP0_SMN_IH_CREDIT                                                                            0x00c1
-#define regMP0_SMN_IH_CREDIT_BASE_IDX                                                                   0
-#define regMP0_SMN_IH_SW_INT                                                                            0x00c2
-#define regMP0_SMN_IH_SW_INT_BASE_IDX                                                                   0
-#define regMP0_SMN_IH_SW_INT_CTRL                                                                       0x00c3
-#define regMP0_SMN_IH_SW_INT_CTRL_BASE_IDX                                                              0
-
-
-// addressBlock: mp_SmuMp1_SmnDec
-// base address: 0x0
-#define regMP1_SMN_C2PMSG_32                                                                            0x0260
-#define regMP1_SMN_C2PMSG_32_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_33                                                                            0x0261
-#define regMP1_SMN_C2PMSG_33_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_34                                                                            0x0262
-#define regMP1_SMN_C2PMSG_34_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_35                                                                            0x0263
-#define regMP1_SMN_C2PMSG_35_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_36                                                                            0x0264
-#define regMP1_SMN_C2PMSG_36_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_37                                                                            0x0265
-#define regMP1_SMN_C2PMSG_37_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_38                                                                            0x0266
-#define regMP1_SMN_C2PMSG_38_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_39                                                                            0x0267
-#define regMP1_SMN_C2PMSG_39_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_40                                                                            0x0268
-#define regMP1_SMN_C2PMSG_40_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_41                                                                            0x0269
-#define regMP1_SMN_C2PMSG_41_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_42                                                                            0x026a
-#define regMP1_SMN_C2PMSG_42_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_43                                                                            0x026b
-#define regMP1_SMN_C2PMSG_43_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_44                                                                            0x026c
-#define regMP1_SMN_C2PMSG_44_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_45                                                                            0x026d
-#define regMP1_SMN_C2PMSG_45_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_46                                                                            0x026e
-#define regMP1_SMN_C2PMSG_46_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_47                                                                            0x026f
-#define regMP1_SMN_C2PMSG_47_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_48                                                                            0x0270
-#define regMP1_SMN_C2PMSG_48_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_49                                                                            0x0271
-#define regMP1_SMN_C2PMSG_49_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_50                                                                            0x0272
-#define regMP1_SMN_C2PMSG_50_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_51                                                                            0x0273
-#define regMP1_SMN_C2PMSG_51_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_52                                                                            0x0274
-#define regMP1_SMN_C2PMSG_52_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_53                                                                            0x0275
-#define regMP1_SMN_C2PMSG_53_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_54                                                                            0x0276
-#define regMP1_SMN_C2PMSG_54_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_55                                                                            0x0277
-#define regMP1_SMN_C2PMSG_55_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_56                                                                            0x0278
-#define regMP1_SMN_C2PMSG_56_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_57                                                                            0x0279
-#define regMP1_SMN_C2PMSG_57_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_58                                                                            0x027a
-#define regMP1_SMN_C2PMSG_58_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_59                                                                            0x027b
-#define regMP1_SMN_C2PMSG_59_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_60                                                                            0x027c
-#define regMP1_SMN_C2PMSG_60_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_61                                                                            0x027d
-#define regMP1_SMN_C2PMSG_61_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_62                                                                            0x027e
-#define regMP1_SMN_C2PMSG_62_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_63                                                                            0x027f
-#define regMP1_SMN_C2PMSG_63_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_64                                                                            0x0280
-#define regMP1_SMN_C2PMSG_64_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_65                                                                            0x0281
-#define regMP1_SMN_C2PMSG_65_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_66                                                                            0x0282
-#define regMP1_SMN_C2PMSG_66_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_67                                                                            0x0283
-#define regMP1_SMN_C2PMSG_67_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_68                                                                            0x0284
-#define regMP1_SMN_C2PMSG_68_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_69                                                                            0x0285
-#define regMP1_SMN_C2PMSG_69_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_70                                                                            0x0286
-#define regMP1_SMN_C2PMSG_70_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_71                                                                            0x0287
-#define regMP1_SMN_C2PMSG_71_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_72                                                                            0x0288
-#define regMP1_SMN_C2PMSG_72_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_73                                                                            0x0289
-#define regMP1_SMN_C2PMSG_73_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_74                                                                            0x028a
-#define regMP1_SMN_C2PMSG_74_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_75                                                                            0x028b
-#define regMP1_SMN_C2PMSG_75_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_76                                                                            0x028c
-#define regMP1_SMN_C2PMSG_76_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_77                                                                            0x028d
-#define regMP1_SMN_C2PMSG_77_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_78                                                                            0x028e
-#define regMP1_SMN_C2PMSG_78_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_79                                                                            0x028f
-#define regMP1_SMN_C2PMSG_79_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_80                                                                            0x0290
-#define regMP1_SMN_C2PMSG_80_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_81                                                                            0x0291
-#define regMP1_SMN_C2PMSG_81_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_82                                                                            0x0292
-#define regMP1_SMN_C2PMSG_82_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_83                                                                            0x0293
-#define regMP1_SMN_C2PMSG_83_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_84                                                                            0x0294
-#define regMP1_SMN_C2PMSG_84_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_85                                                                            0x0295
-#define regMP1_SMN_C2PMSG_85_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_86                                                                            0x0296
-#define regMP1_SMN_C2PMSG_86_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_87                                                                            0x0297
-#define regMP1_SMN_C2PMSG_87_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_88                                                                            0x0298
-#define regMP1_SMN_C2PMSG_88_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_89                                                                            0x0299
-#define regMP1_SMN_C2PMSG_89_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_90                                                                            0x029a
-#define regMP1_SMN_C2PMSG_90_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_91                                                                            0x029b
-#define regMP1_SMN_C2PMSG_91_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_92                                                                            0x029c
-#define regMP1_SMN_C2PMSG_92_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_93                                                                            0x029d
-#define regMP1_SMN_C2PMSG_93_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_94                                                                            0x029e
-#define regMP1_SMN_C2PMSG_94_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_95                                                                            0x029f
-#define regMP1_SMN_C2PMSG_95_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_96                                                                            0x02a0
-#define regMP1_SMN_C2PMSG_96_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_97                                                                            0x02a1
-#define regMP1_SMN_C2PMSG_97_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_98                                                                            0x02a2
-#define regMP1_SMN_C2PMSG_98_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_99                                                                            0x02a3
-#define regMP1_SMN_C2PMSG_99_BASE_IDX                                                                   0
-#define regMP1_SMN_C2PMSG_100                                                                           0x02a4
-#define regMP1_SMN_C2PMSG_100_BASE_IDX                                                                  0
-#define regMP1_SMN_C2PMSG_101                                                                           0x02a5
-#define regMP1_SMN_C2PMSG_101_BASE_IDX                                                                  0
-#define regMP1_SMN_C2PMSG_102                                                                           0x02a6
-#define regMP1_SMN_C2PMSG_102_BASE_IDX                                                                  0
-#define regMP1_SMN_C2PMSG_103                                                                           0x02a7
-#define regMP1_SMN_C2PMSG_103_BASE_IDX                                                                  0
-#define regMP1_SMN_IH_CREDIT                                                                            0x02c1
-#define regMP1_SMN_IH_CREDIT_BASE_IDX                                                                   0
-#define regMP1_SMN_IH_SW_INT                                                                            0x02c2
-#define regMP1_SMN_IH_SW_INT_BASE_IDX                                                                   0
-#define regMP1_SMN_IH_SW_INT_CTRL                                                                       0x02c3
-#define regMP1_SMN_IH_SW_INT_CTRL_BASE_IDX                                                              0
-#define regMP1_SMN_FPS_CNT                                                                              0x02c4
-#define regMP1_SMN_FPS_CNT_BASE_IDX                                                                     0
-#define regMP1_SMN_EXT_SCRATCH0                                                                         0x0340
-#define regMP1_SMN_EXT_SCRATCH0_BASE_IDX                                                                0
-#define regMP1_SMN_EXT_SCRATCH1                                                                         0x0341
-#define regMP1_SMN_EXT_SCRATCH1_BASE_IDX                                                                0
-#define regMP1_SMN_EXT_SCRATCH2                                                                         0x0342
-#define regMP1_SMN_EXT_SCRATCH2_BASE_IDX                                                                0
-#define regMP1_SMN_EXT_SCRATCH3                                                                         0x0343
-#define regMP1_SMN_EXT_SCRATCH3_BASE_IDX                                                                0
-#define regMP1_SMN_EXT_SCRATCH4                                                                         0x0344
-#define regMP1_SMN_EXT_SCRATCH4_BASE_IDX                                                                0
-#define regMP1_SMN_EXT_SCRATCH5                                                                         0x0345
-#define regMP1_SMN_EXT_SCRATCH5_BASE_IDX                                                                0
-#define regMP1_SMN_EXT_SCRATCH6                                                                         0x0346
-#define regMP1_SMN_EXT_SCRATCH6_BASE_IDX                                                                0
-#define regMP1_SMN_EXT_SCRATCH7                                                                         0x0347
-#define regMP1_SMN_EXT_SCRATCH7_BASE_IDX                                                                0
-
-
-#endif
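Note: the deleted offset header above pairs every register with a *_BASE_IDX macro. In amdgpu's SOC15-style register access, the base index selects an entry in a per-IP table of segment base addresses, and the register offset is added to it to form the final MMIO offset. A minimal sketch of that convention follows; the segment table contents are illustrative assumptions, not values from this patch.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-IP segment bases; the real values live in the ASIC's
 * ip_offset tables and are indexed by the *_BASE_IDX macros above. */
static const uint32_t mp1_base_segment[] = { 0x00016000, 0x02400000 };

#define regMP1_SMN_C2PMSG_66          0x0282
#define regMP1_SMN_C2PMSG_66_BASE_IDX 0

/* Combine segment base and register offset into a dword MMIO offset. */
static uint32_t reg_offset(const uint32_t *segments, uint32_t base_idx,
                           uint32_t reg)
{
        return segments[base_idx] + reg;
}

int main(void)
{
        printf("MMIO offset: 0x%08x\n",
               (unsigned)reg_offset(mp1_base_segment,
                                    regMP1_SMN_C2PMSG_66_BASE_IDX,
                                    regMP1_SMN_C2PMSG_66));
        return 0;
}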
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h
deleted file mode 100644 (file)
index 2d5e8b5..0000000
+++ /dev/null
@@ -1,531 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- */
-#ifndef _mp_13_0_1_SH_MASK_HEADER
-#define _mp_13_0_1_SH_MASK_HEADER
-
-
-// addressBlock: mp_SmuMp0_SmnDec
-//MP0_SMN_C2PMSG_32
-#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_32__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_33
-#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_33__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_34
-#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_34__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_35
-#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_35__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_36
-#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_36__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_37
-#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_37__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_38
-#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_38__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_39
-#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_39__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_40
-#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_40__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_41
-#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_41__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_42
-#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_42__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_43
-#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_43__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_44
-#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_44__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_45
-#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_45__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_46
-#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_46__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_47
-#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_47__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_48
-#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_48__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_49
-#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_49__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_50
-#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_50__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_51
-#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_51__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_52
-#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_52__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_53
-#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_53__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_54
-#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_54__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_55
-#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_55__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_56
-#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_56__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_57
-#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_57__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_58
-#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_58__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_59
-#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_59__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_60
-#define MP0_SMN_C2PMSG_60__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_60__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_61
-#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_61__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_62
-#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_62__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_63
-#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_63__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_64
-#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_64__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_65
-#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_65__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_66
-#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_66__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_67
-#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_67__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_68
-#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_68__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_69
-#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_69__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_70
-#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_70__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_71
-#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_71__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_72
-#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_72__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_73
-#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_73__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_74
-#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_74__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_75
-#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_75__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_76
-#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_76__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_77
-#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_77__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_78
-#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_78__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_79
-#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_79__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_80
-#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_80__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_81
-#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_81__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_82
-#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_82__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_83
-#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_83__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_84
-#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_84__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_85
-#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_85__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_86
-#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_86__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_87
-#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_87__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_88
-#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_88__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_89
-#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_89__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_90
-#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_90__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_91
-#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_91__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_92
-#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_92__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_93
-#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_93__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_94
-#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_94__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_95
-#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_95__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_96
-#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_96__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_97
-#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_97__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_98
-#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_98__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_99
-#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT                                                                     0x0
-#define MP0_SMN_C2PMSG_99__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP0_SMN_C2PMSG_100
-#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT                                                                    0x0
-#define MP0_SMN_C2PMSG_100__CONTENT_MASK                                                                      0xFFFFFFFFL
-//MP0_SMN_C2PMSG_101
-#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT                                                                    0x0
-#define MP0_SMN_C2PMSG_101__CONTENT_MASK                                                                      0xFFFFFFFFL
-//MP0_SMN_C2PMSG_102
-#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT                                                                    0x0
-#define MP0_SMN_C2PMSG_102__CONTENT_MASK                                                                      0xFFFFFFFFL
-//MP0_SMN_C2PMSG_103
-#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT                                                                    0x0
-#define MP0_SMN_C2PMSG_103__CONTENT_MASK                                                                      0xFFFFFFFFL
-//MP0_SMN_IH_CREDIT
-#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT                                                                0x0
-#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT                                                                   0x10
-#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK                                                                  0x00000003L
-#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK                                                                     0x00FF0000L
-//MP0_SMN_IH_SW_INT
-#define MP0_SMN_IH_SW_INT__ID__SHIFT                                                                          0x0
-#define MP0_SMN_IH_SW_INT__VALID__SHIFT                                                                       0x8
-#define MP0_SMN_IH_SW_INT__ID_MASK                                                                            0x000000FFL
-#define MP0_SMN_IH_SW_INT__VALID_MASK                                                                         0x00000100L
-//MP0_SMN_IH_SW_INT_CTRL
-#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT                                                               0x0
-#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT                                                                0x8
-#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK_MASK                                                                 0x00000001L
-#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK_MASK                                                                  0x00000100L
-
-
-// addressBlock: mp_SmuMp1Pub_CruDec
-//MP1_FIRMWARE_FLAGS
-#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT                                                         0x0
-#define MP1_FIRMWARE_FLAGS__RESERVED__SHIFT                                                                   0x1
-#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK                                                           0x00000001L
-#define MP1_FIRMWARE_FLAGS__RESERVED_MASK                                                                     0xFFFFFFFEL
-
-
-// addressBlock: mp_SmuMp1_SmnDec
-//MP1_SMN_C2PMSG_32
-#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_32__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_33
-#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_33__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_34
-#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_34__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_35
-#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_35__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_36
-#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_36__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_37
-#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_37__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_38
-#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_38__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_39
-#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_39__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_40
-#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_40__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_41
-#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_41__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_42
-#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_42__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_43
-#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_43__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_44
-#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_44__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_45
-#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_45__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_46
-#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_46__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_47
-#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_47__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_48
-#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_48__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_49
-#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_49__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_50
-#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_50__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_51
-#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_51__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_52
-#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_52__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_53
-#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_53__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_54
-#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_54__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_55
-#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_55__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_56
-#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_56__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_57
-#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_57__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_58
-#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_58__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_59
-#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_59__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_60
-#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_60__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_61
-#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_61__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_62
-#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_62__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_63
-#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_63__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_64
-#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_64__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_65
-#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_65__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_66
-#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_66__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_67
-#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_67__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_68
-#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_68__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_69
-#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_69__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_70
-#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_70__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_71
-#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_71__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_72
-#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_72__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_73
-#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_73__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_74
-#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_74__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_75
-#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_75__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_76
-#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_76__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_77
-#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_77__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_78
-#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_78__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_79
-#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_79__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_80
-#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_80__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_81
-#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_81__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_82
-#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_82__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_83
-#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_83__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_84
-#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_84__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_85
-#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_85__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_86
-#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_86__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_87
-#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_87__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_88
-#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_88__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_89
-#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_89__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_90
-#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_90__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_91
-#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_91__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_92
-#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_92__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_93
-#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_93__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_94
-#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_94__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_95
-#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_95__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_96
-#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_96__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_97
-#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_97__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_98
-#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_98__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_99
-#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT                                                                     0x0
-#define MP1_SMN_C2PMSG_99__CONTENT_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_C2PMSG_100
-#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT                                                                    0x0
-#define MP1_SMN_C2PMSG_100__CONTENT_MASK                                                                      0xFFFFFFFFL
-//MP1_SMN_C2PMSG_101
-#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT                                                                    0x0
-#define MP1_SMN_C2PMSG_101__CONTENT_MASK                                                                      0xFFFFFFFFL
-//MP1_SMN_C2PMSG_102
-#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT                                                                    0x0
-#define MP1_SMN_C2PMSG_102__CONTENT_MASK                                                                      0xFFFFFFFFL
-//MP1_SMN_C2PMSG_103
-#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT                                                                    0x0
-#define MP1_SMN_C2PMSG_103__CONTENT_MASK                                                                      0xFFFFFFFFL
-//MP1_SMN_IH_CREDIT
-#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT                                                                0x0
-#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT                                                                   0x10
-#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK                                                                  0x00000003L
-#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK                                                                     0x00FF0000L
-//MP1_SMN_IH_SW_INT
-#define MP1_SMN_IH_SW_INT__ID__SHIFT                                                                          0x0
-#define MP1_SMN_IH_SW_INT__VALID__SHIFT                                                                       0x8
-#define MP1_SMN_IH_SW_INT__ID_MASK                                                                            0x000000FFL
-#define MP1_SMN_IH_SW_INT__VALID_MASK                                                                         0x00000100L
-//MP1_SMN_IH_SW_INT_CTRL
-#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT                                                               0x0
-#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT                                                                0x8
-#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK                                                                 0x00000001L
-#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK                                                                  0x00000100L
-//MP1_SMN_FPS_CNT
-#define MP1_SMN_FPS_CNT__COUNT__SHIFT                                                                         0x0
-#define MP1_SMN_FPS_CNT__COUNT_MASK                                                                           0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH0
-#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT                                                                     0x0
-#define MP1_SMN_EXT_SCRATCH0__DATA_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH1
-#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT                                                                     0x0
-#define MP1_SMN_EXT_SCRATCH1__DATA_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH2
-#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT                                                                     0x0
-#define MP1_SMN_EXT_SCRATCH2__DATA_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH3
-#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT                                                                     0x0
-#define MP1_SMN_EXT_SCRATCH3__DATA_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH4
-#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT                                                                     0x0
-#define MP1_SMN_EXT_SCRATCH4__DATA_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH5
-#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT                                                                     0x0
-#define MP1_SMN_EXT_SCRATCH5__DATA_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH6
-#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT                                                                     0x0
-#define MP1_SMN_EXT_SCRATCH6__DATA_MASK                                                                       0xFFFFFFFFL
-//MP1_SMN_EXT_SCRATCH7
-#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT                                                                     0x0
-#define MP1_SMN_EXT_SCRATCH7__DATA_MASK                                                                       0xFFFFFFFFL
-
-
-#endif
index 6102660..35fa0d8 100644
 #define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow  0x41
 
 #define PPSMC_MSG_GfxDriverResetRecovery       0x42
-#define PPSMC_Message_Count                    0x43
+#define PPSMC_MSG_BoardPowerCalibration        0x43
+#define PPSMC_Message_Count                    0x44
 
 //PPSMC Reset Types
 #define PPSMC_RESET_TYPE_WARM_RESET              0x00
index 89a16dc..1d3765b 100644
        __SMU_DUMMY_MAP(DisableDeterminism),            \
        __SMU_DUMMY_MAP(SetUclkDpmMode),                \
        __SMU_DUMMY_MAP(LightSBR),                      \
-       __SMU_DUMMY_MAP(GfxDriverResetRecovery),
+       __SMU_DUMMY_MAP(GfxDriverResetRecovery),        \
+       __SMU_DUMMY_MAP(BoardPowerCalibration),
 
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type)  SMU_MSG_##type
index 1962a58..f61b5c9 100644
@@ -34,7 +34,7 @@
 #define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE
 #define SMU11_DRIVER_IF_VERSION_VANGOGH 0x03
 #define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
-#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0x9
+#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0xD
 
 /* MP Apertures */
 #define MP0_Public                     0x03800000
index 6119a36..3fea243 100644
@@ -26,6 +26,7 @@
 #include "amdgpu_smu.h"
 
 #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
+#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x03
 #define SMU13_DRIVER_IF_VERSION_ALDE 0x07
 
 /* MP Apertures */
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h
deleted file mode 100644
index b6c976a..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef __SMU_V13_0_1_H__
-#define __SMU_V13_0_1_H__
-
-#include "amdgpu_smu.h"
-
-#define SMU13_0_1_DRIVER_IF_VERSION_INV 0xFFFFFFFF
-#define SMU13_0_1_DRIVER_IF_VERSION_YELLOW_CARP 0x3
-
-/* MP Apertures */
-#define MP0_Public                     0x03800000
-#define MP0_SRAM                       0x03900000
-#define MP1_Public                     0x03b00000
-#define MP1_SRAM                       0x03c00004
-
-/* address block */
-#define smnMP1_FIRMWARE_FLAGS          0x3010024
-
-
-#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3)
-
-int smu_v13_0_1_check_fw_status(struct smu_context *smu);
-
-int smu_v13_0_1_check_fw_version(struct smu_context *smu);
-
-int smu_v13_0_1_fini_smc_tables(struct smu_context *smu);
-
-int smu_v13_0_1_get_vbios_bootup_values(struct smu_context *smu);
-
-int smu_v13_0_1_set_default_dpm_tables(struct smu_context *smu);
-
-int smu_v13_0_1_set_driver_table_location(struct smu_context *smu);
-
-int smu_v13_0_1_gfx_off_control(struct smu_context *smu, bool enable);
-#endif
-#endif
index 388c5cb..0a5d46a 100644
@@ -1528,6 +1528,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
                case CHIP_SIENNA_CICHLID:
                case CHIP_NAVY_FLOUNDER:
                case CHIP_DIMGREY_CAVEFISH:
+               case CHIP_BEIGE_GOBY:
                        if (amdgpu_runtime_pm == 2)
                                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                                      SMU_MSG_EnterBaco,
index 9b3a850..d4c4c49 100644
@@ -23,7 +23,7 @@
 # Makefile for the 'smu manager' sub-component of powerplay.
 # It provides the smu management services for the driver.
 
-SMU13_MGR = smu_v13_0.o aldebaran_ppt.o smu_v13_0_1.o yellow_carp_ppt.o
+SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o
 
 AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR))
 
index 9316a72..cb5485c 100644
@@ -134,6 +134,7 @@ static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT
        MSG_MAP(DisableDeterminism,                  PPSMC_MSG_DisableDeterminism,              0),
        MSG_MAP(SetUclkDpmMode,                      PPSMC_MSG_SetUclkDpmMode,                  0),
        MSG_MAP(GfxDriverResetRecovery,              PPSMC_MSG_GfxDriverResetRecovery,          0),
+       MSG_MAP(BoardPowerCalibration,               PPSMC_MSG_BoardPowerCalibration,           0),
 };
 
 static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
@@ -440,6 +441,39 @@ static int aldebaran_setup_pptable(struct smu_context *smu)
        return ret;
 }
 
+static bool aldebaran_is_primary(struct smu_context *smu)
+{
+       struct amdgpu_device *adev = smu->adev;
+
+       if (adev->smuio.funcs && adev->smuio.funcs->get_die_id)
+               return adev->smuio.funcs->get_die_id(adev) == 0;
+
+       return true;
+}
+
+static int aldebaran_run_board_btc(struct smu_context *smu)
+{
+       u32 smu_version;
+       int ret;
+
+       if (!aldebaran_is_primary(smu))
+               return 0;
+
+       ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
+       if (ret) {
+               dev_err(smu->adev->dev, "Failed to get smu version!\n");
+               return ret;
+       }
+       if (smu_version <= 0x00441d00)
+               return 0;
+
+       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BoardPowerCalibration, NULL);
+       if (ret)
+               dev_err(smu->adev->dev, "Board power calibration failed!\n");
+
+       return ret;
+}
+
 static int aldebaran_run_btc(struct smu_context *smu)
 {
        int ret;
@@ -447,6 +481,8 @@ static int aldebaran_run_btc(struct smu_context *smu)
        ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
        if (ret)
                dev_err(smu->adev->dev, "RunDcBtc failed!\n");
+       else
+               ret = aldebaran_run_board_btc(smu);
 
        return ret;
 }
@@ -524,16 +560,6 @@ static int aldebaran_freqs_in_same_level(int32_t frequency1,
        return (abs(frequency1 - frequency2) <= EPSILON);
 }
 
-static bool aldebaran_is_primary(struct smu_context *smu)
-{
-       struct amdgpu_device *adev = smu->adev;
-
-       if (adev->smuio.funcs && adev->smuio.funcs->get_die_id)
-               return adev->smuio.funcs->get_die_id(adev) == 0;
-
-       return true;
-}
-
 static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
                                          MetricsMember_t member,
                                          uint32_t *value)
index a3dc719..a421ba8 100644
@@ -210,6 +210,9 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
        case CHIP_ALDEBARAN:
                smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
                break;
+       case CHIP_YELLOW_CARP:
+               smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP;
+               break;
        default:
                dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
                smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV;
@@ -694,6 +697,27 @@ failed:
        return ret;
 }
 
+int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
+{
+       int ret = 0;
+       struct amdgpu_device *adev = smu->adev;
+
+       switch (adev->asic_type) {
+       case CHIP_YELLOW_CARP:
+               if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+                       return 0;
+               if (enable)
+                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
+               else
+                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
 int smu_v13_0_system_features_control(struct smu_context *smu,
                                      bool en)
 {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c
deleted file mode 100644
index 61917b4..0000000
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-//#include <linux/reboot.h>
-
-#define SWSMU_CODE_LAYER_L3
-
-#include "amdgpu.h"
-#include "amdgpu_smu.h"
-#include "smu_v13_0_1.h"
-#include "soc15_common.h"
-#include "smu_cmn.h"
-#include "atomfirmware.h"
-#include "amdgpu_atomfirmware.h"
-#include "amdgpu_atombios.h"
-#include "atom.h"
-
-#include "asic_reg/mp/mp_13_0_1_offset.h"
-#include "asic_reg/mp/mp_13_0_1_sh_mask.h"
-
-/*
- * DO NOT use these for err/warn/info/debug messages.
- * Use dev_err, dev_warn, dev_info and dev_dbg instead.
- * They are more MGPU friendly.
- */
-#undef pr_err
-#undef pr_warn
-#undef pr_info
-#undef pr_debug
-
-int smu_v13_0_1_check_fw_status(struct smu_context *smu)
-{
-       struct amdgpu_device *adev = smu->adev;
-       uint32_t mp1_fw_flags;
-
-       mp1_fw_flags = RREG32_PCIE(MP1_Public |
-                                  (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
-
-       if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
-           MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
-               return 0;
-
-       return -EIO;
-}
-
-int smu_v13_0_1_check_fw_version(struct smu_context *smu)
-{
-       uint32_t if_version = 0xff, smu_version = 0xff;
-       uint16_t smu_major;
-       uint8_t smu_minor, smu_debug;
-       int ret = 0;
-
-       ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
-       if (ret)
-               return ret;
-
-       smu_major = (smu_version >> 16) & 0xffff;
-       smu_minor = (smu_version >> 8) & 0xff;
-       smu_debug = (smu_version >> 0) & 0xff;
-
-       switch (smu->adev->asic_type) {
-       case CHIP_YELLOW_CARP:
-               smu->smc_driver_if_version = SMU13_0_1_DRIVER_IF_VERSION_YELLOW_CARP;
-               break;
-
-       default:
-               dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
-               smu->smc_driver_if_version = SMU13_0_1_DRIVER_IF_VERSION_INV;
-               break;
-       }
-
-       dev_info(smu->adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n",
-                        smu_version, smu_major, smu_minor, smu_debug);
-
-       /*
-        * 1. if_version mismatch is not critical as our fw is designed
-        * to be backward compatible.
-        * 2. New fw usually brings some optimizations. But that's visible
-        * only on the paired driver.
-        * Considering above, we just leave user a warning message instead
-        * of halt driver loading.
-        */
-       if (if_version != smu->smc_driver_if_version) {
-               dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
-                        "smu fw version = 0x%08x (%d.%d.%d)\n",
-                        smu->smc_driver_if_version, if_version,
-                        smu_version, smu_major, smu_minor, smu_debug);
-               dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
-       }
-
-       return ret;
-}
-
-int smu_v13_0_1_fini_smc_tables(struct smu_context *smu)
-{
-       struct smu_table_context *smu_table = &smu->smu_table;
-
-       kfree(smu_table->clocks_table);
-       smu_table->clocks_table = NULL;
-
-       kfree(smu_table->metrics_table);
-       smu_table->metrics_table = NULL;
-
-       kfree(smu_table->watermarks_table);
-       smu_table->watermarks_table = NULL;
-
-       return 0;
-}
-
-static int smu_v13_0_1_atom_get_smu_clockinfo(struct amdgpu_device *adev,
-                                               uint8_t clk_id,
-                                               uint8_t syspll_id,
-                                               uint32_t *clk_freq)
-{
-       struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
-       struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
-       int ret, index;
-
-       input.clk_id = clk_id;
-       input.syspll_id = syspll_id;
-       input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
-       index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
-                                           getsmuclockinfo);
-
-       ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
-                                       (uint32_t *)&input);
-       if (ret)
-               return -EINVAL;
-
-       output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
-       *clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
-
-       return 0;
-}
-
-int smu_v13_0_1_get_vbios_bootup_values(struct smu_context *smu)
-{
-       int ret, index;
-       uint16_t size;
-       uint8_t frev, crev;
-       struct atom_common_table_header *header;
-       struct atom_firmware_info_v3_4 *v_3_4;
-       struct atom_firmware_info_v3_3 *v_3_3;
-       struct atom_firmware_info_v3_1 *v_3_1;
-
-       index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
-                                           firmwareinfo);
-
-       ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
-                                            (uint8_t **)&header);
-       if (ret)
-               return ret;
-
-       if (header->format_revision != 3) {
-               dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu13\n");
-               return -EINVAL;
-       }
-
-       switch (header->content_revision) {
-       case 0:
-       case 1:
-       case 2:
-               v_3_1 = (struct atom_firmware_info_v3_1 *)header;
-               smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
-               smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
-               smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
-               smu->smu_table.boot_values.socclk = 0;
-               smu->smu_table.boot_values.dcefclk = 0;
-               smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
-               smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
-               smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
-               smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
-               smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
-               break;
-       case 3:
-               v_3_3 = (struct atom_firmware_info_v3_3 *)header;
-               smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
-               smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
-               smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
-               smu->smu_table.boot_values.socclk = 0;
-               smu->smu_table.boot_values.dcefclk = 0;
-               smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
-               smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
-               smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
-               smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
-               smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
-               break;
-       case 4:
-       default:
-               v_3_4 = (struct atom_firmware_info_v3_4 *)header;
-               smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
-               smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
-               smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
-               smu->smu_table.boot_values.socclk = 0;
-               smu->smu_table.boot_values.dcefclk = 0;
-               smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
-               smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
-               smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
-               smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
-               smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
-               break;
-       }
-
-       smu->smu_table.boot_values.format_revision = header->format_revision;
-       smu->smu_table.boot_values.content_revision = header->content_revision;
-
-       smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
-                                       (uint8_t)SMU11_SYSPLL0_SOCCLK_ID,
-                                       (uint8_t)0,
-                                       &smu->smu_table.boot_values.socclk);
-
-       smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
-                                       (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID,
-                                       (uint8_t)0,
-                                       &smu->smu_table.boot_values.dcefclk);
-
-       smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
-                                       (uint8_t)SMU11_SYSPLL0_ECLK_ID,
-                                       (uint8_t)0,
-                                       &smu->smu_table.boot_values.eclk);
-
-       smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
-                                       (uint8_t)SMU11_SYSPLL0_VCLK_ID,
-                                       (uint8_t)0,
-                                       &smu->smu_table.boot_values.vclk);
-
-       smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
-                                       (uint8_t)SMU11_SYSPLL0_DCLK_ID,
-                                       (uint8_t)0,
-                                       &smu->smu_table.boot_values.dclk);
-
-       if ((smu->smu_table.boot_values.format_revision == 3) &&
-           (smu->smu_table.boot_values.content_revision >= 2))
-               smu_v13_0_1_atom_get_smu_clockinfo(smu->adev,
-                                               (uint8_t)SMU11_SYSPLL1_0_FCLK_ID,
-                                               (uint8_t)SMU11_SYSPLL1_2_ID,
-                                               &smu->smu_table.boot_values.fclk);
-
-       return 0;
-}
-
-int smu_v13_0_1_set_default_dpm_tables(struct smu_context *smu)
-{
-       struct smu_table_context *smu_table = &smu->smu_table;
-
-       return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
-}
-
-int smu_v13_0_1_set_driver_table_location(struct smu_context *smu)
-{
-       struct smu_table *driver_table = &smu->smu_table.driver_table;
-       int ret = 0;
-
-       if (!driver_table->mc_address)
-               return 0;
-
-       ret = smu_cmn_send_smc_msg_with_param(smu,
-                       SMU_MSG_SetDriverDramAddrHigh,
-                       upper_32_bits(driver_table->mc_address),
-                       NULL);
-
-       if (ret)
-               return ret;
-
-       ret = smu_cmn_send_smc_msg_with_param(smu,
-                       SMU_MSG_SetDriverDramAddrLow,
-                       lower_32_bits(driver_table->mc_address),
-                       NULL);
-
-       return ret;
-}
-
-int smu_v13_0_1_gfx_off_control(struct smu_context *smu, bool enable)
-{
-       int ret = 0;
-       struct amdgpu_device *adev = smu->adev;
-
-       switch (adev->asic_type) {
-       case CHIP_YELLOW_CARP:
-               if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
-                       return 0;
-               if (enable)
-                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
-               else
-                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
-               break;
-       default:
-               break;
-       }
-
-       return ret;
-}
index 18a1ffd..0cfeb9f 100644
@@ -25,7 +25,7 @@
 
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
-#include "smu_v13_0_1.h"
+#include "smu_v13_0.h"
 #include "smu13_driver_if_yellow_carp.h"
 #include "yellow_carp_ppt.h"
 #include "smu_v13_0_1_ppsmc.h"
@@ -186,6 +186,22 @@ err0_out:
        return -ENOMEM;
 }
 
+static int yellow_carp_fini_smc_tables(struct smu_context *smu)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+
+       kfree(smu_table->clocks_table);
+       smu_table->clocks_table = NULL;
+
+       kfree(smu_table->metrics_table);
+       smu_table->metrics_table = NULL;
+
+       kfree(smu_table->watermarks_table);
+       smu_table->watermarks_table = NULL;
+
+       return 0;
+}
+
 static int yellow_carp_system_features_control(struct smu_context *smu, bool en)
 {
        struct smu_feature *feature = &smu->smu_feature;
@@ -282,13 +298,9 @@ static int yellow_carp_mode_reset(struct smu_context *smu, int type)
        if (index < 0)
                return index == -EACCES ? 0 : index;
 
-       mutex_lock(&smu->message_lock);
-
-       ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type);
-
-       mutex_unlock(&smu->message_lock);
-
-       mdelay(10);
+       ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL);
+       if (ret)
+               dev_err(smu->adev->dev, "Mode reset failed!\n");
 
        return ret;
 }
@@ -659,6 +671,13 @@ static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu,
        return sizeof(struct gpu_metrics_v2_1);
 }
 
+static int yellow_carp_set_default_dpm_tables(struct smu_context *smu)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+
+       return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
+}
+
 static int yellow_carp_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
                                        long input[], uint32_t size)
 {
@@ -1203,17 +1222,17 @@ static int yellow_carp_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
 }
 
 static const struct pptable_funcs yellow_carp_ppt_funcs = {
-       .check_fw_status = smu_v13_0_1_check_fw_status,
-       .check_fw_version = smu_v13_0_1_check_fw_version,
+       .check_fw_status = smu_v13_0_check_fw_status,
+       .check_fw_version = smu_v13_0_check_fw_version,
        .init_smc_tables = yellow_carp_init_smc_tables,
-       .fini_smc_tables = smu_v13_0_1_fini_smc_tables,
-       .get_vbios_bootup_values = smu_v13_0_1_get_vbios_bootup_values,
+       .fini_smc_tables = yellow_carp_fini_smc_tables,
+       .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
        .system_features_control = yellow_carp_system_features_control,
        .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
        .send_smc_msg = smu_cmn_send_smc_msg,
        .dpm_set_vcn_enable = yellow_carp_dpm_set_vcn_enable,
        .dpm_set_jpeg_enable = yellow_carp_dpm_set_jpeg_enable,
-       .set_default_dpm_table = smu_v13_0_1_set_default_dpm_tables,
+       .set_default_dpm_table = yellow_carp_set_default_dpm_tables,
        .read_sensor = yellow_carp_read_sensor,
        .is_dpm_running = yellow_carp_is_dpm_running,
        .set_watermarks_table = yellow_carp_set_watermarks_table,
@@ -1222,8 +1241,8 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = {
        .get_gpu_metrics = yellow_carp_get_gpu_metrics,
        .get_enabled_mask = smu_cmn_get_enabled_32_bits_mask,
        .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
-       .set_driver_table_location = smu_v13_0_1_set_driver_table_location,
-       .gfx_off_control = smu_v13_0_1_gfx_off_control,
+       .set_driver_table_location = smu_v13_0_set_driver_table_location,
+       .gfx_off_control = smu_v13_0_gfx_off_control,
        .post_init = yellow_carp_post_smu_init,
        .mode2_reset = yellow_carp_mode2_reset,
        .get_dpm_ultimate_freq = yellow_carp_get_dpm_ultimate_freq,
index 98ae006..f454e04 100644
@@ -834,6 +834,9 @@ long drm_ioctl(struct file *filp,
        if (drm_dev_is_unplugged(dev))
                return -ENODEV;
 
+       if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
+               return -ENOTTY;
+
        is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
 
        if (is_driver_ioctl) {
index 5b6922e..aa667fa 100644
@@ -2166,7 +2166,8 @@ static void
 init_vbt_missing_defaults(struct drm_i915_private *i915)
 {
        enum port port;
-       int ports = PORT_A | PORT_B | PORT_C | PORT_D | PORT_E | PORT_F;
+       int ports = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) |
+                   BIT(PORT_D) | BIT(PORT_E) | BIT(PORT_F);
 
        if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915))
                return;
index 3bad4e0..2d5d217 100644
@@ -11361,13 +11361,19 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                vlv_dsi_init(dev_priv);
-       } else if (DISPLAY_VER(dev_priv) >= 9) {
+       } else if (DISPLAY_VER(dev_priv) == 10) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
                intel_ddi_init(dev_priv, PORT_F);
+       } else if (DISPLAY_VER(dev_priv) >= 9) {
+               intel_ddi_init(dev_priv, PORT_A);
+               intel_ddi_init(dev_priv, PORT_B);
+               intel_ddi_init(dev_priv, PORT_C);
+               intel_ddi_init(dev_priv, PORT_D);
+               intel_ddi_init(dev_priv, PORT_E);
        } else if (HAS_DDI(dev_priv)) {
                u32 found;
 
index a8abc9a..4a6419d 100644
 #include "i915_gem_clflush.h"
 #include "i915_gem_context.h"
 #include "i915_gem_ioctls.h"
-#include "i915_sw_fence_work.h"
 #include "i915_trace.h"
 #include "i915_user_extensions.h"
-#include "i915_memcpy.h"
 
 struct eb_vma {
        struct i915_vma *vma;
@@ -1456,6 +1454,10 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
                int err;
                struct intel_engine_cs *engine = eb->engine;
 
+               /* If we need to copy for the cmdparser, we will stall anyway */
+               if (eb_use_cmdparser(eb))
+                       return ERR_PTR(-EWOULDBLOCK);
+
                if (!reloc_can_use_engine(engine)) {
                        engine = engine->gt->engine_class[COPY_ENGINE_CLASS][0];
                        if (!engine)
@@ -2372,217 +2374,6 @@ shadow_batch_pin(struct i915_execbuffer *eb,
        return vma;
 }
 
-struct eb_parse_work {
-       struct dma_fence_work base;
-       struct intel_engine_cs *engine;
-       struct i915_vma *batch;
-       struct i915_vma *shadow;
-       struct i915_vma *trampoline;
-       unsigned long batch_offset;
-       unsigned long batch_length;
-       unsigned long *jump_whitelist;
-       const void *batch_map;
-       void *shadow_map;
-};
-
-static int __eb_parse(struct dma_fence_work *work)
-{
-       struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
-       int ret;
-       bool cookie;
-
-       cookie = dma_fence_begin_signalling();
-       ret = intel_engine_cmd_parser(pw->engine,
-                                     pw->batch,
-                                     pw->batch_offset,
-                                     pw->batch_length,
-                                     pw->shadow,
-                                     pw->jump_whitelist,
-                                     pw->shadow_map,
-                                     pw->batch_map);
-       dma_fence_end_signalling(cookie);
-
-       return ret;
-}
-
-static void __eb_parse_release(struct dma_fence_work *work)
-{
-       struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
-
-       if (!IS_ERR_OR_NULL(pw->jump_whitelist))
-               kfree(pw->jump_whitelist);
-
-       if (pw->batch_map)
-               i915_gem_object_unpin_map(pw->batch->obj);
-       else
-               i915_gem_object_unpin_pages(pw->batch->obj);
-
-       i915_gem_object_unpin_map(pw->shadow->obj);
-
-       if (pw->trampoline)
-               i915_active_release(&pw->trampoline->active);
-       i915_active_release(&pw->shadow->active);
-       i915_active_release(&pw->batch->active);
-}
-
-static const struct dma_fence_work_ops eb_parse_ops = {
-       .name = "eb_parse",
-       .work = __eb_parse,
-       .release = __eb_parse_release,
-};
-
-static inline int
-__parser_mark_active(struct i915_vma *vma,
-                    struct intel_timeline *tl,
-                    struct dma_fence *fence)
-{
-       struct intel_gt_buffer_pool_node *node = vma->private;
-
-       return i915_active_ref(&node->active, tl->fence_context, fence);
-}
-
-static int
-parser_mark_active(struct eb_parse_work *pw, struct intel_timeline *tl)
-{
-       int err;
-
-       mutex_lock(&tl->mutex);
-
-       err = __parser_mark_active(pw->shadow, tl, &pw->base.dma);
-       if (err)
-               goto unlock;
-
-       if (pw->trampoline) {
-               err = __parser_mark_active(pw->trampoline, tl, &pw->base.dma);
-               if (err)
-                       goto unlock;
-       }
-
-unlock:
-       mutex_unlock(&tl->mutex);
-       return err;
-}
-
-static int eb_parse_pipeline(struct i915_execbuffer *eb,
-                            struct i915_vma *shadow,
-                            struct i915_vma *trampoline)
-{
-       struct eb_parse_work *pw;
-       struct drm_i915_gem_object *batch = eb->batch->vma->obj;
-       bool needs_clflush;
-       int err;
-
-       GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset));
-       GEM_BUG_ON(overflows_type(eb->batch_len, pw->batch_length));
-
-       pw = kzalloc(sizeof(*pw), GFP_KERNEL);
-       if (!pw)
-               return -ENOMEM;
-
-       err = i915_active_acquire(&eb->batch->vma->active);
-       if (err)
-               goto err_free;
-
-       err = i915_active_acquire(&shadow->active);
-       if (err)
-               goto err_batch;
-
-       if (trampoline) {
-               err = i915_active_acquire(&trampoline->active);
-               if (err)
-                       goto err_shadow;
-       }
-
-       pw->shadow_map = i915_gem_object_pin_map(shadow->obj, I915_MAP_WB);
-       if (IS_ERR(pw->shadow_map)) {
-               err = PTR_ERR(pw->shadow_map);
-               goto err_trampoline;
-       }
-
-       needs_clflush =
-               !(batch->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
-
-       pw->batch_map = ERR_PTR(-ENODEV);
-       if (needs_clflush && i915_has_memcpy_from_wc())
-               pw->batch_map = i915_gem_object_pin_map(batch, I915_MAP_WC);
-
-       if (IS_ERR(pw->batch_map)) {
-               err = i915_gem_object_pin_pages(batch);
-               if (err)
-                       goto err_unmap_shadow;
-               pw->batch_map = NULL;
-       }
-
-       pw->jump_whitelist =
-               intel_engine_cmd_parser_alloc_jump_whitelist(eb->batch_len,
-                                                            trampoline);
-       if (IS_ERR(pw->jump_whitelist)) {
-               err = PTR_ERR(pw->jump_whitelist);
-               goto err_unmap_batch;
-       }
-
-       dma_fence_work_init(&pw->base, &eb_parse_ops);
-
-       pw->engine = eb->engine;
-       pw->batch = eb->batch->vma;
-       pw->batch_offset = eb->batch_start_offset;
-       pw->batch_length = eb->batch_len;
-       pw->shadow = shadow;
-       pw->trampoline = trampoline;
-
-       /* Mark active refs early for this worker, in case we get interrupted */
-       err = parser_mark_active(pw, eb->context->timeline);
-       if (err)
-               goto err_commit;
-
-       err = dma_resv_reserve_shared(pw->batch->resv, 1);
-       if (err)
-               goto err_commit;
-
-       err = dma_resv_reserve_shared(shadow->resv, 1);
-       if (err)
-               goto err_commit;
-
-       /* Wait for all writes (and relocs) into the batch to complete */
-       err = i915_sw_fence_await_reservation(&pw->base.chain,
-                                             pw->batch->resv, NULL, false,
-                                             0, I915_FENCE_GFP);
-       if (err < 0)
-               goto err_commit;
-
-       /* Keep the batch alive and unwritten as we parse */
-       dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
-
-       /* Force execution to wait for completion of the parser */
-       dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
-
-       dma_fence_work_commit_imm(&pw->base);
-       return 0;
-
-err_commit:
-       i915_sw_fence_set_error_once(&pw->base.chain, err);
-       dma_fence_work_commit_imm(&pw->base);
-       return err;
-
-err_unmap_batch:
-       if (pw->batch_map)
-               i915_gem_object_unpin_map(batch);
-       else
-               i915_gem_object_unpin_pages(batch);
-err_unmap_shadow:
-       i915_gem_object_unpin_map(shadow->obj);
-err_trampoline:
-       if (trampoline)
-               i915_active_release(&trampoline->active);
-err_shadow:
-       i915_active_release(&shadow->active);
-err_batch:
-       i915_active_release(&eb->batch->vma->active);
-err_free:
-       kfree(pw);
-       return err;
-}
-
 static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma)
 {
        /*
@@ -2672,7 +2463,15 @@ static int eb_parse(struct i915_execbuffer *eb)
                goto err_trampoline;
        }
 
-       err = eb_parse_pipeline(eb, shadow, trampoline);
+       err = dma_resv_reserve_shared(shadow->resv, 1);
+       if (err)
+               goto err_trampoline;
+
+       err = intel_engine_cmd_parser(eb->engine,
+                                     eb->batch->vma,
+                                     eb->batch_start_offset,
+                                     eb->batch_len,
+                                     shadow, trampoline);
        if (err)
                goto err_unpin_batch;
 
index f4fb68e..e382b7f 100644
@@ -62,6 +62,7 @@ static void try_to_writeback(struct drm_i915_gem_object *obj,
        switch (obj->mm.madv) {
        case I915_MADV_DONTNEED:
                i915_gem_object_truncate(obj);
+               return;
        case __I915_MADV_PURGED:
                return;
        }
index 4df505e..16162fc 100644
@@ -125,6 +125,10 @@ static int igt_gpu_reloc(void *arg)
        intel_gt_pm_get(&eb.i915->gt);
 
        for_each_uabi_engine(eb.engine, eb.i915) {
+               if (intel_engine_requires_cmd_parser(eb.engine) ||
+                   intel_engine_using_cmd_parser(eb.engine))
+                       continue;
+
                reloc_cache_init(&eb.reloc_cache, eb.i915);
                memset(map, POISON_INUSE, 4096);
 
index 21c8b73..da4f5eb 100644
@@ -303,10 +303,7 @@ static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
                        __i915_gem_object_pin_pages(pt->base);
                        i915_gem_object_make_unshrinkable(pt->base);
 
-                       if (lvl ||
-                           gen8_pt_count(*start, end) < I915_PDES ||
-                           intel_vgpu_active(vm->i915))
-                               fill_px(pt, vm->scratch[lvl]->encode);
+                       fill_px(pt, vm->scratch[lvl]->encode);
 
                        spin_lock(&pd->lock);
                        if (likely(!pd->entry[idx])) {
index cac7f3f..f8948de 100644
@@ -348,7 +348,7 @@ static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
        if (intel_has_pending_fb_unpin(ggtt->vm.i915))
                return ERR_PTR(-EAGAIN);
 
-       return ERR_PTR(-EDEADLK);
+       return ERR_PTR(-ENOBUFS);
 }
 
 int __i915_vma_pin_fence(struct i915_vma *vma)
index 98eb48c..06024d3 100644
@@ -1977,6 +1977,21 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
        if (drm_WARN_ON(&i915->drm, !engine))
                return -EINVAL;
 
+       /*
+        * d3_entered indicates that PPGTT invalidation should be skipped on
+        * vGPU reset: it is set on the D0->D3 transition on PCI config write,
+        * and cleared after vGPU reset when resuming.
+        * On S0ix exit the device power state also transitions from D3 to D0,
+        * as in S3 resume, but without a vGPU reset (triggered by the QEMU
+        * device model). After S0ix exit all engines continue to work, yet
+        * d3_entered remains set, which breaks the next vGPU reset logic
+        * (missing the expected PPGTT invalidation).
+        * Engines can only work in D0, so the first elsp write gives GVT a
+        * chance to clear d3_entered.
+        */
+       if (vgpu->d3_entered)
+               vgpu->d3_entered = false;
+
        execlist = &vgpu->submission.execlist[engine->id];
 
        execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
index 3992c25..a3b4d99 100644
@@ -1145,19 +1145,41 @@ find_reg(const struct intel_engine_cs *engine, u32 addr)
 static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                       struct drm_i915_gem_object *src_obj,
                       unsigned long offset, unsigned long length,
-                      void *dst, const void *src)
+                      bool *needs_clflush_after)
 {
-       bool needs_clflush =
-               !(src_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
-
-       if (src) {
-               GEM_BUG_ON(!needs_clflush);
-               i915_unaligned_memcpy_from_wc(dst, src + offset, length);
-       } else {
-               struct scatterlist *sg;
+       unsigned int src_needs_clflush;
+       unsigned int dst_needs_clflush;
+       void *dst, *src;
+       int ret;
+
+       ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
+       if (ret)
+               return ERR_PTR(ret);
+
+       dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
+       i915_gem_object_finish_access(dst_obj);
+       if (IS_ERR(dst))
+               return dst;
+
+       ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush);
+       if (ret) {
+               i915_gem_object_unpin_map(dst_obj);
+               return ERR_PTR(ret);
+       }
+
+       src = ERR_PTR(-ENODEV);
+       if (src_needs_clflush && i915_has_memcpy_from_wc()) {
+               src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
+               if (!IS_ERR(src)) {
+                       i915_unaligned_memcpy_from_wc(dst,
+                                                     src + offset,
+                                                     length);
+                       i915_gem_object_unpin_map(src_obj);
+               }
+       }
+       if (IS_ERR(src)) {
+               unsigned long x, n, remain;
                void *ptr;
-               unsigned int x, sg_ofs;
-               unsigned long remain;
 
                /*
                 * We can avoid clflushing partial cachelines before the write
@@ -1168,40 +1190,34 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                 * validate up to the end of the batch.
                 */
                remain = length;
-               if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+               if (dst_needs_clflush & CLFLUSH_BEFORE)
                        remain = round_up(remain,
                                          boot_cpu_data.x86_clflush_size);
 
                ptr = dst;
                x = offset_in_page(offset);
-               sg = i915_gem_object_get_sg(src_obj, offset >> PAGE_SHIFT, &sg_ofs, false);
-
-               while (remain) {
-                       unsigned long sg_max = sg->length >> PAGE_SHIFT;
-
-                       for (; remain && sg_ofs < sg_max; sg_ofs++) {
-                               unsigned long len = min(remain, PAGE_SIZE - x);
-                               void *map;
-
-                               map = kmap_atomic(nth_page(sg_page(sg), sg_ofs));
-                               if (needs_clflush)
-                                       drm_clflush_virt_range(map + x, len);
-                               memcpy(ptr, map + x, len);
-                               kunmap_atomic(map);
-
-                               ptr += len;
-                               remain -= len;
-                               x = 0;
-                       }
-
-                       sg_ofs = 0;
-                       sg = sg_next(sg);
+               for (n = offset >> PAGE_SHIFT; remain; n++) {
+                       int len = min(remain, PAGE_SIZE - x);
+
+                       src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
+                       if (src_needs_clflush)
+                               drm_clflush_virt_range(src + x, len);
+                       memcpy(ptr, src + x, len);
+                       kunmap_atomic(src);
+
+                       ptr += len;
+                       remain -= len;
+                       x = 0;
                }
        }
 
+       i915_gem_object_finish_access(src_obj);
+
        memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
 
        /* dst_obj is returned with vmap pinned */
+       *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
+
        return dst;
 }
 
@@ -1360,6 +1376,9 @@ static int check_bbstart(u32 *cmd, u32 offset, u32 length,
        if (target_cmd_index == offset)
                return 0;
 
+       if (IS_ERR(jump_whitelist))
+               return PTR_ERR(jump_whitelist);
+
        if (!test_bit(target_cmd_index, jump_whitelist)) {
                DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
                          jump_target);
@@ -1369,28 +1388,10 @@ static int check_bbstart(u32 *cmd, u32 offset, u32 length,
        return 0;
 }
 
-/**
- * intel_engine_cmd_parser_alloc_jump_whitelist() - preallocate jump whitelist for intel_engine_cmd_parser()
- * @batch_length: length of the commands in batch_obj
- * @trampoline: Whether jump trampolines are used.
- *
- * Preallocates a jump whitelist for parsing the cmd buffer in intel_engine_cmd_parser().
- * This has to be preallocated, because the command parser runs in signaling context,
- * and may not allocate any memory.
- *
- * Return: NULL or pointer to a jump whitelist, or ERR_PTR() on failure. Use
- * IS_ERR() to check for errors. Must bre freed() with kfree().
- *
- * NULL is a valid value, meaning no allocation was required.
- */
-unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
-                                                           bool trampoline)
+static unsigned long *alloc_whitelist(u32 batch_length)
 {
        unsigned long *jmp;
 
-       if (trampoline)
-               return NULL;
-
        /*
         * We expect batch_length to be less than 256KiB for known users,
         * i.e. we need at most an 8KiB bitmap allocation which should be
@@ -1415,9 +1416,7 @@ unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
  * @batch_offset: byte offset in the batch at which execution starts
  * @batch_length: length of the commands in batch_obj
  * @shadow: validated copy of the batch buffer in question
- * @jump_whitelist: buffer preallocated with intel_engine_cmd_parser_alloc_jump_whitelist()
- * @shadow_map: mapping to @shadow vma
- * @batch_map: mapping to @batch vma
+ * @trampoline: true if we need to trampoline into privileged execution
  *
  * Parses the specified batch buffer looking for privilege violations as
  * described in the overview.
@@ -1425,21 +1424,21 @@ unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
  * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
  * if the batch appears legal but should use hardware parsing
  */
+
 int intel_engine_cmd_parser(struct intel_engine_cs *engine,
                            struct i915_vma *batch,
                            unsigned long batch_offset,
                            unsigned long batch_length,
                            struct i915_vma *shadow,
-                           unsigned long *jump_whitelist,
-                           void *shadow_map,
-                           const void *batch_map)
+                           bool trampoline)
 {
        u32 *cmd, *batch_end, offset = 0;
        struct drm_i915_cmd_descriptor default_desc = noop_desc;
        const struct drm_i915_cmd_descriptor *desc = &default_desc;
+       bool needs_clflush_after = false;
+       unsigned long *jump_whitelist;
        u64 batch_addr, shadow_addr;
        int ret = 0;
-       bool trampoline = !jump_whitelist;
 
        GEM_BUG_ON(!IS_ALIGNED(batch_offset, sizeof(*cmd)));
        GEM_BUG_ON(!IS_ALIGNED(batch_length, sizeof(*cmd)));
@@ -1447,8 +1446,18 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
                                     batch->size));
        GEM_BUG_ON(!batch_length);
 
-       cmd = copy_batch(shadow->obj, batch->obj, batch_offset, batch_length,
-                        shadow_map, batch_map);
+       cmd = copy_batch(shadow->obj, batch->obj,
+                        batch_offset, batch_length,
+                        &needs_clflush_after);
+       if (IS_ERR(cmd)) {
+               DRM_DEBUG("CMD: Failed to copy batch\n");
+               return PTR_ERR(cmd);
+       }
+
+       jump_whitelist = NULL;
+       if (!trampoline)
+               /* Defer failure until attempted use */
+               jump_whitelist = alloc_whitelist(batch_length);
 
        shadow_addr = gen8_canonical_addr(shadow->node.start);
        batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);
@@ -1549,6 +1558,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 
        i915_gem_object_flush_map(shadow->obj);
 
+       if (!IS_ERR_OR_NULL(jump_whitelist))
+               kfree(jump_whitelist);
+       i915_gem_object_unpin_map(shadow->obj);
        return ret;
 }
 
index 38ff2fb..b30397b 100644
@@ -1906,17 +1906,12 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
 int intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-unsigned long *intel_engine_cmd_parser_alloc_jump_whitelist(u32 batch_length,
-                                                           bool trampoline);
-
 int intel_engine_cmd_parser(struct intel_engine_cs *engine,
                            struct i915_vma *batch,
                            unsigned long batch_offset,
                            unsigned long batch_length,
                            struct i915_vma *shadow,
-                           unsigned long *jump_whitelist,
-                           void *shadow_map,
-                           const void *batch_map);
+                           bool trampoline);
 #define I915_CMD_PARSER_TRAMPOLINE_SIZE 8
 
 /* intel_device_info.c */
index 1014c71..37aef13 100644
@@ -1426,10 +1426,8 @@ i915_request_await_execution(struct i915_request *rq,
 
        do {
                fence = *child++;
-               if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
-                       i915_sw_fence_set_error_once(&rq->submit, fence->error);
+               if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                        continue;
-               }
 
                if (fence->context == rq->fence.context)
                        continue;
@@ -1527,10 +1525,8 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 
        do {
                fence = *child++;
-               if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
-                       i915_sw_fence_set_error_once(&rq->submit, fence->error);
+               if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                        continue;
-               }
 
                /*
                 * Requests on the same timeline are explicitly ordered, along
index 7eaa92f..e0a10f3 100644
@@ -325,7 +325,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
                        info->pipe_mask &= ~BIT(PIPE_C);
                        info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
                }
-       } else if (HAS_DISPLAY(dev_priv) && GRAPHICS_VER(dev_priv) >= 9) {
+       } else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) {
                u32 dfsm = intel_de_read(dev_priv, SKL_DFSM);
 
                if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
@@ -340,7 +340,8 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
                        info->pipe_mask &= ~BIT(PIPE_C);
                        info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
                }
-               if (GRAPHICS_VER(dev_priv) >= 12 &&
+
+               if (DISPLAY_VER(dev_priv) >= 12 &&
                    (dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
                        info->pipe_mask &= ~BIT(PIPE_D);
                        info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
@@ -352,10 +353,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
                if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
                        info->display.has_fbc = 0;
 
-               if (GRAPHICS_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
+               if (DISPLAY_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
                        info->display.has_dmc = 0;
 
-               if (GRAPHICS_VER(dev_priv) >= 10 &&
+               if (DISPLAY_VER(dev_priv) >= 10 &&
                    (dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE))
                        info->display.has_dsc = 0;
        }
index d01c4c9..704dace 100644
@@ -296,7 +296,7 @@ static const struct dpu_mdp_cfg sc7180_mdp[] = {
 static const struct dpu_mdp_cfg sm8250_mdp[] = {
        {
        .name = "top_0", .id = MDP_TOP,
-       .base = 0x0, .len = 0x45C,
+       .base = 0x0, .len = 0x494,
        .features = 0,
        .highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */
        .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
index ca96e35..c0423e7 100644
@@ -771,6 +771,7 @@ int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog)
        dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY,
                                dp_catalog->width_blanking);
        dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active);
+       dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0);
        return 0;
 }
 
index ee221d8..eaddfd7 100644
@@ -1526,7 +1526,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
         * running. Add the global reset just before disabling the
         * link clocks and core clocks.
         */
-       ret = dp_ctrl_off(&ctrl->dp_ctrl);
+       ret = dp_ctrl_off_link_stream(&ctrl->dp_ctrl);
        if (ret) {
                DRM_ERROR("failed to disable DP controller\n");
                return ret;
index 051c1be..867388a 100644
@@ -219,6 +219,7 @@ static int dp_display_bind(struct device *dev, struct device *master,
                goto end;
        }
 
+       dp->aux->drm_dev = drm;
        rc = dp_aux_register(dp->aux);
        if (rc) {
                DRM_ERROR("DRM DP AUX register failed\n");
@@ -1311,6 +1312,10 @@ static int dp_pm_resume(struct device *dev)
        else
                dp->dp_display.is_connected = false;
 
+       dp_display_handle_plugged_change(g_dp_display,
+                               dp->dp_display.is_connected);
+
+
        mutex_unlock(&dp->event_mutex);
 
        return 0;
index 1411787..1e8a971 100644
@@ -1169,7 +1169,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
        case MSM_BO_CACHED_COHERENT:
                if (priv->has_cached_coherent)
                        break;
-               /* fallthrough */
+               fallthrough;
        default:
                DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
                                (flags & MSM_BO_CACHE_MASK));
index eed2a76..bcaddbb 100644
@@ -142,6 +142,9 @@ static const struct iommu_flush_ops null_tlb_ops = {
        .tlb_add_page = msm_iommu_tlb_add_page,
 };
 
+static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
+               unsigned long iova, int flags, void *arg);
+
 struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
 {
        struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
@@ -157,6 +160,13 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
        if (!ttbr1_cfg)
                return ERR_PTR(-ENODEV);
 
+       /*
+        * Defer setting the fault handler until we have a valid adreno_smmu
+        * to avoid accidentally installing a GPU-specific fault handler for
+        * the display's iommu
+        */
+       iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);
+
        pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
        if (!pagetable)
                return ERR_PTR(-ENOMEM);
@@ -300,7 +310,6 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
 
        iommu->domain = domain;
        msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
-       iommu_set_fault_handler(domain, msm_fault_handler, iommu);
 
        atomic_set(&iommu->pagetables, 0);
 
index 4f3a535..6d07e65 100644
@@ -149,6 +149,8 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
         */
        if (bo->base.dev)
                drm_gem_object_release(&bo->base);
+       else
+               dma_resv_fini(&bo->base._resv);
 
        kfree(nvbo);
 }
@@ -330,6 +332,10 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
        if (IS_ERR(nvbo))
                return PTR_ERR(nvbo);
 
+       nvbo->bo.base.size = size;
+       dma_resv_init(&nvbo->bo.base._resv);
+       drm_vma_node_reset(&nvbo->bo.base.vma_node);
+
        ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
        if (ret)
                return ret;
index ef70140..873cbd3 100644
@@ -706,9 +706,7 @@ static int nt35510_power_on(struct nt35510 *nt)
        if (ret)
                return ret;
 
-       ret = nt35510_read_id(nt);
-       if (ret)
-               return ret;
+       nt35510_read_id(nt);
 
        /* Set up stuff in  manufacturer control, page 1 */
        ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
index 2229f1a..46029c5 100644
@@ -447,7 +447,6 @@ static int rpi_touchscreen_remove(struct i2c_client *i2c)
        drm_panel_remove(&ts->base);
 
        mipi_dsi_device_unregister(ts->dsi);
-       kfree(ts->dsi);
 
        return 0;
 }
index 21939d4..1b80290 100644
@@ -4166,7 +4166,7 @@ static const struct drm_display_mode yes_optoelectronics_ytc700tlag_05_201c_mode
 static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = {
        .modes = &yes_optoelectronics_ytc700tlag_05_201c_mode,
        .num_modes = 1,
-       .bpc = 6,
+       .bpc = 8,
        .size = {
                .width = 154,
                .height = 90,
index 19fd39d..37a1b6a 100644 (file)
@@ -127,7 +127,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
        struct qxl_bo *qbo;
        struct qxl_device *qdev;
 
-       if (!qxl_ttm_bo_is_qxl_bo(bo))
+       if (!qxl_ttm_bo_is_qxl_bo(bo) || !bo->resource)
                return;
        qbo = to_qxl_bo(bo);
        qdev = to_qxl(qbo->tbo.base.dev);
index 1b950b4..8d7fd65 100644 (file)
@@ -102,6 +102,9 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
                return;
        }
 
+       if (!mem)
+               return;
+
        man = ttm_manager_type(bdev, mem->mem_type);
        list_move_tail(&bo->lru, &man->lru[bo->priority]);
 
index 2f57f82..763fa6f 100644 (file)
@@ -63,6 +63,9 @@ int ttm_mem_io_reserve(struct ttm_device *bdev,
 void ttm_mem_io_free(struct ttm_device *bdev,
                     struct ttm_resource *mem)
 {
+       if (!mem)
+               return;
+
        if (!mem->bus.offset && !mem->bus.addr)
                return;
 
index 5f31ace..74e3b46 100644 (file)
@@ -44,6 +44,8 @@ static unsigned ttm_glob_use_count;
 struct ttm_global ttm_glob;
 EXPORT_SYMBOL(ttm_glob);
 
+struct dentry *ttm_debugfs_root;
+
 static void ttm_global_release(void)
 {
        struct ttm_global *glob = &ttm_glob;
@@ -53,6 +55,7 @@ static void ttm_global_release(void)
                goto out;
 
        ttm_pool_mgr_fini();
+       debugfs_remove(ttm_debugfs_root);
 
        __free_page(glob->dummy_read_page);
        memset(glob, 0, sizeof(*glob));
@@ -73,6 +76,13 @@ static int ttm_global_init(void)
 
        si_meminfo(&si);
 
+       ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
+       if (IS_ERR(ttm_debugfs_root)) {
+               ret = PTR_ERR(ttm_debugfs_root);
+               ttm_debugfs_root = NULL;
+               goto out;
+       }
+
        /* Limit the number of pages in the pool to about 50% of the total
         * system memory.
         */
@@ -100,6 +110,10 @@ static int ttm_global_init(void)
        debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
                                &glob->bo_count);
 out:
+       if (ret && ttm_debugfs_root)
+               debugfs_remove(ttm_debugfs_root);
+       if (ret)
+               --ttm_glob_use_count;
        mutex_unlock(&ttm_global_mutex);
        return ret;
 }
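
Note the IS_ERR() check above: debugfs_create_dir() signals failure with an ERR_PTR (for example ERR_PTR(-ENODEV) when debugfs is compiled out), never with NULL. A minimal sketch of the create/teardown pairing, with a hypothetical directory name and counter:

#include <linux/debugfs.h>
#include <linux/err.h>

static struct dentry *demo_debugfs_root;	/* hypothetical root */
static u32 demo_counter;

static int demo_debugfs_init(void)
{
	demo_debugfs_root = debugfs_create_dir("demo", NULL);
	if (IS_ERR(demo_debugfs_root))	/* failure is ERR_PTR, not NULL */
		return PTR_ERR(demo_debugfs_root);

	debugfs_create_u32("counter", 0444, demo_debugfs_root, &demo_counter);
	return 0;
}

static void demo_debugfs_exit(void)
{
	debugfs_remove(demo_debugfs_root);	/* removes the whole subtree */
}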
index 997c458..7fcdef2 100644 (file)
@@ -72,22 +72,6 @@ pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
        return tmp;
 }
 
-struct dentry *ttm_debugfs_root;
-
-static int __init ttm_init(void)
-{
-       ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
-       return 0;
-}
-
-static void __exit ttm_exit(void)
-{
-       debugfs_remove(ttm_debugfs_root);
-}
-
-module_init(ttm_init);
-module_exit(ttm_exit);
-
 MODULE_AUTHOR("Thomas Hellstrom, Jerome Glisse");
 MODULE_DESCRIPTION("TTM memory manager subsystem (for DRM device)");
 MODULE_LICENSE("GPL and additional rights");
index 0339538..f4b08a8 100644 (file)
@@ -181,6 +181,9 @@ int ttm_range_man_fini(struct ttm_device *bdev,
        struct drm_mm *mm = &rman->mm;
        int ret;
 
+       if (!man)
+               return 0;
+
        ttm_resource_manager_set_used(man, false);
 
        ret = ttm_resource_manager_evict_all(bdev, man);
index aab1b36..c287673 100644 (file)
@@ -1857,38 +1857,46 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
        vc4_hdmi_cec_update_clk_div(vc4_hdmi);
 
        if (vc4_hdmi->variant->external_irq_controller) {
-               ret = devm_request_threaded_irq(&pdev->dev,
-                                               platform_get_irq_byname(pdev, "cec-rx"),
-                                               vc4_cec_irq_handler_rx_bare,
-                                               vc4_cec_irq_handler_rx_thread, 0,
-                                               "vc4 hdmi cec rx", vc4_hdmi);
+               ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-rx"),
+                                          vc4_cec_irq_handler_rx_bare,
+                                          vc4_cec_irq_handler_rx_thread, 0,
+                                          "vc4 hdmi cec rx", vc4_hdmi);
                if (ret)
                        goto err_delete_cec_adap;
 
-               ret = devm_request_threaded_irq(&pdev->dev,
-                                               platform_get_irq_byname(pdev, "cec-tx"),
-                                               vc4_cec_irq_handler_tx_bare,
-                                               vc4_cec_irq_handler_tx_thread, 0,
-                                               "vc4 hdmi cec tx", vc4_hdmi);
+               ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-tx"),
+                                          vc4_cec_irq_handler_tx_bare,
+                                          vc4_cec_irq_handler_tx_thread, 0,
+                                          "vc4 hdmi cec tx", vc4_hdmi);
                if (ret)
-                       goto err_delete_cec_adap;
+                       goto err_remove_cec_rx_handler;
        } else {
                HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);
 
-               ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0),
-                                               vc4_cec_irq_handler,
-                                               vc4_cec_irq_handler_thread, 0,
-                                               "vc4 hdmi cec", vc4_hdmi);
+               ret = request_threaded_irq(platform_get_irq(pdev, 0),
+                                          vc4_cec_irq_handler,
+                                          vc4_cec_irq_handler_thread, 0,
+                                          "vc4 hdmi cec", vc4_hdmi);
                if (ret)
                        goto err_delete_cec_adap;
        }
 
        ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev);
        if (ret < 0)
-               goto err_delete_cec_adap;
+               goto err_remove_handlers;
 
        return 0;
 
+err_remove_handlers:
+       if (vc4_hdmi->variant->external_irq_controller)
+               free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi);
+       else
+               free_irq(platform_get_irq(pdev, 0), vc4_hdmi);
+
+err_remove_cec_rx_handler:
+       if (vc4_hdmi->variant->external_irq_controller)
+               free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi);
+
 err_delete_cec_adap:
        cec_delete_adapter(vc4_hdmi->cec_adap);
 
@@ -1897,6 +1905,15 @@ err_delete_cec_adap:
 
 static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi)
 {
+       struct platform_device *pdev = vc4_hdmi->pdev;
+
+       if (vc4_hdmi->variant->external_irq_controller) {
+               free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi);
+               free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi);
+       } else {
+               free_irq(platform_get_irq(pdev, 0), vc4_hdmi);
+       }
+
        cec_unregister_adapter(vc4_hdmi->cec_adap);
 }
 #else
index 6f5ea00..45aeeca 100644 (file)
@@ -36,6 +36,7 @@
 #include <drm/drm_ioctl.h>
 #include <drm/drm_sysfs.h>
 #include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_range_manager.h>
 #include <drm/ttm/ttm_placement.h>
 #include <generated/utsrelease.h>
 
index 5648664..f2d6254 100644 (file)
@@ -354,7 +354,6 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
        ttm_bo_unpin(bo);
        ttm_bo_unreserve(bo);
 
-       ttm_bo_unpin(batch->otable_bo);
        ttm_bo_put(batch->otable_bo);
        batch->otable_bo = NULL;
 }
index 1605549..76937f7 100644 (file)
@@ -576,7 +576,7 @@ config HID_LOGITECH_HIDPP
        depends on HID_LOGITECH
        select POWER_SUPPLY
        help
-       Support for Logitech devices relyingon the HID++ Logitech specification
+       Support for Logitech devices relying on the HID++ Logitech specification
 
        Say Y if you want support for Logitech devices relying on the HID++
        specification. Such devices are the various Logitech Touchpads (T650,
index 96e2577..8d68796 100644 (file)
@@ -58,7 +58,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx)
        cmd_base.cmd_v2.sensor_id = sensor_idx;
        cmd_base.cmd_v2.length  = 16;
 
-       writeq(0x0, privdata->mmio + AMD_C2P_MSG2);
+       writeq(0x0, privdata->mmio + AMD_C2P_MSG1);
        writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
 }
 
index 6b8f0d0..dc6bd42 100644 (file)
@@ -501,6 +501,8 @@ static const struct hid_device_id apple_devices[] = {
                        APPLE_RDESC_JIS },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI),
                .driver_data = APPLE_HAS_FN },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI),
+               .driver_data = APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO),
                .driver_data = APPLE_HAS_FN },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO),
index fca8fc7..fb807c8 100644 (file)
@@ -485,9 +485,6 @@ static void asus_kbd_backlight_set(struct led_classdev *led_cdev,
 {
        struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
                                                 cdev);
-       if (led->brightness == brightness)
-               return;
-
        led->brightness = brightness;
        schedule_work(&led->work);
 }
index f43a840..4ef1c3b 100644 (file)
@@ -742,7 +742,7 @@ static int ft260_is_interface_enabled(struct hid_device *hdev)
        int ret;
 
        ret = ft260_get_system_config(hdev, &cfg);
-       if (ret)
+       if (ret < 0)
                return ret;
 
        ft260_dbg("interface:  0x%02x\n", interface);
@@ -754,23 +754,16 @@ static int ft260_is_interface_enabled(struct hid_device *hdev)
        switch (cfg.chip_mode) {
        case FT260_MODE_ALL:
        case FT260_MODE_BOTH:
-               if (interface == 1) {
+               if (interface == 1)
                        hid_info(hdev, "uart interface is not supported\n");
-                       return 0;
-               }
-               ret = 1;
+               else
+                       ret = 1;
                break;
        case FT260_MODE_UART:
-               if (interface == 0) {
-                       hid_info(hdev, "uart is unsupported on interface 0\n");
-                       ret = 0;
-               }
+               hid_info(hdev, "uart interface is not supported\n");
                break;
        case FT260_MODE_I2C:
-               if (interface == 1) {
-                       hid_info(hdev, "i2c is unsupported on interface 1\n");
-                       ret = 0;
-               }
+               ret = 1;
                break;
        }
        return ret;
@@ -785,7 +778,7 @@ static int ft260_byte_show(struct hid_device *hdev, int id, u8 *cfg, int len,
        if (ret < 0)
                return ret;
 
-       return scnprintf(buf, PAGE_SIZE, "%hi\n", *field);
+       return scnprintf(buf, PAGE_SIZE, "%d\n", *field);
 }
 
 static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len,
@@ -797,7 +790,7 @@ static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len,
        if (ret < 0)
                return ret;
 
-       return scnprintf(buf, PAGE_SIZE, "%hi\n", le16_to_cpu(*field));
+       return scnprintf(buf, PAGE_SIZE, "%d\n", le16_to_cpu(*field));
 }
 
 #define FT260_ATTR_SHOW(name, reptype, id, type, func)                        \
@@ -1004,11 +997,9 @@ err_hid_stop:
 
 static void ft260_remove(struct hid_device *hdev)
 {
-       int ret;
        struct ft260_device *dev = hid_get_drvdata(hdev);
 
-       ret = ft260_is_interface_enabled(hdev);
-       if (ret <= 0)
+       if (!dev)
                return;
 
        sysfs_remove_group(&hdev->dev.kobj, &ft260_attr_group);
index 6b1fa97..91bf4d0 100644 (file)
@@ -784,6 +784,17 @@ static void hid_ishtp_cl_reset_handler(struct work_struct *work)
        }
 }
 
+static void hid_ishtp_cl_resume_handler(struct work_struct *work)
+{
+       struct ishtp_cl_data *client_data = container_of(work, struct ishtp_cl_data, resume_work);
+       struct ishtp_cl *hid_ishtp_cl = client_data->hid_ishtp_cl;
+
+       if (ishtp_wait_resume(ishtp_get_ishtp_device(hid_ishtp_cl))) {
+               client_data->suspended = false;
+               wake_up_interruptible(&client_data->ishtp_resume_wait);
+       }
+}
+
 ishtp_print_log ishtp_hid_print_trace;
 
 /**
@@ -822,6 +833,8 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
        init_waitqueue_head(&client_data->ishtp_resume_wait);
 
        INIT_WORK(&client_data->work, hid_ishtp_cl_reset_handler);
+       INIT_WORK(&client_data->resume_work, hid_ishtp_cl_resume_handler);
+
 
        ishtp_hid_print_trace = ishtp_trace_callback(cl_device);
 
@@ -921,7 +934,7 @@ static int hid_ishtp_cl_resume(struct device *device)
 
        hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
                        hid_ishtp_cl);
-       client_data->suspended = false;
+       schedule_work(&client_data->resume_work);
        return 0;
 }
 
index f88443a..6a5cc11 100644 (file)
@@ -135,6 +135,7 @@ struct ishtp_cl_data {
        int multi_packet_cnt;
 
        struct work_struct work;
+       struct work_struct resume_work;
        struct ishtp_cl_device *cl_device;
 };
 
index f0802b0..aa2c516 100644 (file)
@@ -314,13 +314,6 @@ static int ishtp_cl_device_resume(struct device *dev)
        if (!device)
                return 0;
 
-       /*
-        * When ISH needs hard reset, it is done asynchrnously, hence bus
-        * resume will  be called before full ISH resume
-        */
-       if (device->ishtp_dev->resume_flag)
-               return 0;
-
        driver = to_ishtp_cl_driver(dev->driver);
        if (driver && driver->driver.pm) {
                if (driver->driver.pm->resume)
@@ -850,6 +843,28 @@ struct device *ishtp_device(struct ishtp_cl_device *device)
 EXPORT_SYMBOL(ishtp_device);
 
 /**
+ * ishtp_wait_resume() - Wait for IPC resume
+ *
+ * Wait for IPC resume
+ *
+ * Return: resume complete or not
+ */
+bool ishtp_wait_resume(struct ishtp_device *dev)
+{
+       /* 50ms to get resume response */
+       #define WAIT_FOR_RESUME_ACK_MS          50
+
+       /* Waiting to get resume response */
+       if (dev->resume_flag)
+               wait_event_interruptible_timeout(dev->resume_wait,
+                                                !dev->resume_flag,
+                                                msecs_to_jiffies(WAIT_FOR_RESUME_ACK_MS));
+
+       return (!dev->resume_flag);
+}
+EXPORT_SYMBOL_GPL(ishtp_wait_resume);
+
+/**
  * ishtp_get_pci_device() - Return PCI device dev pointer
  * This interface is used to return PCI device pointer
  * from ishtp_cl_device instance.
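
ishtp_wait_resume() above is a standard wait-queue handshake: the resume path blocks briefly on a condition that the completion path clears before waking the queue. A minimal sketch of both halves, with hypothetical demo_* names (a real driver would also guard the flag with suitable locking or barriers):

#include <linux/jiffies.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
static bool demo_busy = true;

/* Waiter: block up to 50ms for demo_busy to clear, tolerating signals. */
static bool demo_wait_idle(void)
{
	if (demo_busy)
		wait_event_interruptible_timeout(demo_wait, !demo_busy,
						 msecs_to_jiffies(50));
	return !demo_busy;
}

/* Completion path (e.g. an IRQ thread): clear the flag, then wake waiters. */
static void demo_mark_idle(void)
{
	demo_busy = false;
	wake_up_interruptible(&demo_wait);
}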
index dcf3a23..7c2032f 100644 (file)
@@ -38,7 +38,7 @@ config USB_HIDDEV
        help
          Say Y here if you want to support HID devices (from the USB
          specification standpoint) that aren't strictly user interface
-         devices, like monitor controls and Uninterruptable Power Supplies.
+         devices, like monitor controls and Uninterruptible Power Supplies.
 
          This module supports these devices separately using a separate
          event interface on /dev/usb/hiddevX (char 180:96 to 180:111).
index 81d7d12..81ba642 100644 (file)
@@ -2548,6 +2548,9 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
                int slot;
 
                slot = input_mt_get_slot_by_key(input, hid_data->id);
+               if (slot < 0)
+                       return;
+
                input_mt_slot(input, slot);
                input_mt_report_slot_state(input, MT_TOOL_FINGER, prox);
        }
@@ -3831,7 +3834,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
                    wacom_wac->shared->touch->product == 0xF6) {
                        input_dev->evbit[0] |= BIT_MASK(EV_SW);
                        __set_bit(SW_MUTE_DEVICE, input_dev->swbit);
-                       wacom_wac->shared->has_mute_touch_switch = true;
+                       wacom_wac->has_mute_touch_switch = true;
                }
                fallthrough;
 
index caf6d0c..1423085 100644 (file)
@@ -605,6 +605,17 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
         */
        mutex_lock(&vmbus_connection.channel_mutex);
 
+       list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+               if (guid_equal(&channel->offermsg.offer.if_type,
+                              &newchannel->offermsg.offer.if_type) &&
+                   guid_equal(&channel->offermsg.offer.if_instance,
+                              &newchannel->offermsg.offer.if_instance)) {
+                       fnew = false;
+                       newchannel->primary_channel = channel;
+                       break;
+               }
+       }
+
        init_vp_index(newchannel);
 
        /* Remember the channels that should be cleaned up upon suspend. */
@@ -617,16 +628,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
         */
        atomic_dec(&vmbus_connection.offer_in_progress);
 
-       list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
-               if (guid_equal(&channel->offermsg.offer.if_type,
-                              &newchannel->offermsg.offer.if_type) &&
-                   guid_equal(&channel->offermsg.offer.if_instance,
-                              &newchannel->offermsg.offer.if_instance)) {
-                       fnew = false;
-                       break;
-               }
-       }
-
        if (fnew) {
                list_add_tail(&newchannel->listentry,
                              &vmbus_connection.chn_list);
@@ -647,7 +648,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
                /*
                 * Process the sub-channel.
                 */
-               newchannel->primary_channel = channel;
                list_add_tail(&newchannel->sc_list, &channel->sc_list);
        }
 
@@ -684,6 +684,30 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 }
 
 /*
+ * Check if the given CPU is already used by other channels of the same device.
+ * It should only be called by init_vp_index().
+ */
+static bool hv_cpuself_used(u32 cpu, struct vmbus_channel *chn)
+{
+       struct vmbus_channel *primary = chn->primary_channel;
+       struct vmbus_channel *sc;
+
+       lockdep_assert_held(&vmbus_connection.channel_mutex);
+
+       if (!primary)
+               return false;
+
+       if (primary->target_cpu == cpu)
+               return true;
+
+       list_for_each_entry(sc, &primary->sc_list, sc_list)
+               if (sc != chn && sc->target_cpu == cpu)
+                       return true;
+
+       return false;
+}
+
+/*
  * We use this state to statically distribute the channel interrupt load.
  */
 static int next_numa_node_id;
@@ -702,6 +726,7 @@ static int next_numa_node_id;
 static void init_vp_index(struct vmbus_channel *channel)
 {
        bool perf_chn = hv_is_perf_channel(channel);
+       u32 i, ncpu = num_online_cpus();
        cpumask_var_t available_mask;
        struct cpumask *alloced_mask;
        u32 target_cpu;
@@ -724,31 +749,38 @@ static void init_vp_index(struct vmbus_channel *channel)
                return;
        }
 
-       while (true) {
-               numa_node = next_numa_node_id++;
-               if (numa_node == nr_node_ids) {
-                       next_numa_node_id = 0;
-                       continue;
+       for (i = 1; i <= ncpu + 1; i++) {
+               while (true) {
+                       numa_node = next_numa_node_id++;
+                       if (numa_node == nr_node_ids) {
+                               next_numa_node_id = 0;
+                               continue;
+                       }
+                       if (cpumask_empty(cpumask_of_node(numa_node)))
+                               continue;
+                       break;
+               }
+               alloced_mask = &hv_context.hv_numa_map[numa_node];
+
+               if (cpumask_weight(alloced_mask) ==
+                   cpumask_weight(cpumask_of_node(numa_node))) {
+                       /*
+                        * We have cycled through all the CPUs in the node;
+                        * reset the alloced map.
+                        */
+                       cpumask_clear(alloced_mask);
                }
-               if (cpumask_empty(cpumask_of_node(numa_node)))
-                       continue;
-               break;
-       }
-       alloced_mask = &hv_context.hv_numa_map[numa_node];
 
-       if (cpumask_weight(alloced_mask) ==
-           cpumask_weight(cpumask_of_node(numa_node))) {
-               /*
-                * We have cycled through all the CPUs in the node;
-                * reset the alloced map.
-                */
-               cpumask_clear(alloced_mask);
-       }
+               cpumask_xor(available_mask, alloced_mask,
+                           cpumask_of_node(numa_node));
 
-       cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node));
+               target_cpu = cpumask_first(available_mask);
+               cpumask_set_cpu(target_cpu, alloced_mask);
 
-       target_cpu = cpumask_first(available_mask);
-       cpumask_set_cpu(target_cpu, alloced_mask);
+               if (channel->offermsg.offer.sub_channel_index >= ncpu ||
+                   i > ncpu || !hv_cpuself_used(target_cpu, channel))
+                       break;
+       }
 
        channel->target_cpu = target_cpu;
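
The rework above retries the assignment up to ncpu + 1 times so a sub-channel avoids CPUs its sibling channels already use. The per-iteration pick itself is plain cpumask arithmetic; a minimal sketch with a hypothetical per-node allocation mask:

#include <linux/cpumask.h>

static u32 demo_pick_cpu(const struct cpumask *node_mask,
			 struct cpumask *alloced, struct cpumask *scratch)
{
	u32 cpu;

	/* All CPUs of the node handed out: start a new round. */
	if (cpumask_weight(alloced) == cpumask_weight(node_mask))
		cpumask_clear(alloced);

	/* scratch = CPUs in the node that are not yet allocated. */
	cpumask_xor(scratch, alloced, node_mask);
	cpu = cpumask_first(scratch);
	cpumask_set_cpu(cpu, alloced);

	return cpu;
}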
 
index 6d5014e..a6ea1eb 100644 (file)
@@ -635,8 +635,8 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
 
        status = readb(i2c->base + MPC_I2C_SR);
        if (status & CSR_MIF) {
-               /* Read again to allow register to stabilise */
-               status = readb(i2c->base + MPC_I2C_SR);
+               /* Wait up to 100us for transfer to properly complete */
+               readb_poll_timeout(i2c->base + MPC_I2C_SR, status, status & CSR_MCF, 0, 100);
                writeb(0, i2c->base + MPC_I2C_SR);
                mpc_i2c_do_intr(i2c, status);
                return IRQ_HANDLED;
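
readb_poll_timeout() from <linux/iopoll.h> takes (addr, val, exit_condition, delay_us, timeout_us) and returns 0 once the condition holds, or -ETIMEDOUT when the budget expires. A minimal sketch against a hypothetical ready bit:

#include <linux/bits.h>
#include <linux/iopoll.h>

#define DEMO_READY	BIT(0)	/* hypothetical status bit */

static int demo_wait_ready(void __iomem *status_reg)
{
	u8 status;

	/* Poll without sleeping between reads, for at most 100us. */
	return readb_poll_timeout(status_reg, status, status & DEMO_READY,
				  0, 100);
}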
index d567402..a8688a9 100644 (file)
@@ -120,6 +120,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
        if (!chip_ctx)
                return -ENOMEM;
        chip_ctx->chip_num = bp->chip_num;
+       chip_ctx->hw_stats_size = bp->hw_ring_stats_size;
 
        rdev->chip_ctx = chip_ctx;
        /* rest members to follow eventually */
@@ -550,6 +551,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
                                       dma_addr_t dma_map,
                                       u32 *fw_stats_ctx_id)
 {
+       struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
        struct hwrm_stat_ctx_alloc_output resp = {0};
        struct hwrm_stat_ctx_alloc_input req = {0};
        struct bnxt_en_dev *en_dev = rdev->en_dev;
@@ -566,7 +568,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
        req.update_period_ms = cpu_to_le32(1000);
        req.stats_dma_addr = cpu_to_le64(dma_map);
-       req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
+       req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
        req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
        bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
                            sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
index 17f0701..44282a8 100644 (file)
@@ -56,6 +56,7 @@
 static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats);
 static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+                                     struct bnxt_qplib_chip_ctx *cctx,
                                      struct bnxt_qplib_stats *stats);
 
 /* PBL */
@@ -559,7 +560,7 @@ int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
                goto fail;
 stats_alloc:
        /* Stats */
-       rc = bnxt_qplib_alloc_stats_ctx(res->pdev, &ctx->stats);
+       rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
        if (rc)
                goto fail;
 
@@ -889,15 +890,12 @@ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
 }
 
 static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+                                     struct bnxt_qplib_chip_ctx *cctx,
                                      struct bnxt_qplib_stats *stats)
 {
        memset(stats, 0, sizeof(*stats));
        stats->fw_id = -1;
-       /* 128 byte aligned context memory is required only for 57500.
-        * However making this unconditional, it does not harm previous
-        * generation.
-        */
-       stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
+       stats->size = cctx->hw_stats_size;
        stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
                                        &stats->dma_map, GFP_KERNEL);
        if (!stats->dma) {
index c291f49..9103150 100644 (file)
@@ -54,6 +54,7 @@ struct bnxt_qplib_chip_ctx {
        u16     chip_num;
        u8      chip_rev;
        u8      chip_metal;
+       u16     hw_stats_size;
        struct bnxt_qplib_drv_modes modes;
 };
 
index b1023a7..f1e5515 100644 (file)
@@ -2845,7 +2845,7 @@ static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
  * parses fpm commit info and copy base value
  * of hmc objects in hmc_info
  */
-static enum irdma_status_code
+static void
 irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
                              struct irdma_hmc_obj_info *info, u32 *sd)
 {
@@ -2915,7 +2915,6 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
        else
                *sd = (u32)(size >> 21);
 
-       return 0;
 }
 
 /**
@@ -4187,11 +4186,9 @@ enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
  * @dev: sc device struct
  * @count: allocate count
  */
-enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
+void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
 {
        writel(count, dev->hw_regs[IRDMA_AEQALLOC]);
-
-       return 0;
 }
 
 /**
@@ -4434,9 +4431,9 @@ static enum irdma_status_code irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev,
        ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
                                           &commit_fpm_mem, true, wait_type);
        if (!ret_code)
-               ret_code = irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
-                                                        hmc_info->hmc_obj,
-                                                        &hmc_info->sd_table.sd_cnt);
+               irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
+                                             hmc_info->hmc_obj,
+                                             &hmc_info->sd_table.sd_cnt);
        print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
                             8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
                             false);
index 7afb8a6..00de5ee 100644 (file)
@@ -1920,7 +1920,7 @@ enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf)
  * irdma_set_hw_rsrc - set hw memory resources.
  * @rf: RDMA PCI function
  */
-static u32 irdma_set_hw_rsrc(struct irdma_pci_f *rf)
+static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
 {
        rf->allocated_qps = (void *)(rf->mem_rsrc +
                   (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
@@ -1937,8 +1937,6 @@ static u32 irdma_set_hw_rsrc(struct irdma_pci_f *rf)
        spin_lock_init(&rf->arp_lock);
        spin_lock_init(&rf->qptable_lock);
        spin_lock_init(&rf->qh_list_lock);
-
-       return 0;
 }
 
 /**
@@ -2000,9 +1998,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
 
        rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
 
-       ret = irdma_set_hw_rsrc(rf);
-       if (ret)
-               goto set_hw_rsrc_fail;
+       irdma_set_hw_rsrc(rf);
 
        set_bit(0, rf->allocated_mrs);
        set_bit(0, rf->allocated_qps);
@@ -2025,9 +2021,6 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
 
        return 0;
 
-set_hw_rsrc_fail:
-       kfree(rf->mem_rsrc);
-       rf->mem_rsrc = NULL;
 mem_rsrc_kzalloc_fail:
        kfree(rf->allocated_ws_nodes);
        rf->allocated_ws_nodes = NULL;
index ea59432..51a4135 100644 (file)
@@ -215,10 +215,10 @@ static void irdma_remove(struct auxiliary_device *aux_dev)
        pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn));
 }
 
-static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf)
+static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf,
+                                  struct ice_vsi *vsi)
 {
        struct irdma_pci_f *rf = iwdev->rf;
-       struct ice_vsi *vsi = ice_get_main_vsi(pf);
 
        rf->cdev = pf;
        rf->gen_ops.register_qset = irdma_lan_register_qset;
@@ -253,12 +253,15 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
                                                            struct iidc_auxiliary_dev,
                                                            adev);
        struct ice_pf *pf = iidc_adev->pf;
+       struct ice_vsi *vsi = ice_get_main_vsi(pf);
        struct iidc_qos_params qos_info = {};
        struct irdma_device *iwdev;
        struct irdma_pci_f *rf;
        struct irdma_l2params l2params = {};
        int err;
 
+       if (!vsi)
+               return -EIO;
        iwdev = ib_alloc_device(irdma_device, ibdev);
        if (!iwdev)
                return -ENOMEM;
@@ -268,7 +271,7 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
                return -ENOMEM;
        }
 
-       irdma_fill_device_info(iwdev, pf);
+       irdma_fill_device_info(iwdev, pf, vsi);
        rf = iwdev->rf;
 
        if (irdma_ctrl_init_hw(rf)) {
index 7387b83..874bc25 100644 (file)
@@ -1222,8 +1222,7 @@ enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
                                         struct irdma_aeq_init_info *info);
 enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
                                              struct irdma_aeqe_info *info);
-enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev,
-                                                  u32 count);
+void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);
 
 void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
                      int abi_ver);
index a6d52c2..5fb92de 100644 (file)
@@ -931,7 +931,7 @@ enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
 enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
                                             struct irdma_post_rq_info *info)
 {
-       u32 total_size = 0, wqe_idx, i, byte_off;
+       u32 wqe_idx, i, byte_off;
        u32 addl_frag_cnt;
        __le64 *wqe;
        u64 hdr;
@@ -939,9 +939,6 @@ enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
        if (qp->max_rq_frag_cnt < info->num_sges)
                return IRDMA_ERR_INVALID_FRAG_COUNT;
 
-       for (i = 0; i < info->num_sges; i++)
-               total_size += info->sg_list[i].len;
-
        wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
        if (!wqe)
                return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
index 9712f69..717147e 100644 (file)
@@ -557,7 +557,7 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
  * @iwqp: qp ptr
  * @init_info: initialize info to return
  */
-static int irdma_setup_virt_qp(struct irdma_device *iwdev,
+static void irdma_setup_virt_qp(struct irdma_device *iwdev,
                               struct irdma_qp *iwqp,
                               struct irdma_qp_init_info *init_info)
 {
@@ -574,8 +574,6 @@ static int irdma_setup_virt_qp(struct irdma_device *iwdev,
                init_info->sq_pa = qpmr->sq_pbl.addr;
                init_info->rq_pa = qpmr->rq_pbl.addr;
        }
-
-       return 0;
 }
 
 /**
@@ -914,7 +912,7 @@ static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
                        }
                }
                init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
-               err_code = irdma_setup_virt_qp(iwdev, iwqp, &init_info);
+               irdma_setup_virt_qp(iwdev, iwqp, &init_info);
        } else {
                init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
                err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
index 6aabcb4..be4bcb4 100644 (file)
@@ -113,13 +113,14 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
        int                     num_buf;
        void                    *vaddr;
        int err;
+       int i;
 
        umem = ib_umem_get(pd->ibpd.device, start, length, access);
        if (IS_ERR(umem)) {
-               pr_warn("err %d from rxe_umem_get\n",
-                       (int)PTR_ERR(umem));
+               pr_warn("%s: Unable to pin memory region err = %d\n",
+                       __func__, (int)PTR_ERR(umem));
                err = PTR_ERR(umem);
-               goto err1;
+               goto err_out;
        }
 
        mr->umem = umem;
@@ -129,9 +130,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
        err = rxe_mr_alloc(mr, num_buf);
        if (err) {
-               pr_warn("err %d from rxe_mr_alloc\n", err);
-               ib_umem_release(umem);
-               goto err1;
+               pr_warn("%s: Unable to allocate memory for map\n",
+                               __func__);
+               goto err_release_umem;
        }
 
        mr->page_shift = PAGE_SHIFT;
@@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
                        vaddr = page_address(sg_page_iter_page(&sg_iter));
                        if (!vaddr) {
-                               pr_warn("null vaddr\n");
-                               ib_umem_release(umem);
+                               pr_warn("%s: Unable to get virtual address\n",
+                                               __func__);
                                err = -ENOMEM;
-                               goto err1;
+                               goto err_cleanup_map;
                        }
 
                        buf->addr = (uintptr_t)vaddr;
@@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
        return 0;
 
-err1:
+err_cleanup_map:
+       for (i = 0; i < mr->num_map; i++)
+               kfree(mr->map[i]);
+       kfree(mr->map);
+err_release_umem:
+       ib_umem_release(umem);
+err_out:
        return err;
 }
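
The relabelled exits above follow the usual kernel unwind convention: each failure jumps to the label that releases exactly what has been acquired so far, in reverse order of acquisition. A minimal sketch with hypothetical resources:

static int demo_acquire_a(void) { return 0; }	/* hypothetical */
static int demo_acquire_b(void) { return 0; }	/* hypothetical */
static void demo_release_a(void) { }

static int demo_init(void)
{
	int err;

	err = demo_acquire_a();
	if (err)
		goto err_out;

	err = demo_acquire_b();
	if (err)
		goto err_release_a;	/* undo A before bailing out */

	return 0;

err_release_a:
	demo_release_a();
err_out:
	return err;
}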
 
index dd20b01..235f9bd 100644 (file)
@@ -379,6 +379,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
        switch (idx) {
        case CMDQ_ERR_CERROR_ABT_IDX:
                dev_err(smmu->dev, "retrying command fetch\n");
+               return;
        case CMDQ_ERR_CERROR_NONE_IDX:
                return;
        case CMDQ_ERR_CERROR_ATC_INV_IDX:
index 25ed444..021cf8f 100644 (file)
@@ -849,12 +849,10 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
        ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev);
        if (ret) {
                dev_err(dev, "Failed to register iommu\n");
-               goto err_sysfs_remove;
+               return ret;
        }
 
-       ret = bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);
-       if (ret)
-               goto err_unregister_device;
+       bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);
 
        if (qcom_iommu->local_base) {
                pm_runtime_get_sync(dev);
@@ -863,13 +861,6 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
        }
 
        return 0;
-
-err_unregister_device:
-       iommu_device_unregister(&qcom_iommu->iommu);
-
-err_sysfs_remove:
-       iommu_device_sysfs_remove(&qcom_iommu->iommu);
-       return ret;
 }
 
 static int qcom_iommu_device_remove(struct platform_device *pdev)
index a6a07d9..dd22fc7 100644 (file)
@@ -2429,10 +2429,11 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
        return 0;
 }
 
-static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
+static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
 {
-       unsigned long flags;
+       struct intel_iommu *iommu = info->iommu;
        struct context_entry *context;
+       unsigned long flags;
        u16 did_old;
 
        if (!iommu)
@@ -2444,7 +2445,16 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn
                spin_unlock_irqrestore(&iommu->lock, flags);
                return;
        }
-       did_old = context_domain_id(context);
+
+       if (sm_supported(iommu)) {
+               if (hw_pass_through && domain_type_is_si(info->domain))
+                       did_old = FLPT_DEFAULT_DID;
+               else
+                       did_old = info->domain->iommu_did[iommu->seq_id];
+       } else {
+               did_old = context_domain_id(context);
+       }
+
        context_clear_entry(context);
        __iommu_flush_cache(iommu, context, sizeof(*context));
        spin_unlock_irqrestore(&iommu->lock, flags);
@@ -2462,6 +2472,8 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn
                                 0,
                                 0,
                                 DMA_TLB_DSI_FLUSH);
+
+       __iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
 }
 
 static inline void unlink_domain_info(struct device_domain_info *info)
@@ -4425,9 +4437,9 @@ out_free_dmar:
 
 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
 {
-       struct intel_iommu *iommu = opaque;
+       struct device_domain_info *info = opaque;
 
-       domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
+       domain_context_clear_one(info, PCI_BUS_NUM(alias), alias & 0xff);
        return 0;
 }
 
@@ -4437,12 +4449,13 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op
  * devices, unbinding the driver from any one of them will possibly leave
  * the others unable to operate.
  */
-static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
+static void domain_context_clear(struct device_domain_info *info)
 {
-       if (!iommu || !dev || !dev_is_pci(dev))
+       if (!info->iommu || !info->dev || !dev_is_pci(info->dev))
                return;
 
-       pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
+       pci_for_each_dma_alias(to_pci_dev(info->dev),
+                              &domain_context_clear_one_cb, info);
 }
 
 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
@@ -4459,14 +4472,13 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
        iommu = info->iommu;
        domain = info->domain;
 
-       if (info->dev) {
+       if (info->dev && !dev_is_real_dma_subdevice(info->dev)) {
                if (dev_is_pci(info->dev) && sm_supported(iommu))
                        intel_pasid_tear_down_entry(iommu, info->dev,
                                        PASID_RID2PASID, false);
 
                iommu_disable_dev_iotlb(info);
-               if (!dev_is_real_dma_subdevice(info->dev))
-                       domain_context_clear(iommu, info->dev);
+               domain_context_clear(info);
                intel_pasid_free_table(info->dev);
        }
 
index 94b9d8e..9febfb7 100644 (file)
@@ -544,12 +544,14 @@ static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma)
 }
 
 #define DT_HI_MASK GENMASK_ULL(39, 32)
+#define DTE_BASE_HI_MASK GENMASK(11, 4)
 #define DT_SHIFT   28
 
 static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr)
 {
-       return (phys_addr_t)(addr & RK_DTE_PT_ADDRESS_MASK) |
-              ((addr & DT_HI_MASK) << DT_SHIFT);
+       u64 addr64 = addr;
+       return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) |
+              ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT);
 }
 
 static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma)
index 4657e99..59a36f9 100644 (file)
@@ -173,10 +173,8 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
        int ret;
 
        for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
-               if (!adev->status.enabled) {
-                       acpi_dev_put(adev);
+               if (!adev->status.enabled)
                        continue;
-               }
 
                if (bridge->n_sensors >= CIO2_NUM_PORTS) {
                        acpi_dev_put(adev);
@@ -185,7 +183,6 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
                }
 
                sensor = &bridge->sensors[bridge->n_sensors];
-               sensor->adev = adev;
                strscpy(sensor->name, cfg->hid, sizeof(sensor->name));
 
                ret = cio2_bridge_read_acpi_buffer(adev, "SSDB",
@@ -215,6 +212,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
                        goto err_free_swnodes;
                }
 
+               sensor->adev = acpi_dev_get(adev);
                adev->fwnode.secondary = fwnode;
 
                dev_info(&cio2->dev, "Found supported sensor %s\n",
index 07f342d..7481f55 100644 (file)
@@ -385,7 +385,7 @@ static int ngene_command_config_free_buf(struct ngene *dev, u8 *config)
 
        com.cmd.hdr.Opcode = CMD_CONFIGURE_FREE_BUFFER;
        com.cmd.hdr.Length = 6;
-       memcpy(&com.cmd.ConfigureBuffers.config, config, 6);
+       memcpy(&com.cmd.ConfigureFreeBuffers.config, config, 6);
        com.in_len = 6;
        com.out_len = 0;
 
index 84f04e0..3d296f1 100644 (file)
@@ -407,12 +407,14 @@ enum _BUFFER_CONFIGS {
 
 struct FW_CONFIGURE_FREE_BUFFERS {
        struct FW_HEADER hdr;
-       u8   UVI1_BufferLength;
-       u8   UVI2_BufferLength;
-       u8   TVO_BufferLength;
-       u8   AUD1_BufferLength;
-       u8   AUD2_BufferLength;
-       u8   TVA_BufferLength;
+       struct {
+               u8   UVI1_BufferLength;
+               u8   UVI2_BufferLength;
+               u8   TVO_BufferLength;
+               u8   AUD1_BufferLength;
+               u8   AUD2_BufferLength;
+               u8   TVA_BufferLength;
+       } __packed config;
 } __attribute__ ((__packed__));
 
 struct FW_CONFIGURE_UART {
index f80c2ea..be0858b 100644 (file)
@@ -9,6 +9,7 @@
  * Copyright (C) 2009 Texas Instruments
  * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
  */
+#include <linux/cpu_pm.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -232,7 +233,10 @@ struct gpmc_device {
        int irq;
        struct irq_chip irq_chip;
        struct gpio_chip gpio_chip;
+       struct notifier_block nb;
+       struct omap3_gpmc_regs context;
        int nirqs;
+       unsigned int is_suspended:1;
 };
 
 static struct irq_domain *gpmc_irq_domain;
@@ -2384,6 +2388,106 @@ static int gpmc_gpio_init(struct gpmc_device *gpmc)
        return 0;
 }
 
+static void omap3_gpmc_save_context(struct gpmc_device *gpmc)
+{
+       struct omap3_gpmc_regs *gpmc_context;
+       int i;
+
+       if (!gpmc || !gpmc_base)
+               return;
+
+       gpmc_context = &gpmc->context;
+
+       gpmc_context->sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
+       gpmc_context->irqenable = gpmc_read_reg(GPMC_IRQENABLE);
+       gpmc_context->timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
+       gpmc_context->config = gpmc_read_reg(GPMC_CONFIG);
+       gpmc_context->prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
+       gpmc_context->prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2);
+       gpmc_context->prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL);
+       for (i = 0; i < gpmc_cs_num; i++) {
+               gpmc_context->cs_context[i].is_valid = gpmc_cs_mem_enabled(i);
+               if (gpmc_context->cs_context[i].is_valid) {
+                       gpmc_context->cs_context[i].config1 =
+                               gpmc_cs_read_reg(i, GPMC_CS_CONFIG1);
+                       gpmc_context->cs_context[i].config2 =
+                               gpmc_cs_read_reg(i, GPMC_CS_CONFIG2);
+                       gpmc_context->cs_context[i].config3 =
+                               gpmc_cs_read_reg(i, GPMC_CS_CONFIG3);
+                       gpmc_context->cs_context[i].config4 =
+                               gpmc_cs_read_reg(i, GPMC_CS_CONFIG4);
+                       gpmc_context->cs_context[i].config5 =
+                               gpmc_cs_read_reg(i, GPMC_CS_CONFIG5);
+                       gpmc_context->cs_context[i].config6 =
+                               gpmc_cs_read_reg(i, GPMC_CS_CONFIG6);
+                       gpmc_context->cs_context[i].config7 =
+                               gpmc_cs_read_reg(i, GPMC_CS_CONFIG7);
+               }
+       }
+}
+
+static void omap3_gpmc_restore_context(struct gpmc_device *gpmc)
+{
+       struct omap3_gpmc_regs *gpmc_context;
+       int i;
+
+       if (!gpmc || !gpmc_base)
+               return;
+
+       gpmc_context = &gpmc->context;
+
+       gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context->sysconfig);
+       gpmc_write_reg(GPMC_IRQENABLE, gpmc_context->irqenable);
+       gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context->timeout_ctrl);
+       gpmc_write_reg(GPMC_CONFIG, gpmc_context->config);
+       gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context->prefetch_config1);
+       gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context->prefetch_config2);
+       gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context->prefetch_control);
+       for (i = 0; i < gpmc_cs_num; i++) {
+               if (gpmc_context->cs_context[i].is_valid) {
+                       gpmc_cs_write_reg(i, GPMC_CS_CONFIG1,
+                                         gpmc_context->cs_context[i].config1);
+                       gpmc_cs_write_reg(i, GPMC_CS_CONFIG2,
+                                         gpmc_context->cs_context[i].config2);
+                       gpmc_cs_write_reg(i, GPMC_CS_CONFIG3,
+                                         gpmc_context->cs_context[i].config3);
+                       gpmc_cs_write_reg(i, GPMC_CS_CONFIG4,
+                                         gpmc_context->cs_context[i].config4);
+                       gpmc_cs_write_reg(i, GPMC_CS_CONFIG5,
+                                         gpmc_context->cs_context[i].config5);
+                       gpmc_cs_write_reg(i, GPMC_CS_CONFIG6,
+                                         gpmc_context->cs_context[i].config6);
+                       gpmc_cs_write_reg(i, GPMC_CS_CONFIG7,
+                                         gpmc_context->cs_context[i].config7);
+               } else {
+                       gpmc_cs_write_reg(i, GPMC_CS_CONFIG7, 0);
+               }
+       }
+}
+
+static int omap_gpmc_context_notifier(struct notifier_block *nb,
+                                     unsigned long cmd, void *v)
+{
+       struct gpmc_device *gpmc;
+
+       gpmc = container_of(nb, struct gpmc_device, nb);
+       if (gpmc->is_suspended || pm_runtime_suspended(gpmc->dev))
+               return NOTIFY_OK;
+
+       switch (cmd) {
+       case CPU_CLUSTER_PM_ENTER:
+               omap3_gpmc_save_context(gpmc);
+               break;
+       case CPU_CLUSTER_PM_ENTER_FAILED:       /* No need to restore context */
+               break;
+       case CPU_CLUSTER_PM_EXIT:
+               omap3_gpmc_restore_context(gpmc);
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
 static int gpmc_probe(struct platform_device *pdev)
 {
        int rc;
@@ -2472,6 +2576,9 @@ static int gpmc_probe(struct platform_device *pdev)
 
        gpmc_probe_dt_children(pdev);
 
+       gpmc->nb.notifier_call = omap_gpmc_context_notifier;
+       cpu_pm_register_notifier(&gpmc->nb);
+
        return 0;
 
 gpio_init_failed:
@@ -2486,6 +2593,7 @@ static int gpmc_remove(struct platform_device *pdev)
 {
        struct gpmc_device *gpmc = platform_get_drvdata(pdev);
 
+       cpu_pm_unregister_notifier(&gpmc->nb);
        gpmc_free_irq(gpmc);
        gpmc_mem_exit();
        pm_runtime_put_sync(&pdev->dev);
@@ -2497,15 +2605,23 @@ static int gpmc_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int gpmc_suspend(struct device *dev)
 {
-       omap3_gpmc_save_context();
+       struct gpmc_device *gpmc = dev_get_drvdata(dev);
+
+       omap3_gpmc_save_context(gpmc);
        pm_runtime_put_sync(dev);
+       gpmc->is_suspended = 1;
+
        return 0;
 }
 
 static int gpmc_resume(struct device *dev)
 {
+       struct gpmc_device *gpmc = dev_get_drvdata(dev);
+
        pm_runtime_get_sync(dev);
-       omap3_gpmc_restore_context();
+       omap3_gpmc_restore_context(gpmc);
+       gpmc->is_suspended = 0;
+
        return 0;
 }
 #endif
@@ -2527,74 +2643,3 @@ static __init int gpmc_init(void)
        return platform_driver_register(&gpmc_driver);
 }
 postcore_initcall(gpmc_init);
-
-static struct omap3_gpmc_regs gpmc_context;
-
-void omap3_gpmc_save_context(void)
-{
-       int i;
-
-       if (!gpmc_base)
-               return;
-
-       gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
-       gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
-       gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
-       gpmc_context.config = gpmc_read_reg(GPMC_CONFIG);
-       gpmc_context.prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
-       gpmc_context.prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2);
-       gpmc_context.prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL);
-       for (i = 0; i < gpmc_cs_num; i++) {
-               gpmc_context.cs_context[i].is_valid = gpmc_cs_mem_enabled(i);
-               if (gpmc_context.cs_context[i].is_valid) {
-                       gpmc_context.cs_context[i].config1 =
-                               gpmc_cs_read_reg(i, GPMC_CS_CONFIG1);
-                       gpmc_context.cs_context[i].config2 =
-                               gpmc_cs_read_reg(i, GPMC_CS_CONFIG2);
-                       gpmc_context.cs_context[i].config3 =
-                               gpmc_cs_read_reg(i, GPMC_CS_CONFIG3);
-                       gpmc_context.cs_context[i].config4 =
-                               gpmc_cs_read_reg(i, GPMC_CS_CONFIG4);
-                       gpmc_context.cs_context[i].config5 =
-                               gpmc_cs_read_reg(i, GPMC_CS_CONFIG5);
-                       gpmc_context.cs_context[i].config6 =
-                               gpmc_cs_read_reg(i, GPMC_CS_CONFIG6);
-                       gpmc_context.cs_context[i].config7 =
-                               gpmc_cs_read_reg(i, GPMC_CS_CONFIG7);
-               }
-       }
-}
-
-void omap3_gpmc_restore_context(void)
-{
-       int i;
-
-       if (!gpmc_base)
-               return;
-
-       gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
-       gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
-       gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
-       gpmc_write_reg(GPMC_CONFIG, gpmc_context.config);
-       gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context.prefetch_config1);
-       gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context.prefetch_config2);
-       gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context.prefetch_control);
-       for (i = 0; i < gpmc_cs_num; i++) {
-               if (gpmc_context.cs_context[i].is_valid) {
-                       gpmc_cs_write_reg(i, GPMC_CS_CONFIG1,
-                               gpmc_context.cs_context[i].config1);
-                       gpmc_cs_write_reg(i, GPMC_CS_CONFIG2,
-                               gpmc_context.cs_context[i].config2);
-                       gpmc_cs_write_reg(i, GPMC_CS_CONFIG3,
-                               gpmc_context.cs_context[i].config3);
-                       gpmc_cs_write_reg(i, GPMC_CS_CONFIG4,
-                               gpmc_context.cs_context[i].config4);
-                       gpmc_cs_write_reg(i, GPMC_CS_CONFIG5,
-                               gpmc_context.cs_context[i].config5);
-                       gpmc_cs_write_reg(i, GPMC_CS_CONFIG6,
-                               gpmc_context.cs_context[i].config6);
-                       gpmc_cs_write_reg(i, GPMC_CS_CONFIG7,
-                               gpmc_context.cs_context[i].config7);
-               }
-       }
-}
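
The conversion above replaces exported save/restore helpers with a cpu_pm notifier, so the context is saved and restored around cluster power transitions instead of only at driver suspend/resume. A minimal sketch of the notifier plumbing, with hypothetical hooks:

#include <linux/cpu_pm.h>
#include <linux/notifier.h>

static int demo_cpu_pm_notifier(struct notifier_block *nb,
				unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		/* save registers before the cluster powers down */
		break;
	case CPU_CLUSTER_PM_EXIT:
		/* restore registers after power-up */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_cpu_pm_notifier,
};

/* Pair cpu_pm_register_notifier(&demo_nb) in probe with
 * cpu_pm_unregister_notifier(&demo_nb) in remove. */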
index e65eac5..3d15388 100644 (file)
@@ -71,6 +71,7 @@ static int tegra186_mc_resume(struct tegra_mc *mc)
        return 0;
 }
 
+#if IS_ENABLED(CONFIG_IOMMU_API)
 static void tegra186_mc_client_sid_override(struct tegra_mc *mc,
                                            const struct tegra_mc_client *client,
                                            unsigned int sid)
@@ -108,6 +109,7 @@ static void tegra186_mc_client_sid_override(struct tegra_mc *mc,
                writel(sid, mc->regs + client->regs.sid.override);
        }
 }
+#endif
 
 static int tegra186_mc_probe_device(struct tegra_mc *mc, struct device *dev)
 {
index 7a6f01a..305ffad 100644 (file)
@@ -714,23 +714,20 @@ static int at24_probe(struct i2c_client *client)
        }
 
        /*
-        * If the 'label' property is not present for the AT24 EEPROM,
-        * then nvmem_config.id is initialised to NVMEM_DEVID_AUTO,
-        * and this will append the 'devid' to the name of the NVMEM
-        * device. This is purely legacy and the AT24 driver has always
-        * defaulted to this. However, if the 'label' property is
-        * present then this means that the name is specified by the
-        * firmware and this name should be used verbatim and so it is
-        * not necessary to append the 'devid'.
+        * We initialize nvmem_config.id to NVMEM_DEVID_AUTO even if the
+        * label property is set, as some platforms can have multiple EEPROMs
+        * with the same label and we cannot register each of those with the
+        * same name. Failing to register those EEPROMs triggers a cascade of
+        * failures on such platforms.
         */
+       nvmem_config.id = NVMEM_DEVID_AUTO;
+
        if (device_property_present(dev, "label")) {
-               nvmem_config.id = NVMEM_DEVID_NONE;
                err = device_property_read_string(dev, "label",
                                                  &nvmem_config.name);
                if (err)
                        return err;
        } else {
-               nvmem_config.id = NVMEM_DEVID_AUTO;
                nvmem_config.name = dev_name(dev);
        }
 
index 9890a15..ce8aed5 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/errno.h>
 #include <linux/hdreg.h>
 #include <linux/kdev_t.h>
+#include <linux/kref.h>
 #include <linux/blkdev.h>
 #include <linux/cdev.h>
 #include <linux/mutex.h>
@@ -111,7 +112,7 @@ struct mmc_blk_data {
 #define MMC_BLK_CMD23  (1 << 0)        /* Can do SET_BLOCK_COUNT for multiblock */
 #define MMC_BLK_REL_WR (1 << 1)        /* MMC Reliable write support */
 
-       unsigned int    usage;
+       struct kref     kref;
        unsigned int    read_only;
        unsigned int    part_type;
        unsigned int    reset_done;
@@ -181,10 +182,8 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 
        mutex_lock(&open_lock);
        md = disk->private_data;
-       if (md && md->usage == 0)
+       if (md && !kref_get_unless_zero(&md->kref))
                md = NULL;
-       if (md)
-               md->usage++;
        mutex_unlock(&open_lock);
 
        return md;
@@ -196,18 +195,25 @@ static inline int mmc_get_devidx(struct gendisk *disk)
        return devidx;
 }
 
-static void mmc_blk_put(struct mmc_blk_data *md)
+static void mmc_blk_kref_release(struct kref *ref)
 {
-       mutex_lock(&open_lock);
-       md->usage--;
-       if (md->usage == 0) {
-               int devidx = mmc_get_devidx(md->disk);
+       struct mmc_blk_data *md = container_of(ref, struct mmc_blk_data, kref);
+       int devidx;
 
-               ida_simple_remove(&mmc_blk_ida, devidx);
-               put_disk(md->disk);
-               kfree(md);
-       }
+       devidx = mmc_get_devidx(md->disk);
+       ida_simple_remove(&mmc_blk_ida, devidx);
+
+       mutex_lock(&open_lock);
+       md->disk->private_data = NULL;
        mutex_unlock(&open_lock);
+
+       put_disk(md->disk);
+       kfree(md);
+}
+
+static void mmc_blk_put(struct mmc_blk_data *md)
+{
+       kref_put(&md->kref, mmc_blk_kref_release);
 }
 
 static ssize_t power_ro_lock_show(struct device *dev,
@@ -2327,7 +2333,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
 
        INIT_LIST_HEAD(&md->part);
        INIT_LIST_HEAD(&md->rpmbs);
-       md->usage = 1;
+       kref_init(&md->kref);
+
        md->queue.blkdata = md;
 
        md->disk->major = MMC_BLOCK_MAJOR;
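The conversion above is the stock kref recipe. A self-contained sketch of that recipe, with illustrative names rather than the driver's:

#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
        struct kref kref;
};

static void obj_release(struct kref *ref)
{
        struct obj *o = container_of(ref, struct obj, kref);

        kfree(o);                       /* runs exactly once, on the final put */
}

static struct obj *obj_alloc(void)
{
        struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

        if (o)
                kref_init(&o->kref);    /* refcount starts at 1 */
        return o;
}

/* Lookup-side guard: fail instead of resurrecting an object whose
 * refcount already hit zero, mirroring mmc_blk_get() above. */
static struct obj *obj_get(struct obj *o)
{
        return (o && kref_get_unless_zero(&o->kref)) ? o : NULL;
}

static void obj_put(struct obj *o)
{
        kref_put(&o->kref, obj_release);
}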
index eda4a18..0475d96 100644 (file)
@@ -75,7 +75,8 @@ static void mmc_host_classdev_release(struct device *dev)
 {
        struct mmc_host *host = cls_dev_to_mmc_host(dev);
        wakeup_source_unregister(host->ws);
-       ida_simple_remove(&mmc_host_ida, host->index);
+       if (of_alias_get_id(host->parent->of_node, "mmc") < 0)
+               ida_simple_remove(&mmc_host_ida, host->index);
        kfree(host);
 }
 
@@ -502,7 +503,7 @@ static int mmc_first_nonreserved_index(void)
  */
 struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
 {
-       int err;
+       int index;
        struct mmc_host *host;
        int alias_id, min_idx, max_idx;
 
@@ -515,20 +516,19 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
 
        alias_id = of_alias_get_id(dev->of_node, "mmc");
        if (alias_id >= 0) {
-               min_idx = alias_id;
-               max_idx = alias_id + 1;
+               index = alias_id;
        } else {
                min_idx = mmc_first_nonreserved_index();
                max_idx = 0;
-       }
 
-       err = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL);
-       if (err < 0) {
-               kfree(host);
-               return NULL;
+               index = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL);
+               if (index < 0) {
+                       kfree(host);
+                       return NULL;
+               }
        }
 
-       host->index = err;
+       host->index = index;
 
        dev_set_name(&host->class_dev, "mmc%d", host->index);
        host->ws = wakeup_source_register(NULL, dev_name(&host->class_dev));
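After this change only dynamically allocated indices come from the IDA; an index pinned by a device-tree alias bypasses ida_simple_get() entirely, which is why the release hunk above returns the index to the IDA only when no "mmc" alias exists. A condensed sketch of that pairing (assuming of_alias_get_id() returns a negative errno when the alias is absent):

        /* Allocation side: the alias wins, the IDA is the fallback. */
        index = of_alias_get_id(dev->of_node, "mmc");
        if (index < 0)
                index = ida_simple_get(&mmc_host_ida, min_idx, max_idx,
                                       GFP_KERNEL);

        /* Release side: only IDA-allocated indices may be handed back;
         * freeing an alias-pinned index the IDA never issued would be a bug. */
        if (of_alias_get_id(dev->of_node, "mmc") < 0)
                ida_simple_remove(&mmc_host_ida, index);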
index 0db17bc..cb1a64a 100644 (file)
@@ -789,6 +789,8 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
                                break;
                        }
                }
+               fallthrough;
+
        case JZ4740_MMC_STATE_DONE:
                break;
        }
index 99b7986..6a6a2a2 100644 (file)
@@ -108,8 +108,8 @@ map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi
 #if BITS_PER_LONG >= 64
        case 8:
                onecmd |= (onecmd << (chip_mode * 32));
-#endif
                fallthrough;
+#endif
        case 4:
                onecmd |= (onecmd << (chip_mode * 16));
                fallthrough;
@@ -164,8 +164,8 @@ unsigned long cfi_merge_status(map_word val, struct map_info *map,
 #if BITS_PER_LONG >= 64
        case 8:
                res |= (onestat >> (chip_mode * 32));
-#endif
                fallthrough;
+#endif
        case 4:
                res |= (onestat >> (chip_mode * 16));
                fallthrough;
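Both fallthrough hunks above enforce the same rule: the fallthrough pseudo-keyword must immediately precede the next case label in every build configuration, so when a case is conditionally compiled the annotation has to live inside the same #if block. A minimal sketch of the corrected shape:

static unsigned long widen(unsigned long v, int width)
{
        switch (width) {
#if BITS_PER_LONG >= 64
        case 8:
                v |= v << 32;
                fallthrough;    /* compiled out together with "case 8", so it
                                 * never dangles before "case 4" on 32-bit */
#endif
        case 4:
                v |= v << 16;
                fallthrough;
        case 2:
                v |= v << 8;
                break;
        }
        return v;
}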
index 0ff7567..31730ef 100644 (file)
@@ -401,24 +401,85 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
 static int bond_ipsec_add_sa(struct xfrm_state *xs)
 {
        struct net_device *bond_dev = xs->xso.dev;
+       struct bond_ipsec *ipsec;
        struct bonding *bond;
        struct slave *slave;
+       int err;
 
        if (!bond_dev)
                return -EINVAL;
 
+       rcu_read_lock();
        bond = netdev_priv(bond_dev);
        slave = rcu_dereference(bond->curr_active_slave);
-       xs->xso.real_dev = slave->dev;
-       bond->xs = xs;
+       if (!slave) {
+               rcu_read_unlock();
+               return -ENODEV;
+       }
 
-       if (!(slave->dev->xfrmdev_ops
-             && slave->dev->xfrmdev_ops->xdo_dev_state_add)) {
+       if (!slave->dev->xfrmdev_ops ||
+           !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
+           netif_is_bond_master(slave->dev)) {
                slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
+               rcu_read_unlock();
                return -EINVAL;
        }
 
-       return slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
+       ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
+       if (!ipsec) {
+               rcu_read_unlock();
+               return -ENOMEM;
+       }
+       xs->xso.real_dev = slave->dev;
+
+       err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
+       if (!err) {
+               ipsec->xs = xs;
+               INIT_LIST_HEAD(&ipsec->list);
+               spin_lock_bh(&bond->ipsec_lock);
+               list_add(&ipsec->list, &bond->ipsec_list);
+               spin_unlock_bh(&bond->ipsec_lock);
+       } else {
+               kfree(ipsec);
+       }
+       rcu_read_unlock();
+       return err;
+}
+
+static void bond_ipsec_add_sa_all(struct bonding *bond)
+{
+       struct net_device *bond_dev = bond->dev;
+       struct bond_ipsec *ipsec;
+       struct slave *slave;
+
+       rcu_read_lock();
+       slave = rcu_dereference(bond->curr_active_slave);
+       if (!slave)
+               goto out;
+
+       if (!slave->dev->xfrmdev_ops ||
+           !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
+           netif_is_bond_master(slave->dev)) {
+               spin_lock_bh(&bond->ipsec_lock);
+               if (!list_empty(&bond->ipsec_list))
+                       slave_warn(bond_dev, slave->dev,
+                                  "%s: no slave xdo_dev_state_add\n",
+                                  __func__);
+               spin_unlock_bh(&bond->ipsec_lock);
+               goto out;
+       }
+
+       spin_lock_bh(&bond->ipsec_lock);
+       list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+               ipsec->xs->xso.real_dev = slave->dev;
+               if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) {
+                       slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
+                       ipsec->xs->xso.real_dev = NULL;
+               }
+       }
+       spin_unlock_bh(&bond->ipsec_lock);
+out:
+       rcu_read_unlock();
 }
 
 /**
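The rewritten add path above combines three idioms worth noting: the active slave is only valid under rcu_read_lock(), the tracking entry is allocated with GFP_ATOMIC because sleeping inside the RCU section is forbidden, and the entry is published to the list only after the device accepted the state. Boiled down to its skeleton (offload_state_to() is a placeholder, not bonding's code):

        rcu_read_lock();
        slave = rcu_dereference(bond->curr_active_slave);
        if (!slave) {
                rcu_read_unlock();
                return -ENODEV;
        }

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);    /* no sleeping under RCU */
        if (!entry) {
                rcu_read_unlock();
                return -ENOMEM;
        }

        err = offload_state_to(slave, entry);           /* hypothetical helper */
        if (!err) {
                spin_lock_bh(&bond->ipsec_lock);        /* _bh: the list is also
                                                         * touched from softirq */
                list_add(&entry->list, &bond->ipsec_list);
                spin_unlock_bh(&bond->ipsec_lock);
        } else {
                kfree(entry);                           /* never published */
        }
        rcu_read_unlock();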
@@ -428,27 +489,77 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs)
 static void bond_ipsec_del_sa(struct xfrm_state *xs)
 {
        struct net_device *bond_dev = xs->xso.dev;
+       struct bond_ipsec *ipsec;
        struct bonding *bond;
        struct slave *slave;
 
        if (!bond_dev)
                return;
 
+       rcu_read_lock();
        bond = netdev_priv(bond_dev);
        slave = rcu_dereference(bond->curr_active_slave);
 
        if (!slave)
-               return;
+               goto out;
 
-       xs->xso.real_dev = slave->dev;
+       if (!xs->xso.real_dev)
+               goto out;
+
+       WARN_ON(xs->xso.real_dev != slave->dev);
 
-       if (!(slave->dev->xfrmdev_ops
-             && slave->dev->xfrmdev_ops->xdo_dev_state_delete)) {
+       if (!slave->dev->xfrmdev_ops ||
+           !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
+           netif_is_bond_master(slave->dev)) {
                slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
-               return;
+               goto out;
        }
 
        slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
+out:
+       spin_lock_bh(&bond->ipsec_lock);
+       list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+               if (ipsec->xs == xs) {
+                       list_del(&ipsec->list);
+                       kfree(ipsec);
+                       break;
+               }
+       }
+       spin_unlock_bh(&bond->ipsec_lock);
+       rcu_read_unlock();
+}
+
+static void bond_ipsec_del_sa_all(struct bonding *bond)
+{
+       struct net_device *bond_dev = bond->dev;
+       struct bond_ipsec *ipsec;
+       struct slave *slave;
+
+       rcu_read_lock();
+       slave = rcu_dereference(bond->curr_active_slave);
+       if (!slave) {
+               rcu_read_unlock();
+               return;
+       }
+
+       spin_lock_bh(&bond->ipsec_lock);
+       list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+               if (!ipsec->xs->xso.real_dev)
+                       continue;
+
+               if (!slave->dev->xfrmdev_ops ||
+                   !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
+                   netif_is_bond_master(slave->dev)) {
+                       slave_warn(bond_dev, slave->dev,
+                                  "%s: no slave xdo_dev_state_delete\n",
+                                  __func__);
+               } else {
+                       slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
+               }
+               ipsec->xs->xso.real_dev = NULL;
+       }
+       spin_unlock_bh(&bond->ipsec_lock);
+       rcu_read_unlock();
 }
 
 /**
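One detail of the removal loop above: calling list_del() inside list_for_each_entry() is safe only because the walk stops with break immediately afterwards. If more than one entry could match, the _safe iterator with its lookahead cursor would be required; a sketch of that variant for contrast:

        struct bond_ipsec *ipsec, *tmp;

        spin_lock_bh(&bond->ipsec_lock);
        list_for_each_entry_safe(ipsec, tmp, &bond->ipsec_list, list) {
                if (ipsec->xs == xs) {
                        list_del(&ipsec->list); /* tmp already points past us */
                        kfree(ipsec);
                }
        }
        spin_unlock_bh(&bond->ipsec_lock);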
@@ -459,21 +570,37 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs)
 static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
 {
        struct net_device *bond_dev = xs->xso.dev;
-       struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *curr_active = rcu_dereference(bond->curr_active_slave);
-       struct net_device *slave_dev = curr_active->dev;
+       struct net_device *real_dev;
+       struct slave *curr_active;
+       struct bonding *bond;
+       int err;
 
-       if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
-               return true;
+       bond = netdev_priv(bond_dev);
+       rcu_read_lock();
+       curr_active = rcu_dereference(bond->curr_active_slave);
+       real_dev = curr_active->dev;
 
-       if (!(slave_dev->xfrmdev_ops
-             && slave_dev->xfrmdev_ops->xdo_dev_offload_ok)) {
-               slave_warn(bond_dev, slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__);
-               return false;
+       if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
+               err = false;
+               goto out;
        }
 
-       xs->xso.real_dev = slave_dev;
-       return slave_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
+       if (!xs->xso.real_dev) {
+               err = false;
+               goto out;
+       }
+
+       if (!real_dev->xfrmdev_ops ||
+           !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
+           netif_is_bond_master(real_dev)) {
+               err = false;
+               goto out;
+       }
+
+       err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
+out:
+       rcu_read_unlock();
+       return err;
 }
 
 static const struct xfrmdev_ops bond_xfrmdev_ops = {
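The offload_ok rewrite above trades early returns for a single exit so the newly added rcu_read_lock() can never be leaked. The shape, reduced to its skeleton (precondition() and device_check() are placeholders, not real callbacks):

static bool offload_ok_shape(void)
{
        bool ok;

        rcu_read_lock();
        if (!precondition()) {          /* any of the mode/real_dev/ops checks */
                ok = false;
                goto out;
        }
        ok = device_check();            /* the driver's xdo_dev_offload_ok() */
out:
        rcu_read_unlock();
        return ok;
}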
@@ -990,8 +1117,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                return;
 
 #ifdef CONFIG_XFRM_OFFLOAD
-       if (old_active && bond->xs)
-               bond_ipsec_del_sa(bond->xs);
+       bond_ipsec_del_sa_all(bond);
 #endif /* CONFIG_XFRM_OFFLOAD */
 
        if (new_active) {
@@ -1066,10 +1192,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
        }
 
 #ifdef CONFIG_XFRM_OFFLOAD
-       if (new_active && bond->xs) {
-               xfrm_dev_state_flush(dev_net(bond->dev), bond->dev, true);
-               bond_ipsec_add_sa(bond->xs);
-       }
+       bond_ipsec_add_sa_all(bond);
 #endif /* CONFIG_XFRM_OFFLOAD */
 
        /* resend IGMP joins since active slave has changed or
@@ -3327,6 +3450,9 @@ static int bond_master_netdev_event(unsigned long event,
                return bond_event_changename(event_bond);
        case NETDEV_UNREGISTER:
                bond_remove_proc_entry(event_bond);
+#ifdef CONFIG_XFRM_OFFLOAD
+               xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true);
+#endif /* CONFIG_XFRM_OFFLOAD */
                break;
        case NETDEV_REGISTER:
                bond_create_proc_entry(event_bond);
@@ -4894,7 +5020,8 @@ void bond_setup(struct net_device *bond_dev)
 #ifdef CONFIG_XFRM_OFFLOAD
        /* set up xfrm device ops (only supported in active-backup right now) */
        bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
-       bond->xs = NULL;
+       INIT_LIST_HEAD(&bond->ipsec_list);
+       spin_lock_init(&bond->ipsec_lock);
 #endif /* CONFIG_XFRM_OFFLOAD */
 
        /* don't acquire bond device's netif_tx_lock when transmitting */
index a77124b..709660c 100644 (file)
@@ -20,15 +20,6 @@ config CAIF_TTY
          identified as N_CAIF. When this ldisc is opened from user space
          it will redirect the TTY's traffic into the CAIF stack.
 
-config CAIF_HSI
-       tristate "CAIF HSI transport driver"
-       depends on CAIF
-       default n
-       help
-         The CAIF low level driver for CAIF over HSI.
-         Be aware that if you enable this then you also need to
-         enable a low-level HSI driver.
-
 config CAIF_VIRTIO
        tristate "CAIF virtio transport driver"
        depends on CAIF && HAS_DMA
index b1918c8..97f664f 100644 (file)
@@ -4,8 +4,5 @@ ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG
 # Serial interface
 obj-$(CONFIG_CAIF_TTY) += caif_serial.o
 
-# HSI interface
-obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
-
 # Virtio interface
 obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
deleted file mode 100644 (file)
index 3d63b15..0000000
+++ /dev/null
@@ -1,1454 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Author:  Daniel Martensson
- *         Dmitry.Tarnyagin  / dmitry.tarnyagin@lockless.no
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/netdevice.h>
-#include <linux/string.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/if_arp.h>
-#include <linux/timer.h>
-#include <net/rtnetlink.h>
-#include <linux/pkt_sched.h>
-#include <net/caif/caif_layer.h>
-#include <net/caif/caif_hsi.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Daniel Martensson");
-MODULE_DESCRIPTION("CAIF HSI driver");
-
-/* Returns the number of padding bytes for alignment. */
-#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
-                               (((pow)-((x)&((pow)-1)))))
-
-static const struct cfhsi_config  hsi_default_config = {
-
-       /* Inactivity timeout on HSI, ms */
-       .inactivity_timeout = HZ,
-
-       /* Aggregation timeout (ms) of zero means no aggregation is done */
-       .aggregation_timeout = 1,
-
-       /*
-        * HSI link layer flow-control thresholds.
-        * Threshold values for the HSI packet queue. Flow-control will be
-        * asserted when the number of packets exceeds q_high_mark. It will
-        * not be de-asserted before the number of packets drops below
-        * q_low_mark.
-        * Warning: A high threshold value might increase throughput but it
-        * will at the same time prevent channel prioritization and increase
-        * the risk of flooding the modem. The high threshold should be above
-        * the low.
-        */
-       .q_high_mark = 100,
-       .q_low_mark = 50,
-
-       /*
-        * HSI padding options.
-        * Warning: must be a power of 2 (& operation used) and cannot be zero!
-        */
-       .head_align = 4,
-       .tail_align = 4,
-};
-
-#define ON 1
-#define OFF 0
-
-static LIST_HEAD(cfhsi_list);
-
-static void cfhsi_inactivity_tout(struct timer_list *t)
-{
-       struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer);
-
-       netdev_dbg(cfhsi->ndev, "%s.\n",
-               __func__);
-
-       /* Schedule power down work queue. */
-       if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
-               queue_work(cfhsi->wq, &cfhsi->wake_down_work);
-}
-
-static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
-                                          const struct sk_buff *skb,
-                                          int direction)
-{
-       struct caif_payload_info *info;
-       int hpad, tpad, len;
-
-       info = (struct caif_payload_info *)&skb->cb;
-       hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
-       tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
-       len = skb->len + hpad + tpad;
-
-       if (direction > 0)
-               cfhsi->aggregation_len += len;
-       else if (direction < 0)
-               cfhsi->aggregation_len -= len;
-}
-
-static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
-{
-       int i;
-
-       if (cfhsi->cfg.aggregation_timeout == 0)
-               return true;
-
-       for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
-               if (cfhsi->qhead[i].qlen)
-                       return true;
-       }
-
-       /* TODO: Use aggregation_len instead */
-       if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
-               return true;
-
-       return false;
-}
-
-static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
-{
-       struct sk_buff *skb;
-       int i;
-
-       for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
-               skb = skb_dequeue(&cfhsi->qhead[i]);
-               if (skb)
-                       break;
-       }
-
-       return skb;
-}
-
-static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
-{
-       int i, len = 0;
-       for (i = 0; i < CFHSI_PRIO_LAST; ++i)
-               len += skb_queue_len(&cfhsi->qhead[i]);
-       return len;
-}
-
-static void cfhsi_abort_tx(struct cfhsi *cfhsi)
-{
-       struct sk_buff *skb;
-
-       for (;;) {
-               spin_lock_bh(&cfhsi->lock);
-               skb = cfhsi_dequeue(cfhsi);
-               if (!skb)
-                       break;
-
-               cfhsi->ndev->stats.tx_errors++;
-               cfhsi->ndev->stats.tx_dropped++;
-               cfhsi_update_aggregation_stats(cfhsi, skb, -1);
-               spin_unlock_bh(&cfhsi->lock);
-               kfree_skb(skb);
-       }
-       cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
-       if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
-               mod_timer(&cfhsi->inactivity_timer,
-                       jiffies + cfhsi->cfg.inactivity_timeout);
-       spin_unlock_bh(&cfhsi->lock);
-}
-
-static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
-{
-       char buffer[32]; /* Any reasonable value */
-       size_t fifo_occupancy;
-       int ret;
-
-       netdev_dbg(cfhsi->ndev, "%s.\n",
-               __func__);
-
-       do {
-               ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
-                               &fifo_occupancy);
-               if (ret) {
-                       netdev_warn(cfhsi->ndev,
-                               "%s: can't get FIFO occupancy: %d.\n",
-                               __func__, ret);
-                       break;
-               } else if (!fifo_occupancy)
-                       /* No more data, exiting normally */
-                       break;
-
-               fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
-               set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
-               ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
-                               cfhsi->ops);
-               if (ret) {
-                       clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
-                       netdev_warn(cfhsi->ndev,
-                               "%s: can't read data: %d.\n",
-                               __func__, ret);
-                       break;
-               }
-
-               ret = 5 * HZ;
-               ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
-                        !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
-
-               if (ret < 0) {
-                       netdev_warn(cfhsi->ndev,
-                               "%s: can't wait for flush complete: %d.\n",
-                               __func__, ret);
-                       break;
-               } else if (!ret) {
-                       ret = -ETIMEDOUT;
-                       netdev_warn(cfhsi->ndev,
-                               "%s: timeout waiting for flush complete.\n",
-                               __func__);
-                       break;
-               }
-       } while (1);
-
-       return ret;
-}
-
-static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
-{
-       int nfrms = 0;
-       int pld_len = 0;
-       struct sk_buff *skb;
-       u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
-
-       skb = cfhsi_dequeue(cfhsi);
-       if (!skb)
-               return 0;
-
-       /* Clear offset. */
-       desc->offset = 0;
-
-       /* Check if we can embed a CAIF frame. */
-       if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
-               struct caif_payload_info *info;
-               int hpad;
-               int tpad;
-
-               /* Calculate needed head alignment and tail alignment. */
-               info = (struct caif_payload_info *)&skb->cb;
-
-               hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
-               tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
-
-               /* Check if frame still fits with added alignment. */
-               if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
-                       u8 *pemb = desc->emb_frm;
-                       desc->offset = CFHSI_DESC_SHORT_SZ;
-                       *pemb = (u8)(hpad - 1);
-                       pemb += hpad;
-
-                       /* Update network statistics. */
-                       spin_lock_bh(&cfhsi->lock);
-                       cfhsi->ndev->stats.tx_packets++;
-                       cfhsi->ndev->stats.tx_bytes += skb->len;
-                       cfhsi_update_aggregation_stats(cfhsi, skb, -1);
-                       spin_unlock_bh(&cfhsi->lock);
-
-                       /* Copy in embedded CAIF frame. */
-                       skb_copy_bits(skb, 0, pemb, skb->len);
-
-                       /* Consume the SKB */
-                       consume_skb(skb);
-                       skb = NULL;
-               }
-       }
-
-       /* Create payload CAIF frames. */
-       while (nfrms < CFHSI_MAX_PKTS) {
-               struct caif_payload_info *info;
-               int hpad;
-               int tpad;
-
-               if (!skb)
-                       skb = cfhsi_dequeue(cfhsi);
-
-               if (!skb)
-                       break;
-
-               /* Calculate needed head alignment and tail alignment. */
-               info = (struct caif_payload_info *)&skb->cb;
-
-               hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
-               tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
-
-               /* Fill in CAIF frame length in descriptor. */
-               desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
-
-               /* Fill head padding information. */
-               *pfrm = (u8)(hpad - 1);
-               pfrm += hpad;
-
-               /* Update network statistics. */
-               spin_lock_bh(&cfhsi->lock);
-               cfhsi->ndev->stats.tx_packets++;
-               cfhsi->ndev->stats.tx_bytes += skb->len;
-               cfhsi_update_aggregation_stats(cfhsi, skb, -1);
-               spin_unlock_bh(&cfhsi->lock);
-
-               /* Copy in CAIF frame. */
-               skb_copy_bits(skb, 0, pfrm, skb->len);
-
-               /* Update payload length. */
-               pld_len += desc->cffrm_len[nfrms];
-
-               /* Update frame pointer. */
-               pfrm += skb->len + tpad;
-
-               /* Consume the SKB */
-               consume_skb(skb);
-               skb = NULL;
-
-               /* Update number of frames. */
-               nfrms++;
-       }
-
-       /* Unused length fields should be zero-filled (according to SPEC). */
-       while (nfrms < CFHSI_MAX_PKTS) {
-               desc->cffrm_len[nfrms] = 0x0000;
-               nfrms++;
-       }
-
-       /* Check if we can piggy-back another descriptor. */
-       if (cfhsi_can_send_aggregate(cfhsi))
-               desc->header |= CFHSI_PIGGY_DESC;
-       else
-               desc->header &= ~CFHSI_PIGGY_DESC;
-
-       return CFHSI_DESC_SZ + pld_len;
-}
-
-static void cfhsi_start_tx(struct cfhsi *cfhsi)
-{
-       struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
-       int len, res;
-
-       netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
-
-       if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
-               return;
-
-       do {
-               /* Create HSI frame. */
-               len = cfhsi_tx_frm(desc, cfhsi);
-               if (!len) {
-                       spin_lock_bh(&cfhsi->lock);
-                       if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
-                               spin_unlock_bh(&cfhsi->lock);
-                               res = -EAGAIN;
-                               continue;
-                       }
-                       cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
-                       /* Start inactivity timer. */
-                       mod_timer(&cfhsi->inactivity_timer,
-                               jiffies + cfhsi->cfg.inactivity_timeout);
-                       spin_unlock_bh(&cfhsi->lock);
-                       break;
-               }
-
-               /* Set up new transfer. */
-               res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
-               if (WARN_ON(res < 0))
-                       netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
-                               __func__, res);
-       } while (res < 0);
-}
-
-static void cfhsi_tx_done(struct cfhsi *cfhsi)
-{
-       netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
-
-       if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
-               return;
-
-       /*
-        * Send flow on if flow off has been previously signalled
-        * and number of packets is below low water mark.
-        */
-       spin_lock_bh(&cfhsi->lock);
-       if (cfhsi->flow_off_sent &&
-                       cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
-                       cfhsi->cfdev.flowctrl) {
-
-               cfhsi->flow_off_sent = 0;
-               cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
-       }
-
-       if (cfhsi_can_send_aggregate(cfhsi)) {
-               spin_unlock_bh(&cfhsi->lock);
-               cfhsi_start_tx(cfhsi);
-       } else {
-               mod_timer(&cfhsi->aggregation_timer,
-                       jiffies + cfhsi->cfg.aggregation_timeout);
-               spin_unlock_bh(&cfhsi->lock);
-       }
-
-       return;
-}
-
-static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
-{
-       struct cfhsi *cfhsi;
-
-       cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
-       netdev_dbg(cfhsi->ndev, "%s.\n",
-               __func__);
-
-       if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
-               return;
-       cfhsi_tx_done(cfhsi);
-}
-
-static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
-{
-       int xfer_sz = 0;
-       int nfrms = 0;
-       u16 *plen = NULL;
-       u8 *pfrm = NULL;
-
-       if ((desc->header & ~CFHSI_PIGGY_DESC) ||
-                       (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
-               netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
-                       __func__);
-               return -EPROTO;
-       }
-
-       /* Check for embedded CAIF frame. */
-       if (desc->offset) {
-               struct sk_buff *skb;
-               int len = 0;
-               pfrm = ((u8 *)desc) + desc->offset;
-
-               /* Remove offset padding. */
-               pfrm += *pfrm + 1;
-
-               /* Read length of CAIF frame (little endian). */
-               len = *pfrm;
-               len |= ((*(pfrm+1)) << 8) & 0xFF00;
-               len += 2;       /* Add FCS fields. */
-
-               /* Sanity check length of CAIF frame. */
-               if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
-                       netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
-                               __func__);
-                       return -EPROTO;
-               }
-
-               /* Allocate SKB (OK even in IRQ context). */
-               skb = alloc_skb(len + 1, GFP_ATOMIC);
-               if (!skb) {
-                       netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
-                               __func__);
-                       return -ENOMEM;
-               }
-               caif_assert(skb != NULL);
-
-               skb_put_data(skb, pfrm, len);
-
-               skb->protocol = htons(ETH_P_CAIF);
-               skb_reset_mac_header(skb);
-               skb->dev = cfhsi->ndev;
-
-               netif_rx_any_context(skb);
-
-               /* Update network statistics. */
-               cfhsi->ndev->stats.rx_packets++;
-               cfhsi->ndev->stats.rx_bytes += len;
-       }
-
-       /* Calculate transfer length. */
-       plen = desc->cffrm_len;
-       while (nfrms < CFHSI_MAX_PKTS && *plen) {
-               xfer_sz += *plen;
-               plen++;
-               nfrms++;
-       }
-
-       /* Check for piggy-backed descriptor. */
-       if (desc->header & CFHSI_PIGGY_DESC)
-               xfer_sz += CFHSI_DESC_SZ;
-
-       if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
-               netdev_err(cfhsi->ndev,
-                               "%s: Invalid payload len: %d, ignored.\n",
-                       __func__, xfer_sz);
-               return -EPROTO;
-       }
-       return xfer_sz;
-}
-
-static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
-{
-       int xfer_sz = 0;
-       int nfrms = 0;
-       u16 *plen;
-
-       if ((desc->header & ~CFHSI_PIGGY_DESC) ||
-                       (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
-
-               pr_err("Invalid descriptor. %x %x\n", desc->header,
-                               desc->offset);
-               return -EPROTO;
-       }
-
-       /* Calculate transfer length. */
-       plen = desc->cffrm_len;
-       while (nfrms < CFHSI_MAX_PKTS && *plen) {
-               xfer_sz += *plen;
-               plen++;
-               nfrms++;
-       }
-
-       if (xfer_sz % 4) {
-               pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
-               return -EPROTO;
-       }
-       return xfer_sz;
-}
-
-static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
-{
-       int rx_sz = 0;
-       int nfrms = 0;
-       u16 *plen = NULL;
-       u8 *pfrm = NULL;
-
-       /* Sanity check header and offset. */
-       if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
-                       (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
-               netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
-                       __func__);
-               return -EPROTO;
-       }
-
-       /* Set frame pointer to start of payload. */
-       pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
-       plen = desc->cffrm_len;
-
-       /* Skip already processed frames. */
-       while (nfrms < cfhsi->rx_state.nfrms) {
-               pfrm += *plen;
-               rx_sz += *plen;
-               plen++;
-               nfrms++;
-       }
-
-       /* Parse payload. */
-       while (nfrms < CFHSI_MAX_PKTS && *plen) {
-               struct sk_buff *skb;
-               u8 *pcffrm = NULL;
-               int len;
-
-               /* CAIF frame starts after head padding. */
-               pcffrm = pfrm + *pfrm + 1;
-
-               /* Read length of CAIF frame (little endian). */
-               len = *pcffrm;
-               len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
-               len += 2;       /* Add FCS fields. */
-
-               /* Sanity check length of CAIF frames. */
-               if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
-                       netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
-                               __func__);
-                       return -EPROTO;
-               }
-
-               /* Allocate SKB (OK even in IRQ context). */
-               skb = alloc_skb(len + 1, GFP_ATOMIC);
-               if (!skb) {
-                       netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
-                               __func__);
-                       cfhsi->rx_state.nfrms = nfrms;
-                       return -ENOMEM;
-               }
-               caif_assert(skb != NULL);
-
-               skb_put_data(skb, pcffrm, len);
-
-               skb->protocol = htons(ETH_P_CAIF);
-               skb_reset_mac_header(skb);
-               skb->dev = cfhsi->ndev;
-
-               netif_rx_any_context(skb);
-
-               /* Update network statistics. */
-               cfhsi->ndev->stats.rx_packets++;
-               cfhsi->ndev->stats.rx_bytes += len;
-
-               pfrm += *plen;
-               rx_sz += *plen;
-               plen++;
-               nfrms++;
-       }
-
-       return rx_sz;
-}
-
-static void cfhsi_rx_done(struct cfhsi *cfhsi)
-{
-       int res;
-       int desc_pld_len = 0, rx_len, rx_state;
-       struct cfhsi_desc *desc = NULL;
-       u8 *rx_ptr, *rx_buf;
-       struct cfhsi_desc *piggy_desc = NULL;
-
-       desc = (struct cfhsi_desc *)cfhsi->rx_buf;
-
-       netdev_dbg(cfhsi->ndev, "%s\n", __func__);
-
-       if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
-               return;
-
-       /* Update inactivity timer if pending. */
-       spin_lock_bh(&cfhsi->lock);
-       mod_timer_pending(&cfhsi->inactivity_timer,
-                       jiffies + cfhsi->cfg.inactivity_timeout);
-       spin_unlock_bh(&cfhsi->lock);
-
-       if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
-               desc_pld_len = cfhsi_rx_desc_len(desc);
-
-               if (desc_pld_len < 0)
-                       goto out_of_sync;
-
-               rx_buf = cfhsi->rx_buf;
-               rx_len = desc_pld_len;
-               if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
-                       rx_len += CFHSI_DESC_SZ;
-               if (desc_pld_len == 0)
-                       rx_buf = cfhsi->rx_flip_buf;
-       } else {
-               rx_buf = cfhsi->rx_flip_buf;
-
-               rx_len = CFHSI_DESC_SZ;
-               if (cfhsi->rx_state.pld_len > 0 &&
-                               (desc->header & CFHSI_PIGGY_DESC)) {
-
-                       piggy_desc = (struct cfhsi_desc *)
-                               (desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
-                                               cfhsi->rx_state.pld_len);
-
-                       cfhsi->rx_state.piggy_desc = true;
-
-                       /* Extract payload len from piggy-backed descriptor. */
-                       desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
-                       if (desc_pld_len < 0)
-                               goto out_of_sync;
-
-                       if (desc_pld_len > 0) {
-                               rx_len = desc_pld_len;
-                               if (piggy_desc->header & CFHSI_PIGGY_DESC)
-                                       rx_len += CFHSI_DESC_SZ;
-                       }
-
-                       /*
-                        * Copy needed information from the piggy-backed
-                        * descriptor to the descriptor in the start.
-                        */
-                       memcpy(rx_buf, (u8 *)piggy_desc,
-                                       CFHSI_DESC_SHORT_SZ);
-               }
-       }
-
-       if (desc_pld_len) {
-               rx_state = CFHSI_RX_STATE_PAYLOAD;
-               rx_ptr = rx_buf + CFHSI_DESC_SZ;
-       } else {
-               rx_state = CFHSI_RX_STATE_DESC;
-               rx_ptr = rx_buf;
-               rx_len = CFHSI_DESC_SZ;
-       }
-
-       /* Initiate next read */
-       if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
-               /* Set up new transfer. */
-               netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
-                               __func__);
-
-               res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
-                               cfhsi->ops);
-               if (WARN_ON(res < 0)) {
-                       netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
-                               __func__, res);
-                       cfhsi->ndev->stats.rx_errors++;
-                       cfhsi->ndev->stats.rx_dropped++;
-               }
-       }
-
-       if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
-               /* Extract payload from descriptor */
-               if (cfhsi_rx_desc(desc, cfhsi) < 0)
-                       goto out_of_sync;
-       } else {
-               /* Extract payload */
-               if (cfhsi_rx_pld(desc, cfhsi) < 0)
-                       goto out_of_sync;
-               if (piggy_desc) {
-                       /* Extract any payload in piggyback descriptor. */
-                       if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
-                               goto out_of_sync;
-                       /* Mark no embedded frame after extracting it */
-                       piggy_desc->offset = 0;
-               }
-       }
-
-       /* Update state info */
-       memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
-       cfhsi->rx_state.state = rx_state;
-       cfhsi->rx_ptr = rx_ptr;
-       cfhsi->rx_len = rx_len;
-       cfhsi->rx_state.pld_len = desc_pld_len;
-       cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;
-
-       if (rx_buf != cfhsi->rx_buf)
-               swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
-       return;
-
-out_of_sync:
-       netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
-       print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
-                       cfhsi->rx_buf, CFHSI_DESC_SZ);
-       schedule_work(&cfhsi->out_of_sync_work);
-}
-
-static void cfhsi_rx_slowpath(struct timer_list *t)
-{
-       struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer);
-
-       netdev_dbg(cfhsi->ndev, "%s.\n",
-               __func__);
-
-       cfhsi_rx_done(cfhsi);
-}
-
-static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
-{
-       struct cfhsi *cfhsi;
-
-       cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
-       netdev_dbg(cfhsi->ndev, "%s.\n",
-               __func__);
-
-       if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
-               return;
-
-       if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
-               wake_up_interruptible(&cfhsi->flush_fifo_wait);
-       else
-               cfhsi_rx_done(cfhsi);
-}
-
-static void cfhsi_wake_up(struct work_struct *work)
-{
-       struct cfhsi *cfhsi = NULL;
-       int res;
-       int len;
-       long ret;
-
-       cfhsi = container_of(work, struct cfhsi, wake_up_work);
-
-       if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
-               return;
-
-       if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
-               /* This happens when wakeup is requested by
-                * both ends at the same time. */
-               clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
-               clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
-               return;
-       }
-
-       /* Activate wake line. */
-       cfhsi->ops->cfhsi_wake_up(cfhsi->ops);
-
-       netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
-               __func__);
-
-       /* Wait for acknowledge. */
-       ret = CFHSI_WAKE_TOUT;
-       ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
-                                       test_and_clear_bit(CFHSI_WAKE_UP_ACK,
-                                                       &cfhsi->bits), ret);
-       if (unlikely(ret < 0)) {
-               /* Interrupted by signal. */
-               netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
-                       __func__, ret);
-
-               clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
-               cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
-               return;
-       } else if (!ret) {
-               bool ca_wake = false;
-               size_t fifo_occupancy = 0;
-
-               /* Wakeup timeout */
-               netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
-                       __func__);
-
-               /* Check FIFO to check if modem has sent something. */
-               WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
-                                       &fifo_occupancy));
-
-               netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
-                               __func__, (unsigned) fifo_occupancy);
-
-               /* Check if we missed the interrupt. */
-               WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
-                                                       &ca_wake));
-
-               if (ca_wake) {
-                       netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
-                               __func__);
-
-                       /* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
-                       clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
-
-                       /* Continue execution. */
-                       goto wake_ack;
-               }
-
-               clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
-               cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
-               return;
-       }
-wake_ack:
-       netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
-               __func__);
-
-       /* Clear power up bit. */
-       set_bit(CFHSI_AWAKE, &cfhsi->bits);
-       clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
-
-       /* Resume read operation. */
-       netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
-       res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);
-
-       if (WARN_ON(res < 0))
-               netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);
-
-       /* Clear power up acknowledgment. */
-       clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
-
-       spin_lock_bh(&cfhsi->lock);
-
-       /* Resume transmit if queues are not empty. */
-       if (!cfhsi_tx_queue_len(cfhsi)) {
-               netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
-                       __func__);
-               /* Start inactivity timer. */
-               mod_timer(&cfhsi->inactivity_timer,
-                               jiffies + cfhsi->cfg.inactivity_timeout);
-               spin_unlock_bh(&cfhsi->lock);
-               return;
-       }
-
-       netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
-               __func__);
-
-       spin_unlock_bh(&cfhsi->lock);
-
-       /* Create HSI frame. */
-       len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);
-
-       if (likely(len > 0)) {
-               /* Set up new transfer. */
-               res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
-               if (WARN_ON(res < 0)) {
-                       netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
-                               __func__, res);
-                       cfhsi_abort_tx(cfhsi);
-               }
-       } else {
-               netdev_err(cfhsi->ndev,
-                               "%s: Failed to create HSI frame: %d.\n",
-                               __func__, len);
-       }
-}
-
-static void cfhsi_wake_down(struct work_struct *work)
-{
-       long ret;
-       struct cfhsi *cfhsi = NULL;
-       size_t fifo_occupancy = 0;
-       int retry = CFHSI_WAKE_TOUT;
-
-       cfhsi = container_of(work, struct cfhsi, wake_down_work);
-       netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
-
-       if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
-               return;
-
-       /* Deactivate wake line. */
-       cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
-
-       /* Wait for acknowledge. */
-       ret = CFHSI_WAKE_TOUT;
-       ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
-                                       test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
-                                                       &cfhsi->bits), ret);
-       if (ret < 0) {
-               /* Interrupted by signal. */
-               netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
-                       __func__, ret);
-               return;
-       } else if (!ret) {
-               bool ca_wake = true;
-
-               /* Timeout */
-               netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);
-
-               /* Check if we missed the interrupt. */
-               WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
-                                                       &ca_wake));
-               if (!ca_wake)
-                       netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
-                               __func__);
-       }
-
-       /* Check FIFO occupancy. */
-       while (retry) {
-               WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
-                                                       &fifo_occupancy));
-
-               if (!fifo_occupancy)
-                       break;
-
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(1);
-               retry--;
-       }
-
-       if (!retry)
-               netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);
-
-       /* Clear AWAKE condition. */
-       clear_bit(CFHSI_AWAKE, &cfhsi->bits);
-
-       /* Cancel pending RX requests. */
-       cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
-}
-
-static void cfhsi_out_of_sync(struct work_struct *work)
-{
-       struct cfhsi *cfhsi = NULL;
-
-       cfhsi = container_of(work, struct cfhsi, out_of_sync_work);
-
-       rtnl_lock();
-       dev_close(cfhsi->ndev);
-       rtnl_unlock();
-}
-
-static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
-{
-       struct cfhsi *cfhsi = NULL;
-
-       cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
-       netdev_dbg(cfhsi->ndev, "%s.\n",
-               __func__);
-
-       set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
-       wake_up_interruptible(&cfhsi->wake_up_wait);
-
-       if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
-               return;
-
-       /* Schedule wake up work queue if the peer initiates. */
-       if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
-               queue_work(cfhsi->wq, &cfhsi->wake_up_work);
-}
-
-static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
-{
-       struct cfhsi *cfhsi = NULL;
-
-       cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
-       netdev_dbg(cfhsi->ndev, "%s.\n",
-               __func__);
-
-       /* Initiating low power is only permitted by the host (us). */
-       set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
-       wake_up_interruptible(&cfhsi->wake_down_wait);
-}
-
-static void cfhsi_aggregation_tout(struct timer_list *t)
-{
-       struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer);
-
-       netdev_dbg(cfhsi->ndev, "%s.\n",
-               __func__);
-
-       cfhsi_start_tx(cfhsi);
-}
-
-static netdev_tx_t cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       struct cfhsi *cfhsi = NULL;
-       int start_xfer = 0;
-       int timer_active;
-       int prio;
-
-       if (!dev)
-               return -EINVAL;
-
-       cfhsi = netdev_priv(dev);
-
-       switch (skb->priority) {
-       case TC_PRIO_BESTEFFORT:
-       case TC_PRIO_FILLER:
-       case TC_PRIO_BULK:
-               prio = CFHSI_PRIO_BEBK;
-               break;
-       case TC_PRIO_INTERACTIVE_BULK:
-               prio = CFHSI_PRIO_VI;
-               break;
-       case TC_PRIO_INTERACTIVE:
-               prio = CFHSI_PRIO_VO;
-               break;
-       case TC_PRIO_CONTROL:
-       default:
-               prio = CFHSI_PRIO_CTL;
-               break;
-       }
-
-       spin_lock_bh(&cfhsi->lock);
-
-       /* Update aggregation statistics  */
-       cfhsi_update_aggregation_stats(cfhsi, skb, 1);
-
-       /* Queue the SKB */
-       skb_queue_tail(&cfhsi->qhead[prio], skb);
-
-       /* Sanity check; xmit should not be called after unregister_netdev */
-       if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
-               spin_unlock_bh(&cfhsi->lock);
-               cfhsi_abort_tx(cfhsi);
-               return -EINVAL;
-       }
-
-       /* Send flow off if number of packets is above high water mark. */
-       if (!cfhsi->flow_off_sent &&
-               cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
-               cfhsi->cfdev.flowctrl) {
-               cfhsi->flow_off_sent = 1;
-               cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
-       }
-
-       if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
-               cfhsi->tx_state = CFHSI_TX_STATE_XFER;
-               start_xfer = 1;
-       }
-
-       if (!start_xfer) {
-               /* Send aggregate if it is possible */
-               bool aggregate_ready =
-                       cfhsi_can_send_aggregate(cfhsi) &&
-                       del_timer(&cfhsi->aggregation_timer) > 0;
-               spin_unlock_bh(&cfhsi->lock);
-               if (aggregate_ready)
-                       cfhsi_start_tx(cfhsi);
-               return NETDEV_TX_OK;
-       }
-
-       /* Delete inactivity timer if started. */
-       timer_active = del_timer_sync(&cfhsi->inactivity_timer);
-
-       spin_unlock_bh(&cfhsi->lock);
-
-       if (timer_active) {
-               struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
-               int len;
-               int res;
-
-               /* Create HSI frame. */
-               len = cfhsi_tx_frm(desc, cfhsi);
-               WARN_ON(!len);
-
-               /* Set up new transfer. */
-               res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
-               if (WARN_ON(res < 0)) {
-                       netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
-                               __func__, res);
-                       cfhsi_abort_tx(cfhsi);
-               }
-       } else {
-               /* Schedule wake up work queue if we initiate. */
-               if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
-                       queue_work(cfhsi->wq, &cfhsi->wake_up_work);
-       }
-
-       return NETDEV_TX_OK;
-}
-
-static const struct net_device_ops cfhsi_netdevops;
-
-static void cfhsi_setup(struct net_device *dev)
-{
-       int i;
-       struct cfhsi *cfhsi = netdev_priv(dev);
-       dev->features = 0;
-       dev->type = ARPHRD_CAIF;
-       dev->flags = IFF_POINTOPOINT | IFF_NOARP;
-       dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
-       dev->priv_flags |= IFF_NO_QUEUE;
-       dev->needs_free_netdev = true;
-       dev->netdev_ops = &cfhsi_netdevops;
-       for (i = 0; i < CFHSI_PRIO_LAST; ++i)
-               skb_queue_head_init(&cfhsi->qhead[i]);
-       cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
-       cfhsi->cfdev.use_frag = false;
-       cfhsi->cfdev.use_stx = false;
-       cfhsi->cfdev.use_fcs = false;
-       cfhsi->ndev = dev;
-       cfhsi->cfg = hsi_default_config;
-}
-
-static int cfhsi_open(struct net_device *ndev)
-{
-       struct cfhsi *cfhsi = netdev_priv(ndev);
-       int res;
-
-       clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
-
-       /* Initialize state variables. */
-       cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
-       cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
-
-       /* Set flow info */
-       cfhsi->flow_off_sent = 0;
-
-       /*
-        * Allocate a TX buffer with the size of an HSI packet descriptor
-        * and the necessary room for CAIF payload frames.
-        */
-       cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
-       if (!cfhsi->tx_buf) {
-               res = -ENODEV;
-               goto err_alloc_tx;
-       }
-
-       /*
-        * Allocate an RX buffer with the size of two HSI packet descriptors and
-        * the necessary room for CAIF payload frames.
-        */
-       cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
-       if (!cfhsi->rx_buf) {
-               res = -ENODEV;
-               goto err_alloc_rx;
-       }
-
-       cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
-       if (!cfhsi->rx_flip_buf) {
-               res = -ENODEV;
-               goto err_alloc_rx_flip;
-       }
-
-       /* Initialize aggregation timeout */
-       cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;
-
-       /* Initialize receive variables. */
-       cfhsi->rx_ptr = cfhsi->rx_buf;
-       cfhsi->rx_len = CFHSI_DESC_SZ;
-
-       /* Initialize spin locks. */
-       spin_lock_init(&cfhsi->lock);
-
-       /* Set up the driver. */
-       cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
-       cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
-       cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
-       cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;
-
-       /* Initialize the work queues. */
-       INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
-       INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
-       INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);
-
-       /* Clear all bit fields. */
-       clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
-       clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
-       clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
-       clear_bit(CFHSI_AWAKE, &cfhsi->bits);
-
-       /* Create work thread. */
-       cfhsi->wq = alloc_ordered_workqueue(cfhsi->ndev->name, WQ_MEM_RECLAIM);
-       if (!cfhsi->wq) {
-               netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
-                       __func__);
-               res = -ENODEV;
-               goto err_create_wq;
-       }
-
-       /* Initialize wait queues. */
-       init_waitqueue_head(&cfhsi->wake_up_wait);
-       init_waitqueue_head(&cfhsi->wake_down_wait);
-       init_waitqueue_head(&cfhsi->flush_fifo_wait);
-
-       /* Setup the inactivity timer. */
-       timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0);
-       /* Setup the slowpath RX timer. */
-       timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0);
-       /* Setup the aggregation timer. */
-       timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0);
-
-       /* Activate HSI interface. */
-       res = cfhsi->ops->cfhsi_up(cfhsi->ops);
-       if (res) {
-               netdev_err(cfhsi->ndev,
-                       "%s: can't activate HSI interface: %d.\n",
-                       __func__, res);
-               goto err_activate;
-       }
-
-       /* Flush FIFO */
-       res = cfhsi_flush_fifo(cfhsi);
-       if (res) {
-               netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
-                       __func__, res);
-               goto err_net_reg;
-       }
-       return res;
-
- err_net_reg:
-       cfhsi->ops->cfhsi_down(cfhsi->ops);
- err_activate:
-       destroy_workqueue(cfhsi->wq);
- err_create_wq:
-       kfree(cfhsi->rx_flip_buf);
- err_alloc_rx_flip:
-       kfree(cfhsi->rx_buf);
- err_alloc_rx:
-       kfree(cfhsi->tx_buf);
- err_alloc_tx:
-       return res;
-}
-
-static int cfhsi_close(struct net_device *ndev)
-{
-       struct cfhsi *cfhsi = netdev_priv(ndev);
-       u8 *tx_buf, *rx_buf, *flip_buf;
-
-       /* going to shut down the driver */
-       set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
-
-       /* Delete timers if pending */
-       del_timer_sync(&cfhsi->inactivity_timer);
-       del_timer_sync(&cfhsi->rx_slowpath_timer);
-       del_timer_sync(&cfhsi->aggregation_timer);
-
-       /* Cancel pending RX request (if any) */
-       cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
-
-       /* Destroy workqueue */
-       destroy_workqueue(cfhsi->wq);
-
-       /* Store buffers: they will be freed later. */
-       tx_buf = cfhsi->tx_buf;
-       rx_buf = cfhsi->rx_buf;
-       flip_buf = cfhsi->rx_flip_buf;
-       /* Flush transmit queues. */
-       cfhsi_abort_tx(cfhsi);
-
-       /* Deactivate interface */
-       cfhsi->ops->cfhsi_down(cfhsi->ops);
-
-       /* Free buffers. */
-       kfree(tx_buf);
-       kfree(rx_buf);
-       kfree(flip_buf);
-       return 0;
-}
-
-static void cfhsi_uninit(struct net_device *dev)
-{
-       struct cfhsi *cfhsi = netdev_priv(dev);
-       ASSERT_RTNL();
-       symbol_put(cfhsi_get_device);
-       list_del(&cfhsi->list);
-}
-
-static const struct net_device_ops cfhsi_netdevops = {
-       .ndo_uninit = cfhsi_uninit,
-       .ndo_open = cfhsi_open,
-       .ndo_stop = cfhsi_close,
-       .ndo_start_xmit = cfhsi_xmit
-};
-
-static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
-{
-       int i;
-
-       if (!data) {
-               pr_debug("no params data found\n");
-               return;
-       }
-
-       i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
-       /*
-        * Inactivity timeout in millisecs. Lowest possible value is 1,
-        * and highest possible is NEXT_TIMER_MAX_DELTA.
-        */
-       if (data[i]) {
-               u32 inactivity_timeout = nla_get_u32(data[i]);
-               /* Pre-calculate inactivity timeout. */
-               cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000;
-               if (cfhsi->cfg.inactivity_timeout == 0)
-                       cfhsi->cfg.inactivity_timeout = 1;
-               else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
-                       cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
-       }
-
-       i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
-       if (data[i])
-               cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);
-
-       i = __IFLA_CAIF_HSI_HEAD_ALIGN;
-       if (data[i])
-               cfhsi->cfg.head_align = nla_get_u32(data[i]);
-
-       i = __IFLA_CAIF_HSI_TAIL_ALIGN;
-       if (data[i])
-               cfhsi->cfg.tail_align = nla_get_u32(data[i]);
-
-       i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
-       if (data[i])
-               cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);
-
-       i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
-       if (data[i])
-               cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
-}
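
The millisecond-to-jiffies conversion above is worth calling out: a result of 0 would leave the inactivity timer with no valid delta and anything above NEXT_TIMER_MAX_DELTA overflows the timer wheel, hence the clamp to [1, NEXT_TIMER_MAX_DELTA]. A standalone sketch of the same conversion (hypothetical helper name; HZ and NEXT_TIMER_MAX_DELTA come from <linux/jiffies.h> and <linux/timer.h>):

    static unsigned long inactivity_ms_to_jiffies(u32 ms)
    {
            unsigned long j = (unsigned long)ms * HZ / 1000;

            if (j == 0)                     /* round sub-jiffy values up */
                    j = 1;
            else if (j > NEXT_TIMER_MAX_DELTA)
                    j = NEXT_TIMER_MAX_DELTA;
            return j;
    }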
-
-static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
-                              struct nlattr *data[],
-                              struct netlink_ext_ack *extack)
-{
-       cfhsi_netlink_parms(data, netdev_priv(dev));
-       netdev_state_change(dev);
-       return 0;
-}
-
-static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
-       [__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
-       [__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
-       [__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
-       [__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
-       [__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
-       [__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
-};
-
-static size_t caif_hsi_get_size(const struct net_device *dev)
-{
-       int i;
-       size_t s = 0;
-       for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
-               s += nla_total_size(caif_hsi_policy[i].len);
-       return s;
-}
-
-static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
-{
-       struct cfhsi *cfhsi = netdev_priv(dev);
-
-       if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
-                       cfhsi->cfg.inactivity_timeout) ||
-           nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
-                       cfhsi->cfg.aggregation_timeout) ||
-           nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
-                       cfhsi->cfg.head_align) ||
-           nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
-                       cfhsi->cfg.tail_align) ||
-           nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
-                       cfhsi->cfg.q_high_mark) ||
-           nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
-                       cfhsi->cfg.q_low_mark))
-               return -EMSGSIZE;
-
-       return 0;
-}
-
-static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
-                           struct nlattr *tb[], struct nlattr *data[],
-                           struct netlink_ext_ack *extack)
-{
-       struct cfhsi *cfhsi = NULL;
-       struct cfhsi_ops *(*get_ops)(void);
-
-       ASSERT_RTNL();
-
-       cfhsi = netdev_priv(dev);
-       cfhsi_netlink_parms(data, cfhsi);
-
-       get_ops = symbol_get(cfhsi_get_ops);
-       if (!get_ops) {
-               pr_err("%s: failed to get the cfhsi_ops\n", __func__);
-               return -ENODEV;
-       }
-
-       /* Assign the HSI device. */
-       cfhsi->ops = (*get_ops)();
-       if (!cfhsi->ops) {
-               pr_err("%s: failed to get the cfhsi_ops\n", __func__);
-               goto err;
-       }
-
-       /* Assign the driver to this HSI device. */
-       cfhsi->ops->cb_ops = &cfhsi->cb_ops;
-       if (register_netdevice(dev)) {
-               pr_warn("%s: caif_hsi device registration failed\n", __func__);
-               goto err;
-       }
-       /* Add CAIF HSI device to list. */
-       list_add_tail(&cfhsi->list, &cfhsi_list);
-
-       return 0;
-err:
-       symbol_put(cfhsi_get_ops);
-       return -ENODEV;
-}
-
-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
-       .kind           = "cfhsi",
-       .priv_size      = sizeof(struct cfhsi),
-       .setup          = cfhsi_setup,
-       .maxtype        = __IFLA_CAIF_HSI_MAX,
-       .policy = caif_hsi_policy,
-       .newlink        = caif_hsi_newlink,
-       .changelink     = caif_hsi_changelink,
-       .get_size       = caif_hsi_get_size,
-       .fill_info      = caif_hsi_fill_info,
-};
-
-static void __exit cfhsi_exit_module(void)
-{
-       struct list_head *list_node;
-       struct list_head *n;
-       struct cfhsi *cfhsi;
-
-       rtnl_link_unregister(&caif_hsi_link_ops);
-
-       rtnl_lock();
-       list_for_each_safe(list_node, n, &cfhsi_list) {
-               cfhsi = list_entry(list_node, struct cfhsi, list);
-               unregister_netdevice(cfhsi->ndev);
-       }
-       rtnl_unlock();
-}
-
-static int __init cfhsi_init_module(void)
-{
-       return rtnl_link_register(&caif_hsi_link_ops);
-}
-
-module_init(cfhsi_init_module);
-module_exit(cfhsi_exit_module);
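
The caif_hsi code above is being deleted wholesale. Note the module-pinning pattern it relied on: .newlink resolved its backend with symbol_get(), which takes a reference on the module exporting the symbol, and every exit path (newlink failure, .ndo_uninit) had to balance it with symbol_put(). Condensed from the removed code:

    get_ops = symbol_get(cfhsi_get_ops);    /* pins the exporting module */
    if (!get_ops)
            return -ENODEV;
    ...
    err:
            symbol_put(cfhsi_get_ops);      /* must balance on every path */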
index dd17b8c..89d9c98 100644 (file)
@@ -218,7 +218,7 @@ static int hi3110_spi_trans(struct spi_device *spi, int len)
        return ret;
 }
 
-static u8 hi3110_cmd(struct spi_device *spi, u8 command)
+static int hi3110_cmd(struct spi_device *spi, u8 command)
 {
        struct hi3110_priv *priv = spi_get_drvdata(spi);
 
index 47c3f40..9ae4807 100644 (file)
@@ -2300,6 +2300,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
                   err, priv->regs_status.intf);
        mcp251xfd_dump(priv);
        mcp251xfd_chip_interrupts_disable(priv);
+       mcp251xfd_timestamp_stop(priv);
 
        return handled;
 }
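
Pairing mcp251xfd_timestamp_stop() with mcp251xfd_chip_interrupts_disable() on the fatal-error path keeps the timestamp machinery from touching a chip the driver has just given up on: the timestamping code runs a periodic worker that reads the chip's free-running counter over SPI. A sketch of the usual stop pattern, assuming a delayed_work (the real mcp251xfd implementation may differ in detail):

    static void example_timestamp_stop(struct example_priv *priv)
    {
            /* wait out a running read, then prevent re-arming */
            cancel_delayed_work_sync(&priv->timestamp_work);
    }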
index 0a37af4..2b5302e 100644 (file)
@@ -255,6 +255,8 @@ struct ems_usb {
        unsigned int free_slots; /* remember number of available slots */
 
        struct ems_cpc_msg active_params; /* active controller parameters */
+       void *rxbuf[MAX_RX_URBS];
+       dma_addr_t rxbuf_dma[MAX_RX_URBS];
 };
 
 static void ems_usb_read_interrupt_callback(struct urb *urb)
@@ -587,6 +589,7 @@ static int ems_usb_start(struct ems_usb *dev)
        for (i = 0; i < MAX_RX_URBS; i++) {
                struct urb *urb = NULL;
                u8 *buf = NULL;
+               dma_addr_t buf_dma;
 
                /* create a URB, and a buffer for it */
                urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -596,7 +599,7 @@ static int ems_usb_start(struct ems_usb *dev)
                }
 
                buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
-                                        &urb->transfer_dma);
+                                        &buf_dma);
                if (!buf) {
                        netdev_err(netdev, "No memory left for USB buffer\n");
                        usb_free_urb(urb);
@@ -604,6 +607,8 @@ static int ems_usb_start(struct ems_usb *dev)
                        break;
                }
 
+               urb->transfer_dma = buf_dma;
+
                usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2),
                                  buf, RX_BUFFER_SIZE,
                                  ems_usb_read_bulk_callback, dev);
@@ -619,6 +624,9 @@ static int ems_usb_start(struct ems_usb *dev)
                        break;
                }
 
+               dev->rxbuf[i] = buf;
+               dev->rxbuf_dma[i] = buf_dma;
+
                /* Drop reference, USB core will take care of freeing it */
                usb_free_urb(urb);
        }
@@ -684,6 +692,10 @@ static void unlink_all_urbs(struct ems_usb *dev)
 
        usb_kill_anchored_urbs(&dev->rx_submitted);
 
+       for (i = 0; i < MAX_RX_URBS; ++i)
+               usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+                                 dev->rxbuf[i], dev->rxbuf_dma[i]);
+
        usb_kill_anchored_urbs(&dev->tx_submitted);
        atomic_set(&dev->active_tx_urbs, 0);
 
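This hunk, and the matching esd_usb2, mcba_usb and usb_8dev hunks below, plug the same leak: memory from usb_alloc_coherent() is not freed by the USB core when the URB dies, so the driver must remember each buffer and its DMA handle and release them itself at teardown. The shape of the fix, condensed from the diff:

    /* allocation side: record what was handed to the URB */
    buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL, &buf_dma);
    urb->transfer_dma = buf_dma;
    dev->rxbuf[i] = buf;
    dev->rxbuf_dma[i] = buf_dma;

    /* teardown side: free every recorded buffer */
    for (i = 0; i < MAX_RX_URBS; ++i)
            usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
                              dev->rxbuf[i], dev->rxbuf_dma[i]);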
index 65b58f8..66fa8b0 100644 (file)
@@ -195,6 +195,8 @@ struct esd_usb2 {
        int net_count;
        u32 version;
        int rxinitdone;
+       void *rxbuf[MAX_RX_URBS];
+       dma_addr_t rxbuf_dma[MAX_RX_URBS];
 };
 
 struct esd_usb2_net_priv {
@@ -545,6 +547,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
        for (i = 0; i < MAX_RX_URBS; i++) {
                struct urb *urb = NULL;
                u8 *buf = NULL;
+               dma_addr_t buf_dma;
 
                /* create a URB, and a buffer for it */
                urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -554,7 +557,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
                }
 
                buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
-                                        &urb->transfer_dma);
+                                        &buf_dma);
                if (!buf) {
                        dev_warn(dev->udev->dev.parent,
                                 "No memory left for USB buffer\n");
@@ -562,6 +565,8 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
                        goto freeurb;
                }
 
+               urb->transfer_dma = buf_dma;
+
                usb_fill_bulk_urb(urb, dev->udev,
                                  usb_rcvbulkpipe(dev->udev, 1),
                                  buf, RX_BUFFER_SIZE,
@@ -574,8 +579,12 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
                        usb_unanchor_urb(urb);
                        usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
                                          urb->transfer_dma);
+                       goto freeurb;
                }
 
+               dev->rxbuf[i] = buf;
+               dev->rxbuf_dma[i] = buf_dma;
+
 freeurb:
                /* Drop reference, USB core will take care of freeing it */
                usb_free_urb(urb);
@@ -663,6 +672,11 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
        int i, j;
 
        usb_kill_anchored_urbs(&dev->rx_submitted);
+
+       for (i = 0; i < MAX_RX_URBS; ++i)
+               usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+                                 dev->rxbuf[i], dev->rxbuf_dma[i]);
+
        for (i = 0; i < dev->net_count; i++) {
                priv = dev->nets[i];
                if (priv) {
index a45865b..a1a154c 100644 (file)
@@ -653,6 +653,8 @@ static int mcba_usb_start(struct mcba_priv *priv)
                        break;
                }
 
+               urb->transfer_dma = buf_dma;
+
                usb_fill_bulk_urb(urb, priv->udev,
                                  usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN),
                                  buf, MCBA_USB_RX_BUFF_SIZE,
index 1d6f772..899a3d2 100644 (file)
 #define PCAN_USB_BERR_MASK     (PCAN_USB_ERR_RXERR | PCAN_USB_ERR_TXERR)
 
 /* identify bus event packets with rx/tx error counters */
-#define PCAN_USB_ERR_CNT               0x80
+#define PCAN_USB_ERR_CNT_DEC           0x00    /* counters are decreasing */
+#define PCAN_USB_ERR_CNT_INC           0x80    /* counters are increasing */
 
 /* private to PCAN-USB adapter */
 struct pcan_usb {
@@ -608,11 +609,12 @@ static int pcan_usb_handle_bus_evt(struct pcan_usb_msg_context *mc, u8 ir)
 
        /* according to the content of the packet */
        switch (ir) {
-       case PCAN_USB_ERR_CNT:
+       case PCAN_USB_ERR_CNT_DEC:
+       case PCAN_USB_ERR_CNT_INC:
 
                /* save rx/tx error counters in the device context */
-               pdev->bec.rxerr = mc->ptr[0];
-               pdev->bec.txerr = mc->ptr[1];
+               pdev->bec.rxerr = mc->ptr[1];
+               pdev->bec.txerr = mc->ptr[2];
                break;
 
        default:
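
The rename from the single PCAN_USB_ERR_CNT to the DEC/INC pair encodes the real record layout: the event type byte distinguishes counters that are decreasing (0x00) from counters that are increasing (0x80), and in both cases the rx/tx error counters sit one byte later than the old code read them. The corrected parse, condensed:

    switch (ir) {
    case PCAN_USB_ERR_CNT_DEC:      /* 0x00 */
    case PCAN_USB_ERR_CNT_INC:      /* 0x80 */
            pdev->bec.rxerr = mc->ptr[1];
            pdev->bec.txerr = mc->ptr[2];
            break;
    }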
index b6e7ef0..d1b83bd 100644 (file)
@@ -137,7 +137,8 @@ struct usb_8dev_priv {
        u8 *cmd_msg_buffer;
 
        struct mutex usb_8dev_cmd_lock;
-
+       void *rxbuf[MAX_RX_URBS];
+       dma_addr_t rxbuf_dma[MAX_RX_URBS];
 };
 
 /* tx frame */
@@ -733,6 +734,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
        for (i = 0; i < MAX_RX_URBS; i++) {
                struct urb *urb = NULL;
                u8 *buf;
+               dma_addr_t buf_dma;
 
                /* create a URB, and a buffer for it */
                urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -742,7 +744,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
                }
 
                buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL,
-                                        &urb->transfer_dma);
+                                        &buf_dma);
                if (!buf) {
                        netdev_err(netdev, "No memory left for USB buffer\n");
                        usb_free_urb(urb);
@@ -750,6 +752,8 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
                        break;
                }
 
+               urb->transfer_dma = buf_dma;
+
                usb_fill_bulk_urb(urb, priv->udev,
                                  usb_rcvbulkpipe(priv->udev,
                                                  USB_8DEV_ENDP_DATA_RX),
@@ -767,6 +771,9 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
                        break;
                }
 
+               priv->rxbuf[i] = buf;
+               priv->rxbuf_dma[i] = buf_dma;
+
                /* Drop reference, USB core will take care of freeing it */
                usb_free_urb(urb);
        }
@@ -836,6 +843,10 @@ static void unlink_all_urbs(struct usb_8dev_priv *priv)
 
        usb_kill_anchored_urbs(&priv->rx_submitted);
 
+       for (i = 0; i < MAX_RX_URBS; ++i)
+               usb_free_coherent(priv->udev, RX_BUFFER_SIZE,
+                                 priv->rxbuf[i], priv->rxbuf_dma[i]);
+
        usb_kill_anchored_urbs(&priv->tx_submitted);
        atomic_set(&priv->active_tx_urbs, 0);
 
index a7e5ac6..1542bfb 100644 (file)
@@ -419,8 +419,10 @@ int ksz_switch_register(struct ksz_device *dev,
                                if (of_property_read_u32(port, "reg",
                                                         &port_num))
                                        continue;
-                               if (!(dev->port_mask & BIT(port_num)))
+                               if (!(dev->port_mask & BIT(port_num))) {
+                                       of_node_put(port);
                                        return -EINVAL;
+                               }
                                of_get_phy_mode(port,
                                                &dev->ports[port_num].interface);
                        }
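
The of_node iterators take a reference on each child they hand out and drop it when advancing to the next one; bailing out of the loop early, as the return -EINVAL here does, leaks that reference unless the exiting path drops it explicitly. The general pattern:

    struct device_node *child;

    for_each_available_child_of_node(parent, child) {
            if (invalid(child)) {           /* hypothetical predicate */
                    of_node_put(child);     /* drop the loop's ref    */
                    return -EINVAL;
            }
    }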
index 93136f7..69f21b7 100644 (file)
@@ -366,6 +366,8 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
        int i;
 
        reg[1] |= vid & CVID_MASK;
+       if (vid > 1)
+               reg[1] |= ATA2_IVL;
        reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER;
        reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP;
        /* STATIC_ENT indicates that the entry is static and wouldn't
index 334d610..b19b389 100644 (file)
@@ -79,6 +79,7 @@ enum mt753x_bpdu_port_fw {
 #define  STATIC_EMP                    0
 #define  STATIC_ENT                    3
 #define MT7530_ATA2                    0x78
+#define  ATA2_IVL                      BIT(15)
 
 /* Register for address table write data */
 #define MT7530_ATWD                    0x7c
index 05af632..634a48e 100644 (file)
@@ -12,7 +12,7 @@ config NET_DSA_MV88E6XXX
 config NET_DSA_MV88E6XXX_PTP
        bool "PTP support for Marvell 88E6xxx"
        default n
-       depends on PTP_1588_CLOCK
+       depends on NET_DSA_MV88E6XXX && PTP_1588_CLOCK
        help
          Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
          chips that support it.
index 961fa6b..272b053 100644 (file)
@@ -2155,7 +2155,7 @@ static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
        int i, err;
 
        if (!vid)
-               return -EOPNOTSUPP;
+               return 0;
 
        err = mv88e6xxx_vtu_get(chip, vid, &vlan);
        if (err)
@@ -3583,6 +3583,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
        .port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
        .port_max_speed_mode = mv88e6341_port_max_speed_mode,
        .port_tag_remap = mv88e6095_port_tag_remap,
+       .port_set_policy = mv88e6352_port_set_policy,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
        .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -3596,7 +3597,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
        .port_set_cmode = mv88e6341_port_set_cmode,
        .port_setup_message_port = mv88e6xxx_setup_message_port,
        .stats_snapshot = mv88e6390_g1_stats_snapshot,
-       .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+       .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
        .stats_get_strings = mv88e6320_stats_get_strings,
        .stats_get_stats = mv88e6390_stats_get_stats,
@@ -3606,6 +3607,9 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
        .mgmt_rsvd2cpu =  mv88e6390_g1_mgmt_rsvd2cpu,
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
+       .rmu_disable = mv88e6390_g1_rmu_disable,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
        .serdes_power = mv88e6390_serdes_power,
@@ -3619,6 +3623,11 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
        .serdes_irq_enable = mv88e6390_serdes_irq_enable,
        .serdes_irq_status = mv88e6390_serdes_irq_status,
        .gpio_ops = &mv88e6352_gpio_ops,
+       .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+       .serdes_get_strings = mv88e6390_serdes_get_strings,
+       .serdes_get_stats = mv88e6390_serdes_get_stats,
+       .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+       .serdes_get_regs = mv88e6390_serdes_get_regs,
        .phylink_validate = mv88e6341_phylink_validate,
 };
 
@@ -4383,6 +4392,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
        .port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
        .port_max_speed_mode = mv88e6341_port_max_speed_mode,
        .port_tag_remap = mv88e6095_port_tag_remap,
+       .port_set_policy = mv88e6352_port_set_policy,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
        .port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -4396,7 +4406,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
        .port_set_cmode = mv88e6341_port_set_cmode,
        .port_setup_message_port = mv88e6xxx_setup_message_port,
        .stats_snapshot = mv88e6390_g1_stats_snapshot,
-       .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+       .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
        .stats_get_strings = mv88e6320_stats_get_strings,
        .stats_get_stats = mv88e6390_stats_get_stats,
@@ -4406,6 +4416,9 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
        .mgmt_rsvd2cpu =  mv88e6390_g1_mgmt_rsvd2cpu,
        .pot_clear = mv88e6xxx_g2_pot_clear,
        .reset = mv88e6352_g1_reset,
+       .rmu_disable = mv88e6390_g1_rmu_disable,
+       .atu_get_hash = mv88e6165_g1_atu_get_hash,
+       .atu_set_hash = mv88e6165_g1_atu_set_hash,
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
        .serdes_power = mv88e6390_serdes_power,
@@ -4421,6 +4434,11 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
        .gpio_ops = &mv88e6352_gpio_ops,
        .avb_ops = &mv88e6390_avb_ops,
        .ptp_ops = &mv88e6352_ptp_ops,
+       .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+       .serdes_get_strings = mv88e6390_serdes_get_strings,
+       .serdes_get_stats = mv88e6390_serdes_get_stats,
+       .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+       .serdes_get_regs = mv88e6390_serdes_get_regs,
        .phylink_validate = mv88e6341_phylink_validate,
 };
 
index e4fbef8..b1d46dd 100644 (file)
@@ -722,7 +722,7 @@ static struct mv88e6390_serdes_hw_stat mv88e6390_serdes_hw_stats[] = {
 
 int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
 {
-       if (mv88e6390_serdes_get_lane(chip, port) < 0)
+       if (mv88e6xxx_serdes_get_lane(chip, port) < 0)
                return 0;
 
        return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
@@ -734,7 +734,7 @@ int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
        struct mv88e6390_serdes_hw_stat *stat;
        int i;
 
-       if (mv88e6390_serdes_get_lane(chip, port) < 0)
+       if (mv88e6xxx_serdes_get_lane(chip, port) < 0)
                return 0;
 
        for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
@@ -770,7 +770,7 @@ int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
        int lane;
        int i;
 
-       lane = mv88e6390_serdes_get_lane(chip, port);
+       lane = mv88e6xxx_serdes_get_lane(chip, port);
        if (lane < 0)
                return 0;
 
index 4f05456..e2dc997 100644 (file)
@@ -122,14 +122,12 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
 
        for (i = 0; i < ds->num_ports; i++) {
                mac[i] = default_mac;
-               if (i == dsa_upstream_port(priv->ds, i)) {
-                       /* STP doesn't get called for CPU port, so we need to
-                        * set the I/O parameters statically.
-                        */
-                       mac[i].dyn_learn = true;
-                       mac[i].ingress = true;
-                       mac[i].egress = true;
-               }
+
+               /* Let sja1105_bridge_stp_state_set() keep address learning
+                * enabled for the CPU port.
+                */
+               if (dsa_is_cpu_port(ds, i))
+                       priv->learn_ena |= BIT(i);
        }
 
        return 0;
@@ -399,6 +397,12 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
                if (dsa_is_cpu_port(ds, port))
                        v->pvid = true;
                list_add(&v->list, &priv->dsa_8021q_vlans);
+
+               v = kmemdup(v, sizeof(*v), GFP_KERNEL);
+               if (!v)
+                       return -ENOMEM;
+
+               list_add(&v->list, &priv->bridge_vlans);
        }
 
        ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
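
A struct with a single embedded list_head can only be linked into one list at a time, so the same VLAN entry cannot live on both priv->dsa_8021q_vlans and priv->bridge_vlans; the fix duplicates the node with kmemdup() before the second list_add(). The copy briefly carries the original's stale next/prev pointers, but list_add() overwrites them immediately:

    list_add(&v->list, &priv->dsa_8021q_vlans);

    v = kmemdup(v, sizeof(*v), GFP_KERNEL);     /* fresh node for list 2 */
    if (!v)
            return -ENOMEM;
    list_add(&v->list, &priv->bridge_vlans);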
index 7dff203..f19370c 100644 (file)
@@ -594,6 +594,11 @@ int atl1c_phy_init(struct atl1c_hw *hw)
        int ret_val;
        u16 mii_bmcr_data = BMCR_RESET;
 
+       if (hw->nic_type == athr_mt) {
+               hw->phy_configured = true;
+               return 0;
+       }
+
        if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) ||
                (atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) {
                dev_err(&pdev->dev, "Error get phy ID\n");
index f56245e..8960658 100644 (file)
@@ -1671,11 +1671,16 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 
        if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
            (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
-               u16 vlan_proto = tpa_info->metadata >>
-                       RX_CMP_FLAGS2_METADATA_TPID_SFT;
+               __be16 vlan_proto = htons(tpa_info->metadata >>
+                                         RX_CMP_FLAGS2_METADATA_TPID_SFT);
                u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
 
-               __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
+               if (eth_type_vlan(vlan_proto)) {
+                       __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+               } else {
+                       dev_kfree_skb(skb);
+                       return NULL;
+               }
        }
 
        skb_checksum_none_assert(skb);
@@ -1897,9 +1902,15 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
            (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
                u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
                u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
-               u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
+               __be16 vlan_proto = htons(meta_data >>
+                                         RX_CMP_FLAGS2_METADATA_TPID_SFT);
 
-               __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
+               if (eth_type_vlan(vlan_proto)) {
+                       __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+               } else {
+                       dev_kfree_skb(skb);
+                       goto next_rx;
+               }
        }
 
        skb_checksum_none_assert(skb);
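
Both RX paths now validate the hardware-reported TPID before trusting it: eth_type_vlan() accepts only ETH_P_8021Q and ETH_P_8021AD, and anything else means the completion metadata does not describe a VLAN tag, so the packet is dropped instead of being delivered with a bogus tag. The check in isolation:

    __be16 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
    u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;

    if (eth_type_vlan(vlan_proto))
            __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
    else
            dev_kfree_skb(skb);     /* unknown TPID: drop, don't mis-tag */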
@@ -7563,8 +7574,12 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
                bp->flags &= ~BNXT_FLAG_WOL_CAP;
                if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
                        bp->flags |= BNXT_FLAG_WOL_CAP;
-               if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED)
+               if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
                        __bnxt_hwrm_ptp_qcfg(bp);
+               } else {
+                       kfree(bp->ptp_cfg);
+                       bp->ptp_cfg = NULL;
+               }
        } else {
 #ifdef CONFIG_BNXT_SRIOV
                struct bnxt_vf_info *vf = &bp->vf;
@@ -10123,7 +10138,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
                }
        }
 
-       bnxt_ptp_start(bp);
        rc = bnxt_init_nic(bp, irq_re_init);
        if (rc) {
                netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
@@ -10197,6 +10211,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
 {
        int rc = 0;
 
+       if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
+               netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
+               rc = -ENODEV;
+               goto half_open_err;
+       }
+
        rc = bnxt_alloc_mem(bp, false);
        if (rc) {
                netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
@@ -10256,9 +10276,16 @@ static int bnxt_open(struct net_device *dev)
        rc = bnxt_hwrm_if_change(bp, true);
        if (rc)
                return rc;
+
+       if (bnxt_ptp_init(bp)) {
+               netdev_warn(dev, "PTP initialization failed.\n");
+               kfree(bp->ptp_cfg);
+               bp->ptp_cfg = NULL;
+       }
        rc = __bnxt_open_nic(bp, true, true);
        if (rc) {
                bnxt_hwrm_if_change(bp, false);
+               bnxt_ptp_clear(bp);
        } else {
                if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
                        if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
@@ -10349,6 +10376,7 @@ static int bnxt_close(struct net_device *dev)
 {
        struct bnxt *bp = netdev_priv(dev);
 
+       bnxt_ptp_clear(bp);
        bnxt_hwmon_close(bp);
        bnxt_close_nic(bp, true, true);
        bnxt_hwrm_shutdown_link(bp);
@@ -11335,6 +11363,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
                bnxt_clear_int_mode(bp);
                pci_disable_device(bp->pdev);
        }
+       bnxt_ptp_clear(bp);
        __bnxt_close_nic(bp, true, false);
        bnxt_vf_reps_free(bp);
        bnxt_clear_int_mode(bp);
@@ -11959,10 +11988,21 @@ static bool bnxt_fw_reset_timeout(struct bnxt *bp)
                          (bp->fw_reset_max_dsecs * HZ / 10));
 }
 
+static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
+{
+       clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+       if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
+               bnxt_ulp_start(bp, rc);
+               bnxt_dl_health_status_update(bp, false);
+       }
+       bp->fw_reset_state = 0;
+       dev_close(bp->dev);
+}
+
 static void bnxt_fw_reset_task(struct work_struct *work)
 {
        struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
-       int rc;
+       int rc = 0;
 
        if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
                netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
@@ -11992,6 +12032,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                }
                bp->fw_reset_timestamp = jiffies;
                rtnl_lock();
+               if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
+                       bnxt_fw_reset_abort(bp, rc);
+                       rtnl_unlock();
+                       return;
+               }
                bnxt_fw_reset_close(bp);
                if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
                        bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
@@ -12039,6 +12084,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                        if (val == 0xffff) {
                                if (bnxt_fw_reset_timeout(bp)) {
                                        netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
+                                       rc = -ETIMEDOUT;
                                        goto fw_reset_abort;
                                }
                                bnxt_queue_fw_reset_work(bp, HZ / 1000);
@@ -12048,6 +12094,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
                if (pci_enable_device(bp->pdev)) {
                        netdev_err(bp->dev, "Cannot re-enable PCI device\n");
+                       rc = -ENODEV;
                        goto fw_reset_abort;
                }
                pci_set_master(bp->pdev);
@@ -12074,18 +12121,18 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                }
                rc = bnxt_open(bp->dev);
                if (rc) {
-                       netdev_err(bp->dev, "bnxt_open_nic() failed\n");
-                       clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
-                       dev_close(bp->dev);
+                       netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
+                       bnxt_fw_reset_abort(bp, rc);
+                       rtnl_unlock();
+                       return;
                }
 
                bp->fw_reset_state = 0;
                /* Make sure fw_reset_state is 0 before clearing the flag */
                smp_mb__before_atomic();
                clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
-               bnxt_ulp_start(bp, rc);
-               if (!rc)
-                       bnxt_reenable_sriov(bp);
+               bnxt_ulp_start(bp, 0);
+               bnxt_reenable_sriov(bp);
                bnxt_vf_reps_alloc(bp);
                bnxt_vf_reps_open(bp);
                bnxt_dl_health_recovery_done(bp);
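
Every abort path used to open-code slightly different cleanup, which is how the ULP restart got missed on some of them; bnxt_fw_reset_abort() now performs the whole sequence (clear BNXT_STATE_IN_FW_RESET, restart ULPs, update devlink health, reset fw_reset_state, close the device) in one place, and the failure sites differ only in the errno they record:

    rc = -ETIMEDOUT;                /* or -ENODEV, per failure cause */
    goto fw_reset_abort;
    ...
    fw_reset_abort:
            rtnl_lock();
            bnxt_fw_reset_abort(bp, rc);
            rtnl_unlock();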
@@ -12103,12 +12150,8 @@ fw_reset_abort_status:
                netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
        }
 fw_reset_abort:
-       clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
-       if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
-               bnxt_dl_health_status_update(bp, false);
-       bp->fw_reset_state = 0;
        rtnl_lock();
-       dev_close(bp->dev);
+       bnxt_fw_reset_abort(bp, rc);
        rtnl_unlock();
 }
 
@@ -12662,7 +12705,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
        if (BNXT_PF(bp))
                devlink_port_type_clear(&bp->dl_port);
 
-       bnxt_ptp_clear(bp);
        pci_disable_pcie_error_reporting(pdev);
        unregister_netdev(dev);
        clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
@@ -13246,11 +13288,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                                   rc);
        }
 
-       if (bnxt_ptp_init(bp)) {
-               netdev_warn(dev, "PTP initialization failed.\n");
-               kfree(bp->ptp_cfg);
-               bp->ptp_cfg = NULL;
-       }
        bnxt_inv_fw_health_reg(bp);
        bnxt_dl_register(bp);
 
@@ -13436,7 +13473,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
        if (netif_running(netdev))
                bnxt_close(netdev);
 
-       pci_disable_device(pdev);
+       if (pci_is_enabled(pdev))
+               pci_disable_device(pdev);
        bnxt_free_ctx_mem(bp);
        kfree(bp->ctx);
        bp->ctx = NULL;
index 8e90224..8a68df4 100644 (file)
@@ -433,6 +433,7 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
 static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
 {
        int total_ets_bw = 0;
+       bool zero = false;
        u8 max_tc = 0;
        int i;
 
@@ -453,13 +454,20 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
                        break;
                case IEEE_8021QAZ_TSA_ETS:
                        total_ets_bw += ets->tc_tx_bw[i];
+                       zero = zero || !ets->tc_tx_bw[i];
                        break;
                default:
                        return -ENOTSUPP;
                }
        }
-       if (total_ets_bw > 100)
+       if (total_ets_bw > 100) {
+               netdev_warn(bp->dev, "rejecting ETS config exceeding available bandwidth\n");
                return -EINVAL;
+       }
+       if (zero && total_ets_bw == 100) {
+               netdev_warn(bp->dev, "rejecting ETS config starving a TC\n");
+               return -EINVAL;
+       }
 
        if (max_tc >= bp->max_tc)
                *tc = bp->max_tc;
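
The new zero check closes a gap the over-subscription check misses. Worked example: tc_tx_bw = {50, 50, 0} with all three TCs set to IEEE_8021QAZ_TSA_ETS sums to exactly 100, so it passes the total_ets_bw > 100 test, yet the third TC is guaranteed 0% with no slack to borrow and would be starved:

    /* tc_tx_bw = {50, 50, 0}, all TSA_ETS                 */
    total_ets_bw = 50 + 50 + 0;     /* 100: passes check 1 */
    zero = true;                    /* tc_tx_bw[2] == 0    */
    /* zero && total_ets_bw == 100  -> -EINVAL, starved TC */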
index f698b6b..ec381c2 100644 (file)
@@ -353,6 +353,12 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
 
        bnxt_ptp_get_current_time(bp);
        ptp->next_period = now + HZ;
+       if (time_after_eq(now, ptp->next_overflow_check)) {
+               spin_lock_bh(&ptp->ptp_lock);
+               timecounter_read(&ptp->tc);
+               spin_unlock_bh(&ptp->ptp_lock);
+               ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
+       }
        return HZ;
 }
 
@@ -385,22 +391,6 @@ int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
        return 0;
 }
 
-void bnxt_ptp_start(struct bnxt *bp)
-{
-       struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
-
-       if (!ptp)
-               return;
-
-       if (bp->flags & BNXT_FLAG_CHIP_P5) {
-               spin_lock_bh(&ptp->ptp_lock);
-               ptp->current_time = bnxt_refclk_read(bp, NULL);
-               WRITE_ONCE(ptp->old_time, ptp->current_time);
-               spin_unlock_bh(&ptp->ptp_lock);
-               ptp_schedule_worker(ptp->ptp_clock, 0);
-       }
-}
-
 static const struct ptp_clock_info bnxt_ptp_caps = {
        .owner          = THIS_MODULE,
        .name           = "bnxt clock",
@@ -439,6 +429,7 @@ int bnxt_ptp_init(struct bnxt *bp)
        ptp->cc.shift = 0;
        ptp->cc.mult = 1;
 
+       ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
        timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
 
        ptp->ptp_info = bnxt_ptp_caps;
@@ -450,7 +441,13 @@ int bnxt_ptp_init(struct bnxt *bp)
                bnxt_unmap_ptp_regs(bp);
                return err;
        }
-
+       if (bp->flags & BNXT_FLAG_CHIP_P5) {
+               spin_lock_bh(&ptp->ptp_lock);
+               ptp->current_time = bnxt_refclk_read(bp, NULL);
+               WRITE_ONCE(ptp->old_time, ptp->current_time);
+               spin_unlock_bh(&ptp->ptp_lock);
+               ptp_schedule_worker(ptp->ptp_clock, 0);
+       }
        return 0;
 }
 
index 6b62457..254ba7b 100644 (file)
@@ -32,6 +32,10 @@ struct bnxt_ptp_cfg {
        u64                     current_time;
        u64                     old_time;
        unsigned long           next_period;
+       unsigned long           next_overflow_check;
+       /* 48-bit PHC overflows in 78 hours.  Check overflow every 19 hours. */
+       #define BNXT_PHC_OVERFLOW_PERIOD        (19 * 3600 * HZ)
+
        u16                     tx_seqid;
        struct bnxt             *bp;
        atomic_t                tx_avail;
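
The numbers in the new comment check out: a 48-bit nanosecond counter wraps after 2^48 ns, and a timecounter must be read at least once per half wrap period for timecounter_read() to observe the rollover, so polling every 19 hours leaves roughly a 2x margin:

    /* 2^48 ns = 281474976710656 ns
     * 281474976710656 / 1e9 ~= 281475 s ~= 78.2 h   (wrap period)
     * half period           ~= 39.1 h
     * check interval          =  19 h   <  39.1 h   -> rollover never missed
     */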
@@ -75,7 +79,6 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
 int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
 int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
 int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
-void bnxt_ptp_start(struct bnxt *bp);
 int bnxt_ptp_init(struct bnxt *bp);
 void bnxt_ptp_clear(struct bnxt *bp);
 #endif
index a918e37..187ff64 100644 (file)
@@ -479,16 +479,17 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
                if (!edev)
                        return ERR_PTR(-ENOMEM);
                edev->en_ops = &bnxt_en_ops_tbl;
-               if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
-                       edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
-               if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
-                       edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
                edev->net = dev;
                edev->pdev = bp->pdev;
                edev->l2_db_size = bp->db_size;
                edev->l2_db_size_nc = bp->db_size;
                bp->edev = edev;
        }
+       edev->flags &= ~BNXT_EN_FLAG_ROCE_CAP;
+       if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
+               edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
+       if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
+               edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
        return bp->edev;
 }
 EXPORT_SYMBOL(bnxt_ulp_probe);
index 41f7f07..db74241 100644 (file)
@@ -1640,7 +1640,8 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
 
        switch (mode) {
        case GENET_POWER_PASSIVE:
-               reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+               reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
+                        EXT_ENERGY_DET_MASK);
                if (GENET_IS_V5(priv)) {
                        reg &= ~(EXT_PWR_DOWN_PHY_EN |
                                 EXT_PWR_DOWN_PHY_RD |
@@ -3237,15 +3238,21 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
 /* Returns a reusable dma control register value */
 static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
 {
+       unsigned int i;
        u32 reg;
        u32 dma_ctrl;
 
        /* disable DMA */
        dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+       for (i = 0; i < priv->hw_params->tx_queues; i++)
+               dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
        reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
        reg &= ~dma_ctrl;
        bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
 
+       dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+       for (i = 0; i < priv->hw_params->rx_queues; i++)
+               dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
        reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
        reg &= ~dma_ctrl;
        bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
@@ -3292,7 +3299,6 @@ static int bcmgenet_open(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        unsigned long dma_ctrl;
-       u32 reg;
        int ret;
 
        netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
@@ -3318,12 +3324,6 @@ static int bcmgenet_open(struct net_device *dev)
 
        bcmgenet_set_hw_addr(priv, dev->dev_addr);
 
-       if (priv->internal_phy) {
-               reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
-               reg |= EXT_ENERGY_DET_MASK;
-               bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-       }
-
        /* Disable RX/TX DMA and flush TX queues */
        dma_ctrl = bcmgenet_dma_disable(priv);
 
@@ -4139,7 +4139,6 @@ static int bcmgenet_resume(struct device *d)
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct bcmgenet_rxnfc_rule *rule;
        unsigned long dma_ctrl;
-       u32 reg;
        int ret;
 
        if (!netif_running(dev))
@@ -4176,12 +4175,6 @@ static int bcmgenet_resume(struct device *d)
                if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
                        bcmgenet_hfb_create_rxnfc_filter(priv, rule);
 
-       if (priv->internal_phy) {
-               reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
-               reg |= EXT_ENERGY_DET_MASK;
-               bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-       }
-
        /* Disable RX/TX DMA and flush TX queues */
        dma_ctrl = bcmgenet_dma_disable(priv);
 
index facde82..e31a5a3 100644 (file)
@@ -186,12 +186,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
        reg |= CMD_RX_EN;
        bcmgenet_umac_writel(priv, reg, UMAC_CMD);
 
-       if (priv->hw_params->flags & GENET_HAS_EXT) {
-               reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
-               reg &= ~EXT_ENERGY_DET_MASK;
-               bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-       }
-
        reg = UMAC_IRQ_MPD_R;
        if (hfb_enable)
                reg |=  UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM;
index 4cddd62..9ed3d1a 100644 (file)
@@ -420,7 +420,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
         * bits 32:47 indicate the PVF num.
         */
        for (q_no = 0; q_no < ern; q_no++) {
-               reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
+               reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
 
                /* for VF assigned queues. */
                if (q_no < oct->sriov_info.pf_srn) {
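
The (u64) cast guards against unintended sign extension: oct->pcie_port is a small unsigned value that integer promotion turns into a signed 32-bit int, so if the shift ever sets bit 31 the intermediate becomes negative and is sign-extended when widened into the 64-bit register value, smearing ones across the PVF field in bits 32:47. With an illustrative shift of 29 (the actual constant lives in the header):

    u16 port = 4;
    u64 bad  = port << 29;          /* int result 0x80000000 is negative,
                                     * widens to 0xffffffff80000000      */
    u64 good = (u64)port << 29;     /* 0x0000000080000000 as intended    */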
index 9a2b166..dbf9a0e 100644 (file)
@@ -2643,6 +2643,9 @@ static void detach_ulds(struct adapter *adap)
 {
        unsigned int i;
 
+       if (!is_uld(adap))
+               return;
+
        mutex_lock(&uld_mutex);
        list_del(&adap->list_node);
 
@@ -7141,10 +7144,13 @@ static void remove_one(struct pci_dev *pdev)
                 */
                destroy_workqueue(adapter->workq);
 
-               if (is_uld(adapter)) {
-                       detach_ulds(adapter);
-                       t4_uld_clean_up(adapter);
-               }
+               detach_ulds(adapter);
+
+               for_each_port(adapter, i)
+                       if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+                               unregister_netdev(adapter->port[i]);
+
+               t4_uld_clean_up(adapter);
 
                adap_free_hma_mem(adapter);
 
@@ -7152,10 +7158,6 @@ static void remove_one(struct pci_dev *pdev)
 
                cxgb4_free_mps_ref_entries(adapter);
 
-               for_each_port(adapter, i)
-                       if (adapter->port[i]->reg_state == NETREG_REGISTERED)
-                               unregister_netdev(adapter->port[i]);
-
                debugfs_remove_recursive(adapter->debugfs_root);
 
                if (!is_t4(adapter->params.chip))
index 743af9e..17faac7 100644 (file)
@@ -581,6 +581,9 @@ void t4_uld_clean_up(struct adapter *adap)
 {
        unsigned int i;
 
+       if (!is_uld(adap))
+               return;
+
        mutex_lock(&uld_mutex);
        for (i = 0; i < CXGB4_ULD_MAX; i++) {
                if (!adap->uld[i].handle)
index f6ff1f7..1876f15 100644 (file)
@@ -357,7 +357,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
        int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
        void __iomem *ioaddr;
 
-       i = pci_enable_device(pdev);
+       i = pcim_enable_device(pdev);
        if (i) return i;
 
        pci_set_master(pdev);
@@ -379,7 +379,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
        if (!ioaddr)
-               goto err_out_free_res;
+               goto err_out_netdev;
 
        for (i = 0; i < 3; i++)
                ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
@@ -458,8 +458,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 err_out_cleardev:
        pci_iounmap(pdev, ioaddr);
-err_out_free_res:
-       pci_release_regions(pdev);
 err_out_netdev:
        free_netdev (dev);
        return -ENODEV;
@@ -1526,7 +1524,6 @@ static void w840_remove1(struct pci_dev *pdev)
        if (dev) {
                struct netdev_private *np = netdev_priv(dev);
                unregister_netdev(dev);
-               pci_release_regions(pdev);
                pci_iounmap(pdev, np->base_addr);
                free_netdev(dev);
        }
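
pcim_enable_device() puts the device into managed (devres) mode: the PCI core disables it automatically when the driver detaches, and a later pci_request_regions() on a managed device is released automatically as well, which is why both explicit pci_release_regions() calls are deleted rather than relocated. Typical shape:

    err = pcim_enable_device(pdev);            /* managed enable           */
    if (err)
            return err;
    err = pci_request_regions(pdev, DRV_NAME); /* auto-released at detach  */
    if (err)
            return err;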
index f3d12d0..68b7864 100644 (file)
@@ -2770,32 +2770,32 @@ static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
        if (err)
                return err;
 
-       err = dpaa2_switch_seed_bp(ethsw);
-       if (err)
-               goto err_free_dpbp;
-
        err = dpaa2_switch_alloc_rings(ethsw);
        if (err)
-               goto err_drain_dpbp;
+               goto err_free_dpbp;
 
        err = dpaa2_switch_setup_dpio(ethsw);
        if (err)
                goto err_destroy_rings;
 
+       err = dpaa2_switch_seed_bp(ethsw);
+       if (err)
+               goto err_deregister_dpio;
+
        err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
        if (err) {
                dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
-               goto err_deregister_dpio;
+               goto err_drain_dpbp;
        }
 
        return 0;
 
+err_drain_dpbp:
+       dpaa2_switch_drain_bp(ethsw);
 err_deregister_dpio:
        dpaa2_switch_free_dpio(ethsw);
 err_destroy_rings:
        dpaa2_switch_destroy_rings(ethsw);
-err_drain_dpbp:
-       dpaa2_switch_drain_bp(ethsw);
 err_free_dpbp:
        dpaa2_switch_free_dpbp(ethsw);
 
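The reorder restores the invariant that the error unwind is the setup sequence run backwards: the buffer pool is seeded only after the rings and DPIO exist, so no frame can arrive before there is a path to drain it, and each err_* label undoes exactly the steps that completed before the failure. The canonical goto-unwind shape, with hypothetical names:

    err = setup_a(ethsw);
    if (err)
            return err;
    err = setup_b(ethsw);
    if (err)
            goto undo_a;
    err = setup_c(ethsw);
    if (err)
            goto undo_b;
    return 0;

    undo_b:
            teardown_b(ethsw);
    undo_a:
            teardown_a(ethsw);
    return err;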
index 46ecb42..d9fc5c4 100644 (file)
@@ -524,6 +524,7 @@ static void setup_memac(struct mac_device *mac_dev)
        | SUPPORTED_Autoneg \
        | SUPPORTED_Pause \
        | SUPPORTED_Asym_Pause \
+       | SUPPORTED_FIBRE \
        | SUPPORTED_MII)
 
 static DEFINE_MUTEX(eth_lock);
index 867e87a..099a2bc 100644 (file)
@@ -1469,7 +1469,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = pci_enable_device(pdev);
        if (err)
-               return -ENXIO;
+               return err;
 
        err = pci_request_regions(pdev, "gvnic-cfg");
        if (err)
@@ -1477,19 +1477,12 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_set_master(pdev);
 
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
                goto abort_with_pci_region;
        }
 
-       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-       if (err) {
-               dev_err(&pdev->dev,
-                       "Failed to set consistent dma mask: err=%d\n", err);
-               goto abort_with_pci_region;
-       }
-
        reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
        if (!reg_bar) {
                dev_err(&pdev->dev, "Failed to map pci bar!\n");
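
dma_set_mask_and_coherent() is the modern replacement for the deleted pci_set_dma_mask()/pci_set_consistent_dma_mask() pair: one call sets both the streaming and the coherent mask and fails if either is unsupported. Drivers that can cope with 32-bit addressing commonly add a fallback, sketched here (gve itself simply fails instead):

    if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
        dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
            dev_err(&pdev->dev, "no usable DMA configuration\n");
            return -EIO;
    }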
@@ -1512,6 +1505,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
        if (!dev) {
                dev_err(&pdev->dev, "could not allocate netdev\n");
+               err = -ENOMEM;
                goto abort_with_db_bar;
        }
        SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1565,7 +1559,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = register_netdev(dev);
        if (err)
-               goto abort_with_wq;
+               goto abort_with_gve_init;
 
        dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
        dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
@@ -1573,6 +1567,9 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        queue_work(priv->gve_wq, &priv->service_task);
        return 0;
 
+abort_with_gve_init:
+       gve_teardown_priv_resources(priv);
+
 abort_with_wq:
        destroy_workqueue(priv->gve_wq);
 
@@ -1590,7 +1587,7 @@ abort_with_pci_region:
 
 abort_with_enabled:
        pci_disable_device(pdev);
-       return -ENXIO;
+       return err;
 }
 
 static void gve_remove(struct pci_dev *pdev)
index 77bb822..8500621 100644 (file)
@@ -566,13 +566,6 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
                return 0;
        }
 
-       /* Prefetch the payload header. */
-       prefetch((char *)buf_state->addr + buf_state->page_info.page_offset);
-#if L1_CACHE_BYTES < 128
-       prefetch((char *)buf_state->addr + buf_state->page_info.page_offset +
-                L1_CACHE_BYTES);
-#endif
-
        if (eop && buf_len <= priv->rx_copybreak) {
                rx->skb_head = gve_rx_copy(priv->dev, napi,
                                           &buf_state->page_info, buf_len, 0);
index 12f6c24..e53512f 100644 (file)
 /* buf unit size is cache_line_size, which is 64, so the shift is 6 */
 #define PPE_BUF_SIZE_SHIFT             6
 #define PPE_TX_BUF_HOLD                        BIT(31)
-#define CACHE_LINE_MASK                        0x3F
+#define SOC_CACHE_LINE_MASK            0x3F
 #else
 #define PPE_CFG_QOS_VMID_GRP_SHIFT     8
 #define PPE_CFG_RX_CTRL_ALIGN_SHIFT    11
@@ -531,8 +531,8 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 #if defined(CONFIG_HI13X1_GMAC)
        desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV
                | TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT);
-       desc->data_offset = (__force u32)cpu_to_be32(phys & CACHE_LINE_MASK);
-       desc->send_addr =  (__force u32)cpu_to_be32(phys & ~CACHE_LINE_MASK);
+       desc->data_offset = (__force u32)cpu_to_be32(phys & SOC_CACHE_LINE_MASK);
+       desc->send_addr =  (__force u32)cpu_to_be32(phys & ~SOC_CACHE_LINE_MASK);
 #else
        desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
        desc->send_addr = (__force u32)cpu_to_be32(phys);
index 0a6cda3..aa86a81 100644 (file)
@@ -98,6 +98,7 @@ struct hclgevf_mbx_resp_status {
        u32 origin_mbx_msg;
        bool received_resp;
        int resp_status;
+       u16 match_id;
        u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE];
 };
 
@@ -143,7 +144,8 @@ struct hclge_mbx_vf_to_pf_cmd {
        u8 mbx_need_resp;
        u8 rsv1[1];
        u8 msg_len;
-       u8 rsv2[3];
+       u8 rsv2;
+       u16 match_id;
        struct hclge_vf_to_pf_msg msg;
 };
 
@@ -153,7 +155,8 @@ struct hclge_mbx_pf_to_vf_cmd {
        u8 dest_vfid;
        u8 rsv[3];
        u8 msg_len;
-       u8 rsv1[3];
+       u8 rsv1;
+       u16 match_id;
        struct hclge_pf_to_vf_msg msg;
 };
 
index dd3354a..ebeaf12 100644 (file)
@@ -9552,13 +9552,17 @@ static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
        if (ret)
                return ret;
 
-       if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps))
+       if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
                ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
                                                        !enable);
-       else if (!vport->vport_id)
+       } else if (!vport->vport_id) {
+               if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
+                       enable = false;
+
                ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
                                                 HCLGE_FILTER_FE_INGRESS,
                                                 enable, 0);
+       }
 
        return ret;
 }
index e10a2c3..c0a478a 100644 (file)
@@ -47,6 +47,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
 
        resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
        resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
+       resp_pf_to_vf->match_id = vf_to_pf_req->match_id;
 
        resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP;
        resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code;
index 3b1f845..befa9bc 100644 (file)
@@ -5,9 +5,27 @@
 #include "hclge_main.h"
 #include "hnae3.h"
 
+static int hclge_ptp_get_cycle(struct hclge_dev *hdev)
+{
+       struct hclge_ptp *ptp = hdev->ptp;
+
+       ptp->cycle.quo = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG) &
+                        HCLGE_PTP_CYCLE_QUO_MASK;
+       ptp->cycle.numer = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
+       ptp->cycle.den = readl(hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
+
+       if (ptp->cycle.den == 0) {
+               dev_err(&hdev->pdev->dev, "invalid ptp cycle denominator!\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 {
        struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+       struct hclge_ptp_cycle *cycle = &hdev->ptp->cycle;
        u64 adj_val, adj_base, diff;
        unsigned long flags;
        bool is_neg = false;
@@ -18,7 +36,7 @@ static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
                is_neg = true;
        }
 
-       adj_base = HCLGE_PTP_CYCLE_ADJ_BASE * HCLGE_PTP_CYCLE_ADJ_UNIT;
+       adj_base = (u64)cycle->quo * (u64)cycle->den + (u64)cycle->numer;
        adj_val = adj_base * ppb;
        diff = div_u64(adj_val, 1000000000ULL);
 
@@ -29,16 +47,16 @@ static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 
        /* This clock cycle is defined by three parts: quotient, numerator
         * and denominator. For example, for 2.5 ns the quotient is 2,
-        * denominator is fixed to HCLGE_PTP_CYCLE_ADJ_UNIT, and numerator
-        * is 0.5 * HCLGE_PTP_CYCLE_ADJ_UNIT.
+        * denominator is fixed to ptp->cycle.den, and numerator
+        * is 0.5 * ptp->cycle.den.
         */
-       quo = div_u64_rem(adj_val, HCLGE_PTP_CYCLE_ADJ_UNIT, &numerator);
+       quo = div_u64_rem(adj_val, cycle->den, &numerator);
 
        spin_lock_irqsave(&hdev->ptp->lock, flags);
-       writel(quo, hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
+       writel(quo & HCLGE_PTP_CYCLE_QUO_MASK,
+              hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
        writel(numerator, hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
-       writel(HCLGE_PTP_CYCLE_ADJ_UNIT,
-              hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
+       writel(cycle->den, hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
        writel(HCLGE_PTP_CYCLE_ADJ_EN,
               hdev->ptp->io_base + HCLGE_PTP_CYCLE_CFG_REG);
        spin_unlock_irqrestore(&hdev->ptp->lock, flags);
@@ -475,6 +493,10 @@ int hclge_ptp_init(struct hclge_dev *hdev)
                ret = hclge_ptp_create_clock(hdev);
                if (ret)
                        return ret;
+
+               ret = hclge_ptp_get_cycle(hdev);
+               if (ret)
+                       return ret;
        }
 
        ret = hclge_ptp_int_en(hdev, true);
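
Reading the cycle from the hardware replaces two hardcoded constants, and the adjfreq arithmetic then works in fractional nanoseconds: a cycle of quo + numer/den ns is held as adj_base = quo * den + numer in den-ths of a nanosecond, scaled by the requested ppb, divided by 10^9, and split back into quotient and remainder with div_u64_rem(). For the comment's 2.5 ns example with an assumed den of 10^8 (the real value comes from HCLGE_PTP_CYCLE_DEN_REG, and the step applying diff sits just outside the excerpt):

    /* adj_base = 2 * 100000000 + 50000000 = 250000000
     * ppb = +100: diff = 250000000 * 100 / 1e9 = 25
     * adjusted  = 250000025
     * div_u64_rem(250000025, 100000000) -> quo = 2, numer = 50000025
     */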
index 5a202b7..dbf5f4c 100644 (file)
@@ -29,6 +29,7 @@
 #define HCLGE_PTP_TIME_ADJ_REG         0x60
 #define HCLGE_PTP_TIME_ADJ_EN          BIT(0)
 #define HCLGE_PTP_CYCLE_QUO_REG                0x64
+#define HCLGE_PTP_CYCLE_QUO_MASK       GENMASK(7, 0)
 #define HCLGE_PTP_CYCLE_DEN_REG                0x68
 #define HCLGE_PTP_CYCLE_NUM_REG                0x6C
 #define HCLGE_PTP_CYCLE_CFG_REG                0x70
@@ -37,9 +38,7 @@
 #define HCLGE_PTP_CUR_TIME_SEC_L_REG   0x78
 #define HCLGE_PTP_CUR_TIME_NSEC_REG    0x7C
 
-#define HCLGE_PTP_CYCLE_ADJ_BASE       2
 #define HCLGE_PTP_CYCLE_ADJ_MAX                500000000
-#define HCLGE_PTP_CYCLE_ADJ_UNIT       100000000
 #define HCLGE_PTP_SEC_H_OFFSET         32u
 #define HCLGE_PTP_SEC_L_MASK           GENMASK(31, 0)
 
 #define HCLGE_PTP_FLAG_TX_EN           1
 #define HCLGE_PTP_FLAG_RX_EN           2
 
+struct hclge_ptp_cycle {
+       u32 quo;
+       u32 numer;
+       u32 den;
+};
+
 struct hclge_ptp {
        struct hclge_dev *hdev;
        struct ptp_clock *clock;
@@ -58,6 +63,7 @@ struct hclge_ptp {
        spinlock_t lock;        /* protects ptp registers */
        u32 ptp_cfg;
        u32 last_tx_seqid;
+       struct hclge_ptp_cycle cycle;
        unsigned long tx_start;
        unsigned long tx_cnt;
        unsigned long tx_skipped;
index 52eaf82..8784d61 100644 (file)
@@ -2641,6 +2641,16 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
 
 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
 {
+       struct hnae3_handle *nic = &hdev->nic;
+       int ret;
+
+       ret = hclgevf_en_hw_strip_rxvtag(nic, true);
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "failed to enable rx vlan offload, ret = %d\n", ret);
+               return ret;
+       }
+
        return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
                                       false);
 }
index 9b17735..772b2f8 100644 (file)
@@ -13,6 +13,7 @@ static int hclgevf_resp_to_errno(u16 resp_code)
        return resp_code ? -resp_code : 0;
 }
 
+#define HCLGEVF_MBX_MATCH_ID_START     1
 static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
 {
        /* this function should be called with mbx_resp.mbx_mutex held
@@ -21,6 +22,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
        hdev->mbx_resp.received_resp  = false;
        hdev->mbx_resp.origin_mbx_msg = 0;
        hdev->mbx_resp.resp_status    = 0;
+       hdev->mbx_resp.match_id++;
+       /* Ensure the value of match_id is not zero after the update */
+       if (hdev->mbx_resp.match_id == 0)
+               hdev->mbx_resp.match_id = HCLGEVF_MBX_MATCH_ID_START;
        memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE);
 }
 
@@ -115,6 +120,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
        if (need_resp) {
                mutex_lock(&hdev->mbx_resp.mbx_mutex);
                hclgevf_reset_mbx_resp_status(hdev);
+               req->match_id = hdev->mbx_resp.match_id;
                status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                if (status) {
                        dev_err(&hdev->pdev->dev,
@@ -211,6 +217,19 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
                                resp->additional_info[i] = *temp;
                                temp++;
                        }
+
+                       /* If match_id is not zero, it means the PF supports
+                        * match_id. If the match_id is right, the VF got the
+                        * right response; otherwise ignore the response.
+                        * The driver will clear hdev->mbx_resp when sending
+                        * the next message which needs a response.
+                        */
+                       if (req->match_id) {
+                               if (req->match_id == resp->match_id)
+                                       resp->received_resp = true;
+                       } else {
+                               resp->received_resp = true;
+                       }
                        break;
                case HCLGE_MBX_LINK_STAT_CHANGE:
                case HCLGE_MBX_ASSERTING_RESET:
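
For readers following the handshake: a minimal stand-alone sketch (assumed
names, not the driver's code) of the match_id scheme added above. The
requester bumps a nonzero sequence number per request; a response is
accepted only when it echoes that sequence, or when the peer does not
support matching and sends id 0:

#include <stdbool.h>
#include <stdint.h>

#define MATCH_ID_START 1  /* assumed, mirrors HCLGEVF_MBX_MATCH_ID_START */

struct mbx_state {
	uint8_t match_id;   /* sequence number carried in each request   */
	bool received_resp; /* set only when a matching response arrives */
};

/* Prepare a new request: invalidate old responses and bump the id,
 * keeping it nonzero so 0 can mean "peer lacks match_id support".
 */
static void prepare_request(struct mbx_state *s)
{
	s->received_resp = false;
	if (++s->match_id == 0)
		s->match_id = MATCH_ID_START;
}

/* Accept a response only if it echoes the outstanding request id, or
 * unconditionally when the request carried no id (legacy peer).
 */
static void handle_response(struct mbx_state *s, uint8_t req_id,
			    uint8_t resp_id)
{
	if (req_id == 0 || req_id == resp_id)
		s->received_resp = true;
}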
index 374a75d..a775c69 100644 (file)
@@ -1731,7 +1731,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
                tx_send_failed++;
                tx_dropped++;
                ret = NETDEV_TX_OK;
-               ibmvnic_tx_scrq_flush(adapter, tx_scrq);
                goto out;
        }
 
@@ -1753,6 +1752,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
                dev_kfree_skb_any(skb);
                tx_send_failed++;
                tx_dropped++;
+               ibmvnic_tx_scrq_flush(adapter, tx_scrq);
                ret = NETDEV_TX_OK;
                goto out;
        }
@@ -2420,9 +2420,10 @@ out:
 
 static void __ibmvnic_reset(struct work_struct *work)
 {
-       struct ibmvnic_rwi *rwi;
        struct ibmvnic_adapter *adapter;
        bool saved_state = false;
+       struct ibmvnic_rwi *tmprwi;
+       struct ibmvnic_rwi *rwi;
        unsigned long flags;
        u32 reset_state;
        int rc = 0;
@@ -2489,7 +2490,7 @@ static void __ibmvnic_reset(struct work_struct *work)
                } else {
                        rc = do_reset(adapter, rwi, reset_state);
                }
-               kfree(rwi);
+               tmprwi = rwi;
                adapter->last_reset_time = jiffies;
 
                if (rc)
@@ -2497,8 +2498,23 @@ static void __ibmvnic_reset(struct work_struct *work)
 
                rwi = get_next_rwi(adapter);
 
+               /*
+                * If there is another reset queued, free the previous rwi
+                * and process the new reset even if the previous reset failed
+                * (the previous reset could have failed because of a
+                * failover, for instance, so process the failover).
+                *
+                * If there are no resets queued and the previous reset failed,
+                * the adapter would be in an undefined state. So retry the
+                * previous reset as a hard reset.
+                */
+               if (rwi)
+                       kfree(tmprwi);
+               else if (rc)
+                       rwi = tmprwi;
+
                if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
-                           rwi->reset_reason == VNIC_RESET_MOBILITY))
+                           rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
                        adapter->force_reset_recovery = true;
        }
 
index d150dad..757a54c 100644 (file)
@@ -7664,6 +7664,7 @@ err_flashmap:
 err_ioremap:
        free_netdev(netdev);
 err_alloc_etherdev:
+       pci_disable_pcie_error_reporting(pdev);
        pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
index dbcae92..adfa276 100644 (file)
@@ -2227,6 +2227,7 @@ err_sw_init:
 err_ioremap:
        free_netdev(netdev);
 err_alloc_netdev:
+       pci_disable_pcie_error_reporting(pdev);
        pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
index 3e822ba..2c9e4ee 100644 (file)
@@ -980,7 +980,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
        default:
                /* if we got here and link is up something bad is afoot */
                netdev_info(netdev,
-                           "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
+                           "WARNING: Link is up but PHY type 0x%x is not recognized, or incorrect cable is in use\n",
                            hw_link_info->phy_type);
        }
 
@@ -5294,6 +5294,10 @@ flags_complete:
                                        dev_warn(&pf->pdev->dev,
                                                 "Device configuration forbids SW from starting the LLDP agent.\n");
                                        return -EINVAL;
+                               case I40E_AQ_RC_EAGAIN:
+                                       dev_warn(&pf->pdev->dev,
+                                                "Stop FW LLDP agent command is still being processed, please try again in a second.\n");
+                                       return -EBUSY;
                                default:
                                        dev_warn(&pf->pdev->dev,
                                                 "Starting FW LLDP agent failed: error: %s, %s\n",
index 861e59a..1d1f527 100644 (file)
@@ -4454,11 +4454,10 @@ int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
 }
 
 /**
- * i40e_vsi_control_tx - Start or stop a VSI's rings
+ * i40e_vsi_enable_tx - Start a VSI's rings
  * @vsi: the VSI being configured
- * @enable: start or stop the rings
  **/
-static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
+static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
        int i, pf_q, ret = 0;
@@ -4467,7 +4466,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
        for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
                ret = i40e_control_wait_tx_q(vsi->seid, pf,
                                             pf_q,
-                                            false /*is xdp*/, enable);
+                                            false /*is xdp*/, true);
                if (ret)
                        break;
 
@@ -4476,7 +4475,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
 
                ret = i40e_control_wait_tx_q(vsi->seid, pf,
                                             pf_q + vsi->alloc_queue_pairs,
-                                            true /*is xdp*/, enable);
+                                            true /*is xdp*/, true);
                if (ret)
                        break;
        }
@@ -4574,32 +4573,25 @@ int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
 }
 
 /**
- * i40e_vsi_control_rx - Start or stop a VSI's rings
+ * i40e_vsi_enable_rx - Start a VSI's rings
  * @vsi: the VSI being configured
- * @enable: start or stop the rings
  **/
-static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
+static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
        int i, pf_q, ret = 0;
 
        pf_q = vsi->base_queue;
        for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-               ret = i40e_control_wait_rx_q(pf, pf_q, enable);
+               ret = i40e_control_wait_rx_q(pf, pf_q, true);
                if (ret) {
                        dev_info(&pf->pdev->dev,
-                                "VSI seid %d Rx ring %d %sable timeout\n",
-                                vsi->seid, pf_q, (enable ? "en" : "dis"));
+                                "VSI seid %d Rx ring %d enable timeout\n",
+                                vsi->seid, pf_q);
                        break;
                }
        }
 
-       /* Due to HW errata, on Rx disable only, the register can indicate done
-        * before it really is. Needs 50ms to be sure
-        */
-       if (!enable)
-               mdelay(50);
-
        return ret;
 }
 
@@ -4612,29 +4604,47 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
        int ret = 0;
 
        /* do rx first for enable and last for disable */
-       ret = i40e_vsi_control_rx(vsi, true);
+       ret = i40e_vsi_enable_rx(vsi);
        if (ret)
                return ret;
-       ret = i40e_vsi_control_tx(vsi, true);
+       ret = i40e_vsi_enable_tx(vsi);
 
        return ret;
 }
 
+#define I40E_DISABLE_TX_GAP_MSEC       50
+
 /**
  * i40e_vsi_stop_rings - Stop a VSI's rings
  * @vsi: the VSI being configured
  **/
 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
 {
+       struct i40e_pf *pf = vsi->back;
+       int pf_q, err, q_end;
+
        /* When port TX is suspended, don't wait */
        if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
                return i40e_vsi_stop_rings_no_wait(vsi);
 
-       /* do rx first for enable and last for disable
-        * Ignore return value, we need to shutdown whatever we can
-        */
-       i40e_vsi_control_tx(vsi, false);
-       i40e_vsi_control_rx(vsi, false);
+       q_end = vsi->base_queue + vsi->num_queue_pairs;
+       for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+               i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
+
+       for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
+               err = i40e_control_wait_rx_q(pf, pf_q, false);
+               if (err)
+                       dev_info(&pf->pdev->dev,
+                                "VSI seid %d Rx ring %d dissable timeout\n",
+                                vsi->seid, pf_q);
+       }
+
+       msleep(I40E_DISABLE_TX_GAP_MSEC);
+       for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+               wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
+
+       i40e_vsi_wait_queues_disabled(vsi);
 }
 
 /**
@@ -7280,6 +7290,8 @@ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
        }
        if (vsi->num_queue_pairs <
            (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
+               dev_err(&vsi->back->pdev->dev,
+                       "Failed to create traffic channel, insufficient number of queues.\n");
                return -EINVAL;
        }
        if (sum_max_rate > i40e_get_link_speed(vsi)) {
@@ -13261,6 +13273,7 @@ static const struct net_device_ops i40e_netdev_ops = {
        .ndo_poll_controller    = i40e_netpoll,
 #endif
        .ndo_setup_tc           = __i40e_setup_tc,
+       .ndo_select_queue       = i40e_lan_select_queue,
        .ndo_set_features       = i40e_set_features,
        .ndo_set_vf_mac         = i40e_ndo_set_vf_mac,
        .ndo_set_vf_vlan        = i40e_ndo_set_vf_port_vlan,
index 38eb815..3f25bd8 100644 (file)
@@ -3631,6 +3631,56 @@ dma_error:
        return -1;
 }
 
+static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev,
+                                 const struct sk_buff *skb,
+                                 u16 num_tx_queues)
+{
+       u32 jhash_initval_salt = 0xd631614b;
+       u32 hash;
+
+       if (skb->sk && skb->sk->sk_hash)
+               hash = skb->sk->sk_hash;
+       else
+               hash = (__force u16)skb->protocol ^ skb->hash;
+
+       hash = jhash_1word(hash, jhash_initval_salt);
+
+       return (u16)(((u64)hash * num_tx_queues) >> 32);
+}
+
+u16 i40e_lan_select_queue(struct net_device *netdev,
+                         struct sk_buff *skb,
+                         struct net_device __always_unused *sb_dev)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_hw *hw;
+       u16 qoffset;
+       u16 qcount;
+       u8 tclass;
+       u16 hash;
+       u8 prio;
+
+       /* is DCB enabled at all? */
+       if (vsi->tc_config.numtc == 1)
+               return i40e_swdcb_skb_tx_hash(netdev, skb,
+                                             netdev->real_num_tx_queues);
+
+       prio = skb->priority;
+       hw = &vsi->back->hw;
+       tclass = hw->local_dcbx_config.etscfg.prioritytable[prio];
+       /* sanity check */
+       if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass))))
+               tclass = 0;
+
+       /* select a queue assigned for the given TC */
+       qcount = vsi->tc_config.tc_info[tclass].qcount;
+       hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount);
+
+       qoffset = vsi->tc_config.tc_info[tclass].qoffset;
+       return qoffset + hash;
+}
+
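
A side note on the hashing above: i40e_swdcb_skb_tx_hash() maps a 32-bit
hash onto the queue range with a multiply-shift rather than a modulo. A
tiny stand-alone sketch (illustrative name) of that mapping:

#include <stdint.h>

/* Map a 32-bit hash uniformly onto [0, n): (hash * n) / 2^32.
 * Avoids the division a modulo would need and is unbiased enough
 * for queue selection.
 */
static uint16_t hash_to_queue(uint32_t hash, uint16_t n)
{
	return (uint16_t)(((uint64_t)hash * n) >> 32);
}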
 /**
  * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
  * @xdpf: data to transmit
index 86fed05..bfc2845 100644 (file)
@@ -451,6 +451,8 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
 
 bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb,
+                         struct net_device *sb_dev);
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
 void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
index e612c24..44bafed 100644 (file)
@@ -3798,6 +3798,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 err_ioremap:
        free_netdev(netdev);
 err_alloc_etherdev:
+       pci_disable_pcie_error_reporting(pdev);
        pci_release_regions(pdev);
 err_pci_reg:
 err_dma:
index 7e6435d..171a7a6 100644 (file)
@@ -931,6 +931,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
  **/
 static int igb_request_msix(struct igb_adapter *adapter)
 {
+       unsigned int num_q_vectors = adapter->num_q_vectors;
        struct net_device *netdev = adapter->netdev;
        int i, err = 0, vector = 0, free_vector = 0;
 
@@ -939,7 +940,13 @@ static int igb_request_msix(struct igb_adapter *adapter)
        if (err)
                goto err_out;
 
-       for (i = 0; i < adapter->num_q_vectors; i++) {
+       if (num_q_vectors > MAX_Q_VECTORS) {
+               num_q_vectors = MAX_Q_VECTORS;
+               dev_warn(&adapter->pdev->dev,
+                        "The number of queue vectors (%d) is higher than max allowed (%d)\n",
+                        adapter->num_q_vectors, MAX_Q_VECTORS);
+       }
+       for (i = 0; i < num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
 
                vector++;
@@ -1678,14 +1685,15 @@ static bool is_any_txtime_enabled(struct igb_adapter *adapter)
  **/
 static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
 {
-       struct igb_ring *ring = adapter->tx_ring[queue];
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
+       struct igb_ring *ring;
        u32 tqavcc, tqavctrl;
        u16 value;
 
        WARN_ON(hw->mac.type != e1000_i210);
        WARN_ON(queue < 0 || queue > 1);
+       ring = adapter->tx_ring[queue];
 
        /* If any of the Qav features is enabled, configure queues as SR and
         * with HIGH PRIO. If none is, then configure them with LOW PRIO and
@@ -3615,6 +3623,7 @@ err_sw_init:
 err_ioremap:
        free_netdev(netdev);
 err_alloc_etherdev:
+       pci_disable_pcie_error_reporting(pdev);
        pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
@@ -4835,6 +4844,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
                                               DMA_TO_DEVICE);
                }
 
+               tx_buffer->next_to_watch = NULL;
+
                /* move us one more past the eop_desc for start of next pkt */
                tx_buffer++;
                i++;
index 9e0bbb2..5901ed9 100644 (file)
@@ -578,7 +578,7 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
        if (hw->phy.ops.read_reg)
                return hw->phy.ops.read_reg(hw, offset, data);
 
-       return 0;
+       return -EOPNOTSUPP;
 }
 
 void igc_reinit_locked(struct igc_adapter *);
index 9532309..e29aadb 100644 (file)
@@ -232,6 +232,8 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
                                igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
                }
 
+               tx_buffer->next_to_watch = NULL;
+
                /* move us one more past the eop_desc for start of next pkt */
                tx_buffer++;
                i++;
@@ -6054,6 +6056,7 @@ err_sw_init:
 err_ioremap:
        free_netdev(netdev);
 err_alloc_etherdev:
+       pci_disable_pcie_error_reporting(pdev);
        pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
index ffff69e..14aea40 100644 (file)
@@ -1825,7 +1825,8 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
                                struct sk_buff *skb)
 {
        if (ring_uses_build_skb(rx_ring)) {
-               unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
+               unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1;
+               unsigned long offset = (unsigned long)(skb->data) & mask;
 
                dma_sync_single_range_for_cpu(rx_ring->dev,
                                              IXGBE_CB(skb)->dma,
@@ -11067,6 +11068,7 @@ err_ioremap:
        disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
        free_netdev(netdev);
 err_alloc_etherdev:
+       pci_disable_pcie_error_reporting(pdev);
        pci_release_mem_regions(pdev);
 err_pci_reg:
 err_dma:
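
For context on the ixgbe_dma_sync_frag() change above: with order > 0
receive pages the buffer offset must be taken modulo the actual rx page
size, since ~PAGE_MASK only keeps the offset within a single PAGE_SIZE
page. A minimal sketch (illustrative name, assuming a power-of-two page
size):

#include <stdint.h>

/* Offset of an address within a power-of-two sized rx page. Masking
 * with (pg_size - 1) keeps all offset bits; ~PAGE_MASK would drop the
 * high bits whenever pg_size exceeds PAGE_SIZE.
 */
static unsigned long rx_buf_offset(unsigned long addr, unsigned long pg_size)
{
	return addr & (pg_size - 1);
}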
index caaea2c..e3e4676 100644 (file)
@@ -211,7 +211,7 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
 static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
                                          u32 *mykey, u32 *mysalt)
 {
-       struct net_device *dev = xs->xso.dev;
+       struct net_device *dev = xs->xso.real_dev;
        unsigned char *key_data;
        char *alg_name = NULL;
        int key_len;
@@ -260,12 +260,15 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
  **/
 static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
 {
-       struct net_device *dev = xs->xso.dev;
-       struct ixgbevf_adapter *adapter = netdev_priv(dev);
-       struct ixgbevf_ipsec *ipsec = adapter->ipsec;
+       struct net_device *dev = xs->xso.real_dev;
+       struct ixgbevf_adapter *adapter;
+       struct ixgbevf_ipsec *ipsec;
        u16 sa_idx;
        int ret;
 
+       adapter = netdev_priv(dev);
+       ipsec = adapter->ipsec;
+
        if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
                netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n",
                           xs->id.proto);
@@ -383,11 +386,14 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
  **/
 static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
 {
-       struct net_device *dev = xs->xso.dev;
-       struct ixgbevf_adapter *adapter = netdev_priv(dev);
-       struct ixgbevf_ipsec *ipsec = adapter->ipsec;
+       struct net_device *dev = xs->xso.real_dev;
+       struct ixgbevf_adapter *adapter;
+       struct ixgbevf_ipsec *ipsec;
        u16 sa_idx;
 
+       adapter = netdev_priv(dev);
+       ipsec = adapter->ipsec;
+
        if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
                sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
 
index 361bc4f..76a7777 100644 (file)
@@ -2299,19 +2299,19 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
                skb_frag_off_set(frag, pp->rx_offset_correction);
                skb_frag_size_set(frag, data_len);
                __skb_frag_set_page(frag, page);
-
-               /* last fragment */
-               if (len == *size) {
-                       struct skb_shared_info *sinfo;
-
-                       sinfo = xdp_get_shared_info_from_buff(xdp);
-                       sinfo->nr_frags = xdp_sinfo->nr_frags;
-                       memcpy(sinfo->frags, xdp_sinfo->frags,
-                              sinfo->nr_frags * sizeof(skb_frag_t));
-               }
        } else {
                page_pool_put_full_page(rxq->page_pool, page, true);
        }
+
+       /* last fragment */
+       if (len == *size) {
+               struct skb_shared_info *sinfo;
+
+               sinfo = xdp_get_shared_info_from_buff(xdp);
+               sinfo->nr_frags = xdp_sinfo->nr_frags;
+               memcpy(sinfo->frags, xdp_sinfo->frags,
+                      sinfo->nr_frags * sizeof(skb_frag_t));
+       }
        *size -= len;
 }
 
index 1a34556..cc8ac36 100644 (file)
@@ -10,4 +10,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o
 rvu_mbox-y := mbox.o rvu_trace.o
 rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
                  rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
-                 rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o
+                 rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o
index fac6474..544c96c 100644 (file)
@@ -86,6 +86,22 @@ bool is_lmac_valid(struct cgx *cgx, int lmac_id)
        return test_bit(lmac_id, &cgx->lmac_bmap);
 }
 
+/* Helper function to get the sequential index
+ * of an enabled LMAC within a CGX
+ */
+static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
+{
+       int tmp, id = 0;
+
+       for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+               if (tmp == lmac_id)
+                       break;
+               id++;
+       }
+
+       return id;
+}
+
 struct mac_ops *get_mac_ops(void *cgxd)
 {
        if (!cgxd)
@@ -211,37 +227,257 @@ static u64 mac2u64 (u8 *mac_addr)
        return mac;
 }
 
+static void cfg2mac(u64 cfg, u8 *mac_addr)
+{
+       int i, index = 0;
+
+       for (i = ETH_ALEN - 1; i >= 0; i--, index++)
+               mac_addr[i] = (cfg >> (8 * index)) & 0xFF;
+}
+
 int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
 {
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+       struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
+       int index, id;
        u64 cfg;
 
+       /* access mac_ops to know csr_offset */
        mac_ops = cgx_dev->mac_ops;
+
        /* copy 6bytes from macaddr */
        /* memcpy(&cfg, mac_addr, 6); */
 
        cfg = mac2u64 (mac_addr);
 
-       cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
+       id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+       index = id * lmac->mac_to_index_bmap.max;
+
+       cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
                  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
 
        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
-       cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
+       cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
+               CGX_DMAC_MCAST_MODE);
        cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
 
        return 0;
 }
 
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
+{
+       struct mac_ops *mac_ops;
+       struct cgx *cgx = cgxd;
+
+       if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
+               return 0;
+
+       /* Get mac_ops to know csr offset */
+       mac_ops = cgx->mac_ops;
+
+       return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+}
+
+u64 cgx_read_dmac_entry(void *cgxd, int index)
+{
+       struct mac_ops *mac_ops;
+       struct cgx *cgx;
+
+       if (!cgxd)
+               return 0;
+
+       cgx = cgxd;
+       mac_ops = cgx->mac_ops;
+       return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
+}
+
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
+{
+       struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+       struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+       struct mac_ops *mac_ops;
+       int index, idx;
+       u64 cfg = 0;
+       int id;
+
+       if (!lmac)
+               return -ENODEV;
+
+       mac_ops = cgx_dev->mac_ops;
+       /* Get available index where entry is to be installed */
+       idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
+       if (idx < 0)
+               return idx;
+
+       id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+       index = id * lmac->mac_to_index_bmap.max + idx;
+
+       cfg = mac2u64(mac_addr);
+       cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+       cfg |= ((u64)lmac_id << 49);
+       cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+
+       cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+       cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);
+
+       if (is_multicast_ether_addr(mac_addr)) {
+               cfg &= ~GENMASK_ULL(2, 1);
+               cfg |= CGX_DMAC_MCAST_MODE_CAM;
+               lmac->mcast_filters_count++;
+       } else if (!lmac->mcast_filters_count) {
+               cfg |= CGX_DMAC_MCAST_MODE;
+       }
+
+       cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+       return idx;
+}
+
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
+{
+       struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+       struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+       struct mac_ops *mac_ops;
+       u8 index = 0, id;
+       u64 cfg;
+
+       if (!lmac)
+               return -ENODEV;
+
+       mac_ops = cgx_dev->mac_ops;
+       /* Restore index 0 to its default init value as done during
+        * cgx_lmac_init
+        */
+       set_bit(0, lmac->mac_to_index_bmap.bmap);
+
+       id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+       index = id * lmac->mac_to_index_bmap.max + index;
+       cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+       /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
+       cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+       cfg &= ~CGX_DMAC_CAM_ACCEPT;
+       cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
+       cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+       return 0;
+}
+
+/* Allows the caller to change the MAC address associated with an index
+ * in the DMAC filter table, including index 0, which is reserved for
+ * the interface MAC address
+ */
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
+{
+       struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+       struct mac_ops *mac_ops;
+       struct lmac *lmac;
+       u64 cfg;
+       int id;
+
+       lmac = lmac_pdata(lmac_id, cgx_dev);
+       if (!lmac)
+               return -ENODEV;
+
+       mac_ops = cgx_dev->mac_ops;
+       /* Validate the index */
+       if (index >= lmac->mac_to_index_bmap.max)
+               return -EINVAL;
+
+       /* ensure index is already set */
+       if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
+               return -EINVAL;
+
+       id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+       index = id * lmac->mac_to_index_bmap.max + index;
+
+       cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+       cfg &= ~CGX_RX_DMAC_ADR_MASK;
+       cfg |= mac2u64(mac_addr);
+
+       cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+       return 0;
+}
+
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
+{
+       struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+       struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+       struct mac_ops *mac_ops;
+       u8 mac[ETH_ALEN];
+       u64 cfg;
+       int id;
+
+       if (!lmac)
+               return -ENODEV;
+
+       mac_ops = cgx_dev->mac_ops;
+       /* Validate the index */
+       if (index >= lmac->mac_to_index_bmap.max)
+               return -EINVAL;
+
+       /* Skip deletion for reserved index i.e. index 0 */
+       if (index == 0)
+               return 0;
+
+       rvu_free_rsrc(&lmac->mac_to_index_bmap, index);
+
+       id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+       index = id * lmac->mac_to_index_bmap.max + index;
+
+       /* Read MAC address to check whether it is ucast or mcast */
+       cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+
+       cfg2mac(cfg, mac);
+       if (is_multicast_ether_addr(mac))
+               lmac->mcast_filters_count--;
+
+       if (!lmac->mcast_filters_count) {
+               cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+               cfg &= ~GENMASK_ULL(2, 1);
+               cfg |= CGX_DMAC_MCAST_MODE;
+               cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+       }
+
+       cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+       return 0;
+}
+
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
+{
+       struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+       struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+
+       if (lmac)
+               return lmac->mac_to_index_bmap.max;
+
+       return 0;
+}
+
 u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
 {
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+       struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
+       int index;
        u64 cfg;
+       int id;
 
        mac_ops = cgx_dev->mac_ops;
 
-       cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
+       id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+       index = id * lmac->mac_to_index_bmap.max;
+
+       cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
        return cfg & CGX_RX_DMAC_ADR_MASK;
 }
 
@@ -297,35 +533,51 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
 void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
 {
        struct cgx *cgx = cgx_get_pdata(cgx_id);
+       struct lmac *lmac = lmac_pdata(lmac_id, cgx);
+       u16 max_dmac = lmac->mac_to_index_bmap.max;
        struct mac_ops *mac_ops;
+       int index, i;
        u64 cfg = 0;
+       int id;
 
        if (!cgx)
                return;
 
+       id = get_sequence_id_of_lmac(cgx, lmac_id);
+
        mac_ops = cgx->mac_ops;
        if (enable) {
                /* Enable promiscuous mode on LMAC */
                cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
-               cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
-               cfg |= CGX_DMAC_BCAST_MODE;
+               cfg &= ~CGX_DMAC_CAM_ACCEPT;
+               cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
                cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
 
-               cfg = cgx_read(cgx, 0,
-                              (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
-               cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
-               cgx_write(cgx, 0,
-                         (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+               for (i = 0; i < max_dmac; i++) {
+                       index = id * max_dmac + i;
+                       cfg = cgx_read(cgx, 0,
+                                      (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+                       cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
+                       cgx_write(cgx, 0,
+                                 (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
+               }
        } else {
                /* Disable promiscuous mode */
                cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
                cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
-               cfg = cgx_read(cgx, 0,
-                              (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
-               cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
-               cgx_write(cgx, 0,
-                         (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+               for (i = 0; i < max_dmac; i++) {
+                       index = id * max_dmac + i;
+                       cfg = cgx_read(cgx, 0,
+                                      (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+                       if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
+                               cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+                               cgx_write(cgx, 0,
+                                         (CGXX_CMRX_RX_DMAC_CAM0 +
+                                          index * 0x8),
+                                         cfg);
+                       }
+               }
        }
 }
 
@@ -1234,6 +1486,15 @@ static int cgx_lmac_init(struct cgx *cgx)
                }
 
                lmac->cgx = cgx;
+               lmac->mac_to_index_bmap.max =
+                               MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
+               err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
+               if (err)
+                       return err;
+
+               /* Reserve first entry for default MAC address */
+               set_bit(0, lmac->mac_to_index_bmap.bmap);
+
                init_waitqueue_head(&lmac->wq_cmd_cmplt);
                mutex_init(&lmac->cmd_lock);
                spin_lock_init(&lmac->event_cb_lock);
@@ -1243,8 +1504,8 @@ static int cgx_lmac_init(struct cgx *cgx)
 
                /* Add reference */
                cgx->lmac_idmap[lmac->lmac_id] = lmac;
-               cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
                set_bit(lmac->lmac_id, &cgx->lmac_bmap);
+               cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
        }
 
        return cgx_lmac_verify_fwi_version(cgx);
@@ -1274,6 +1535,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
                        continue;
                cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
                cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
+               kfree(lmac->mac_to_index_bmap.bmap);
                kfree(lmac->name);
                kfree(lmac);
        }
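
To make the DMAC CAM layout used throughout cgx.c concrete: each enabled
LMAC owns a contiguous window of mac_to_index_bmap.max entries, addressed
by its sequential position among the enabled LMACs. A stand-alone sketch
(illustrative names) of that indexing:

#include <stdint.h>

/* Sequential position of lmac_id among enabled LMACs: a popcount of
 * the enabled-LMAC bitmap below lmac_id, as get_sequence_id_of_lmac()
 * computes with a loop.
 */
static int lmac_seq_id(unsigned long lmac_bmap, int lmac_id)
{
	return __builtin_popcountl(lmac_bmap & ((1UL << lmac_id) - 1));
}

/* CAM index of a filter slot: the LMAC's window starts at
 * seq_id * per_lmac_max; slot selects the entry within the window.
 */
static int dmac_cam_index(unsigned long lmac_bmap, int lmac_id,
			  int per_lmac_max, int slot)
{
	return lmac_seq_id(lmac_bmap, lmac_id) * per_lmac_max + slot;
}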
index 1252126..237ba2b 100644 (file)
@@ -23,6 +23,7 @@
 
 #define CGX_ID_MASK                    0x7
 #define MAX_LMAC_PER_CGX               4
+#define MAX_DMAC_ENTRIES_PER_CGX       32
 #define CGX_FIFO_LEN                   65536 /* 64K for both Rx & Tx */
 #define CGX_OFFSET(x)                  ((x) * MAX_LMAC_PER_CGX)
 
 #define CGXX_CMRX_RX_DMAC_CTL0         (0x1F8 + mac_ops->csr_offset)
 #define CGX_DMAC_CTL0_CAM_ENABLE       BIT_ULL(3)
 #define CGX_DMAC_CAM_ACCEPT            BIT_ULL(3)
+#define CGX_DMAC_MCAST_MODE_CAM                BIT_ULL(2)
 #define CGX_DMAC_MCAST_MODE            BIT_ULL(1)
 #define CGX_DMAC_BCAST_MODE            BIT_ULL(0)
 #define CGXX_CMRX_RX_DMAC_CAM0         (0x200 + mac_ops->csr_offset)
 #define CGX_DMAC_CAM_ADDR_ENABLE       BIT_ULL(48)
+#define CGX_DMAC_CAM_ENTRY_LMACID      GENMASK_ULL(50, 49)
 #define CGXX_CMRX_RX_DMAC_CAM1         0x400
 #define CGX_RX_DMAC_ADR_MASK           GENMASK_ULL(47, 0)
 #define CGXX_CMRX_TX_STAT0             0x700
@@ -139,7 +142,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
 int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
 int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
 int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id);
 u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index);
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id);
 void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
 void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable);
 int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
@@ -165,4 +172,7 @@ u8 cgx_get_lmacid(void *cgxd, u8 lmac_index);
 unsigned long cgx_get_lmac_bmap(void *cgxd);
 void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val);
 u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset);
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index);
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id);
+u64 cgx_read_dmac_entry(void *cgxd, int index);
 #endif /* CGX_H */
index 45706fd..a8b7b1c 100644 (file)
 #include "rvu.h"
 #include "cgx.h"
 /**
- * struct lmac
+ * struct lmac - per lmac locks and properties
  * @wq_cmd_cmplt:      waitq to keep the process blocked until cmd completion
  * @cmd_lock:          Lock to serialize the command interface
  * @resp:              command response
  * @link_info:         link related information
+ * @mac_to_index_bmap: Mac address to CGX table index mapping
  * @event_cb:          callback for linkchange events
  * @event_cb_lock:     lock for serializing callback with unregister
- * @cmd_pend:          flag set before new command is started
- *                     flag cleared after command response is received
  * @cgx:               parent cgx port
+ * @mcast_filters_count:  Number of multicast filters installed
  * @lmac_id:           lmac port id
+ * @cmd_pend:          flag set before new command is started
+ *                     flag cleared after command response is received
  * @name:              lmac port name
  */
 struct lmac {
@@ -29,12 +31,14 @@ struct lmac {
        struct mutex cmd_lock;
        u64 resp;
        struct cgx_link_user_info link_info;
+       struct rsrc_bmap mac_to_index_bmap;
        struct cgx_event_cb event_cb;
        /* lock for serializing callback with unregister */
        spinlock_t event_cb_lock;
-       bool cmd_pend;
        struct cgx *cgx;
+       u8 mcast_filters_count;
        u8 lmac_id;
+       bool cmd_pend;
        char *name;
 };
 
index 770d862..f5ec39d 100644 (file)
@@ -134,6 +134,8 @@ M(MSIX_OFFSET,              0x005, msix_offset, msg_req, msix_offset_rsp)   \
 M(VF_FLR,              0x006, vf_flr, msg_req, msg_rsp)                \
 M(PTP_OP,              0x007, ptp_op, ptp_req, ptp_rsp)                \
 M(GET_HW_CAP,          0x008, get_hw_cap, msg_req, get_hw_cap_rsp)     \
+M(LMTST_TBL_SETUP,     0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req,    \
+                               msg_rsp)                                \
 M(SET_VF_PERM,         0x00b, set_vf_perm, set_vf_perm, msg_rsp)       \
 /* CGX mbox IDs (range 0x200 - 0x3FF) */                               \
 M(CGX_START_RXTX,      0x200, cgx_start_rxtx, msg_req, msg_rsp)        \
@@ -163,7 +165,15 @@ M(CGX_SET_LINK_MODE,       0x214, cgx_set_link_mode, cgx_set_link_mode_req,\
 M(CGX_FEATURES_GET,    0x215, cgx_features_get, msg_req,               \
                               cgx_features_info_msg)                   \
 M(RPM_STATS,           0x216, rpm_stats, msg_req, rpm_stats_rsp)       \
- /* NPA mbox IDs (range 0x400 - 0x5FF) */                              \
+M(CGX_MAC_ADDR_ADD,    0x217, cgx_mac_addr_add, cgx_mac_addr_add_req,    \
+                              cgx_mac_addr_add_rsp)            \
+M(CGX_MAC_ADDR_DEL,    0x218, cgx_mac_addr_del, cgx_mac_addr_del_req,    \
+                              msg_rsp)         \
+M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req,    \
+                                 cgx_max_dmac_entries_get_rsp)         \
+M(CGX_MAC_ADDR_RESET,  0x21A, cgx_mac_addr_reset, msg_req, msg_rsp)    \
+M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \
+                              msg_rsp)                                 \
 /* NPA mbox IDs (range 0x400 - 0x5FF) */                               \
 M(NPA_LF_ALLOC,                0x400, npa_lf_alloc,                            \
                                npa_lf_alloc_req, npa_lf_alloc_rsp)     \
@@ -401,6 +411,38 @@ struct cgx_mac_addr_set_or_get {
        u8 mac_addr[ETH_ALEN];
 };
 
+/* Structure for requesting the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_req {
+       struct mbox_msghdr hdr;
+       u8 mac_addr[ETH_ALEN];
+};
+
+/* Structure for response against the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_rsp {
+       struct mbox_msghdr hdr;
+       u8 index;
+};
+
+/* Structure for requesting the operation to
+ * delete DMAC filter entry from CGX interface
+ */
+struct cgx_mac_addr_del_req {
+       struct mbox_msghdr hdr;
+       u8 index;
+};
+
+/* Structure for response against the operation to
+ * get maximum supported DMAC filter entries
+ */
+struct cgx_max_dmac_entries_get_rsp {
+       struct mbox_msghdr hdr;
+       u8 max_dmac_filters;
+};
+
 struct cgx_link_user_info {
        uint64_t link_up:1;
        uint64_t full_duplex:1;
@@ -499,6 +541,12 @@ struct cgx_set_link_mode_rsp {
        int status;
 };
 
+struct cgx_mac_addr_update_req {
+       struct mbox_msghdr hdr;
+       u8 mac_addr[ETH_ALEN];
+       u8 index;
+};
+
 #define RVU_LMAC_FEAT_FC               BIT_ULL(0) /* pause frames */
 #define RVU_LMAC_FEAT_PTP              BIT_ULL(1) /* precision time protocol */
 #define RVU_MAC_VERSION                        BIT_ULL(2)
@@ -1278,6 +1326,14 @@ struct set_vf_perm  {
        u64     flags;
 };
 
+struct lmtst_tbl_setup_req {
+       struct mbox_msghdr hdr;
+       u16 base_pcifunc;
+       u8  use_local_lmt_region;
+       u64 lmt_iova;
+       u64 rsvd[4];
+};
+
 /* CPT mailbox error codes
  * Range 901 - 1000.
  */
index 19bad9a..243cf80 100644 (file)
@@ -151,7 +151,10 @@ enum npc_kpu_lh_ltype {
  * Software assigns pkind for each incoming port such as CGX
  * Ethernet interfaces, LBK interfaces, etc.
  */
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_VLAN_EXDSA_PKIND
+
 enum npc_pkind_type {
+       NPC_RX_LBK_PKIND = 0ULL,
        NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
        NPC_RX_CHLEN24B_PKIND = 57ULL,
        NPC_RX_CPT_HDR_PKIND,
index 0b09294..5fe277e 100644 (file)
@@ -391,8 +391,10 @@ void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
 
        /* Get numVFs attached to this PF and first HWVF */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
-       *numvfs = (cfg >> 12) & 0xFF;
-       *hwvf = cfg & 0xFFF;
+       if (numvfs)
+               *numvfs = (cfg >> 12) & 0xFF;
+       if (hwvf)
+               *hwvf = cfg & 0xFFF;
 }
 
 static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
@@ -1314,7 +1316,7 @@ int rvu_mbox_handler_detach_resources(struct rvu *rvu,
        return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
 }
 
-static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
+int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
 {
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        int blkaddr = BLKADDR_NIX0, vf;
@@ -2333,6 +2335,7 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
        rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
        rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
        rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
+       rvu_reset_lmt_map_tbl(rvu, pcifunc);
        rvu_detach_rsrcs(rvu, NULL, pcifunc);
        mutex_unlock(&rvu->flr_lock);
 }
@@ -2858,6 +2861,12 @@ static int rvu_enable_sriov(struct rvu *rvu)
        if (!vfs)
                return 0;
 
+       /* LBK channel number 63 is used for switching packets between
+        * CGX mapped VFs. Hence limit LBK pairs to 62.
+        */
+       if (vfs > 62)
+               vfs = 62;
+
        /* Save VFs number for reference in VF interrupts handlers.
         * Since interrupts might start arriving during SRIOV enablement
         * ordinary API cannot be used to get number of enabled VFs.
@@ -3000,6 +3009,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        /* Initialize debugfs */
        rvu_dbg_init(rvu);
 
+       mutex_init(&rvu->rswitch.switch_lock);
+
        return 0;
 err_dl:
        rvu_unregister_dl(rvu);
index 9e5d9ba..91503fb 100644 (file)
@@ -243,6 +243,7 @@ struct rvu_pfvf {
        u8      nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
        u8      nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
        u8      nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
+       u64     lmt_base_addr; /* Preserving the pcifunc's lmtst base addr */
        unsigned long flags;
 };
 
@@ -414,6 +415,16 @@ struct npc_kpu_profile_adapter {
        size_t                          kpus;
 };
 
+#define RVU_SWITCH_LBK_CHAN    63
+
+struct rvu_switch {
+       struct mutex switch_lock; /* Serialize flow installation */
+       u32 used_entries;
+       u16 *entry2pcifunc;
+       u16 mode;
+       u16 start_entry;
+};
+
 struct rvu {
        void __iomem            *afreg_base;
        void __iomem            *pfreg_base;
@@ -444,6 +455,7 @@ struct rvu {
 
        /* CGX */
 #define PF_CGXMAP_BASE         1 /* PF 0 is reserved for RVU PF */
+       u16                     cgx_mapped_vfs; /* maximum CGX mapped VFs */
        u8                      cgx_mapped_pfs;
        u8                      cgx_cnt_max;     /* CGX port count max */
        u8                      *pf2cgxlmac_map; /* pf to cgx_lmac map */
@@ -476,6 +488,9 @@ struct rvu {
        struct rvu_debugfs      rvu_dbg;
 #endif
        struct rvu_devlink      *rvu_dl;
+
+       /* RVU switch implementation over NPC with DMAC rules */
+       struct rvu_switch       rswitch;
 };
 
 static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -656,6 +671,8 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
 int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
 int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
                           int rxtxflag, u64 *stat);
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc);
+
 /* NPA APIs */
 int rvu_npa_init(struct rvu *rvu);
 void rvu_npa_freemem(struct rvu *rvu);
@@ -688,6 +705,7 @@ int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
                        struct nix_cn10k_aq_enq_req *aq_req,
                        struct nix_cn10k_aq_enq_rsp *aq_rsp,
                        u16 pcifunc, u8 ctype, u32 qidx);
+int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc);
 
 /* NPC APIs */
 int rvu_npc_init(struct rvu *rvu);
@@ -741,6 +759,7 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
 bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
 u32  rvu_cgx_get_fifolen(struct rvu *rvu);
 void *rvu_first_cgx_pdata(struct rvu *rvu);
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
 
 int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
                             int type);
@@ -754,6 +773,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
 int rvu_set_channels_base(struct rvu *rvu);
 void rvu_program_channels(struct rvu *rvu);
 
+/* CN10K RVU - LMT*/
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
+
 #ifdef CONFIG_DEBUG_FS
 void rvu_dbg_init(struct rvu *rvu);
 void rvu_dbg_exit(struct rvu *rvu);
@@ -761,4 +783,10 @@ void rvu_dbg_exit(struct rvu *rvu);
 static inline void rvu_dbg_init(struct rvu *rvu) {}
 static inline void rvu_dbg_exit(struct rvu *rvu) {}
 #endif
+
+/* RVU Switch */
+void rvu_switch_enable(struct rvu *rvu);
+void rvu_switch_disable(struct rvu *rvu);
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
+
 #endif /* RVU_H */
index 6e2bf4f..fe99ac4 100644 (file)
@@ -63,7 +63,7 @@ static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
        return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
 }
 
-static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
 {
        unsigned long pfmap;
 
@@ -126,6 +126,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
        unsigned long lmac_bmap;
        int size, free_pkind;
        int cgx, lmac, iter;
+       int numvfs, hwvfs;
 
        if (!cgx_cnt_max)
                return 0;
@@ -166,6 +167,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
                        pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
                        rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
                        rvu->cgx_mapped_pfs++;
+                       rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
+                       rvu->cgx_mapped_vfs += numvfs;
                        pf++;
                }
        }
@@ -454,6 +457,31 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
        return 0;
 }
 
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
+{
+       int pf = rvu_get_pf(pcifunc);
+       int i = 0, lmac_count = 0;
+       u8 max_dmac_filters;
+       u8 cgx_id, lmac_id;
+       void *cgx_dev;
+
+       if (!is_cgx_config_permitted(rvu, pcifunc))
+               return;
+
+       rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+       cgx_dev = cgx_get_pdata(cgx_id);
+       lmac_count = cgx_get_lmac_cnt(cgx_dev);
+       max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;
+
+       for (i = 0; i < max_dmac_filters; i++)
+               cgx_lmac_addr_del(cgx_id, lmac_id, i);
+
+       /* As cgx_lmac_addr_del() does not clear the entry for index 0,
+        * it needs to be done explicitly
+        */
+       cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
+
 int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
                                    struct msg_rsp *rsp)
 {
@@ -557,6 +585,63 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
        return 0;
 }
 
+int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
+                                     struct cgx_mac_addr_add_req *req,
+                                     struct cgx_mac_addr_add_rsp *rsp)
+{
+       int pf = rvu_get_pf(req->hdr.pcifunc);
+       u8 cgx_id, lmac_id;
+       int rc = 0;
+
+       if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+               return -EPERM;
+
+       rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+       rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
+       if (rc >= 0) {
+               rsp->index = rc;
+               return 0;
+       }
+
+       return rc;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
+                                     struct cgx_mac_addr_del_req *req,
+                                     struct msg_rsp *rsp)
+{
+       int pf = rvu_get_pf(req->hdr.pcifunc);
+       u8 cgx_id, lmac_id;
+
+       if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+               return -EPERM;
+
+       rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+       return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
+}
+
+int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
+                                            struct msg_req *req,
+                                            struct cgx_max_dmac_entries_get_rsp
+                                            *rsp)
+{
+       int pf = rvu_get_pf(req->hdr.pcifunc);
+       u8 cgx_id, lmac_id;
+
+       /* If the msg is received from PFs which are not mapped to CGX LMACs,
+        * or from a VF, then no entries are allocated for DMAC filters at
+        * CGX level. So return zero.
+        */
+       if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
+               rsp->max_dmac_filters = 0;
+               return 0;
+       }
+
+       rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+       rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
+       return 0;
+}
+
 int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
                                      struct cgx_mac_addr_set_or_get *req,
                                      struct cgx_mac_addr_set_or_get *rsp)
@@ -953,3 +1038,30 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
        rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
        return 0;
 }
+
+int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
+                                       struct msg_rsp *rsp)
+{
+       int pf = rvu_get_pf(req->hdr.pcifunc);
+       u8 cgx_id, lmac_id;
+
+       if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+               return -EPERM;
+
+       rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+       return cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
+
+int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
+                                        struct cgx_mac_addr_update_req *req,
+                                        struct msg_rsp *rsp)
+{
+       int pf = rvu_get_pf(req->hdr.pcifunc);
+       u8 cgx_id, lmac_id;
+
+       if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+               return -EPERM;
+
+       rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+       return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
+}
index 7d9e71c..8d48b64 100644 (file)
 #include "cgx.h"
 #include "rvu_reg.h"
 
+/* RVU LMTST */
+#define LMT_TBL_OP_READ                0
+#define LMT_TBL_OP_WRITE       1
+#define LMT_MAP_TABLE_SIZE     (128 * 1024)
+#define LMT_MAPTBL_ENTRY_SIZE  16
+
+/* Function to perform operations (read/write) on lmtst map table */
+static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
+                              int lmt_tbl_op)
+{
+       void __iomem *lmt_map_base;
+       u64 tbl_base;
+
+       tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+
+       lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
+       if (!lmt_map_base) {
+               dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
+               return -ENOMEM;
+       }
+
+       if (lmt_tbl_op == LMT_TBL_OP_READ) {
+               *val = readq(lmt_map_base + index);
+       } else {
+               writeq((*val), (lmt_map_base + index));
+               /* Flush the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
+                * changes effective. Writing 1 triggers the flush; the read
+                * back acts as a barrier and sets up a data dependency.
+                * Write 0 after the write of 1 to complete the flush.
+                */
+               rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
+               rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
+               rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
+       }
+
+       iounmap(lmt_map_base);
+       return 0;
+}
+
+static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
+{
+       return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
+               (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
+}
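
Restating the index computation above as a stand-alone sketch (assumed
types; the entry size mirrors LMT_MAPTBL_ENTRY_SIZE): PFs are laid out
with a stride of total_vfs slots, and the VF number indexes within that
stride:

#include <stdint.h>

#define LMT_ENTRY_SIZE 16  /* bytes per map-table entry, as above */

static uint32_t lmt_tbl_index(uint16_t pf, uint16_t func, uint16_t total_vfs)
{
	/* slot number * entry size gives the byte offset into the table */
	return ((uint32_t)pf * total_vfs + func) * LMT_ENTRY_SIZE;
}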
+
+static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+                          u64 iova, u64 *lmt_addr)
+{
+       u64 pa, val, pf;
+       int err;
+
+       if (!iova) {
+               dev_err(rvu->dev, "%s Requested Null address for transulation\n", __func__);
+               return -EINVAL;
+       }
+
+       rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
+       pf = rvu_get_pf(pcifunc) & 0x1F;
+       val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
+             ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
+       rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
+
+       err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
+       if (err) {
+               dev_err(rvu->dev, "%s LMTLINE iova transulation failed\n", __func__);
+               return err;
+       }
+       val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
+       if (val & ~0x1ULL) {
+               dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val);
+               return -EIO;
+       }
+       /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT1[60:21]
+        * PA[11:0] = IOVA[11:0]
+        */
+       pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT1) >> 21;
+       pa &= GENMASK_ULL(39, 0);
+       *lmt_addr = (pa << 12) | (iova  & 0xFFF);
+
+       return 0;
+}
+
+static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
+{
+       struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+       u32 tbl_idx;
+       int err = 0;
+       u64 val;
+
+       /* Read the current lmt addr of pcifunc */
+       tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+       err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
+       if (err) {
+               dev_err(rvu->dev,
+                       "Failed to read LMT map table: index 0x%x err %d\n",
+                       tbl_idx, err);
+               return err;
+       }
+
+       /* Store the secondary's LMT base address as it needs to be
+        * restored during FLR. Also make sure this default value doesn't
+        * get overwritten by multiple calls to this mailbox.
+        */
+       if (!pfvf->lmt_base_addr)
+               pfvf->lmt_base_addr = val;
+
+       /* Update the LMT table with new addr */
+       err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
+       if (err) {
+               dev_err(rvu->dev,
+                       "Failed to update LMT map table: index 0x%x err %d\n",
+                       tbl_idx, err);
+               return err;
+       }
+       return 0;
+}
+
+int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
+                                    struct lmtst_tbl_setup_req *req,
+                                    struct msg_rsp *rsp)
+{
+       u64 lmt_addr, val;
+       u32 pri_tbl_idx;
+       int err = 0;
+
+       /* Check if the PF_FUNC wants to use its own local memory as the
+        * LMTLINE region; if so, convert that IOVA to a physical address
+        * and populate the LMT table with that address.
+        */
+       if (req->use_local_lmt_region) {
+               err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
+                                     req->lmt_iova, &lmt_addr);
+               if (err < 0)
+                       return err;
+
+               /* Update the lmt addr for this PFFUNC in the LMT table */
+               err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
+               if (err)
+                       return err;
+       }
+
+       /* Reconfigure the LMTST map table for LMT-region shared mode, i.e.
+        * make multiple PF_FUNCs share one LMTLINE region. The primary/base
+        * pcifunc (passed as an argument in the mailbox request) is the one
+        * whose LMT base address gets shared with the secondary pcifuncs
+        * (the callers of this mailbox).
+        */
+       if (req->base_pcifunc) {
+               /* Calculate the LMT table index corresponding to the
+                * primary pcifunc.
+                */
+               pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);
+
+               /* Read the base lmt addr of the primary pcifunc */
+               err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
+                                         LMT_TBL_OP_READ);
+               if (err) {
+                       dev_err(rvu->dev,
+                               "Failed to read LMT map table: index 0x%x err %d\n",
+                               pri_tbl_idx, err);
+                       return err;
+               }
+
+               /* Update the base lmt addr of secondary with primary's base
+                * lmt addr.
+                */
+               err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+/* Reset the LMTST map table to the original base addresses */
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
+{
+       struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+       u32 tbl_idx;
+       int err;
+
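+       /* The LMT map table exists only on CN10K; nothing to revert on OcteonTx2 */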
+       if (is_rvu_otx2(rvu))
+               return;
+
+       if (pfvf->lmt_base_addr) {
+               /* This corresponds to lmt map table index */
+               tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+               /* Revert to the original LMT base address for this
+                * pcifunc.
+                */
+               err = lmtst_map_table_ops(rvu, tbl_idx, &pfvf->lmt_base_addr,
+                                         LMT_TBL_OP_WRITE);
+               if (err)
+                       dev_err(rvu->dev,
+                               "Failed to update LMT map table: index 0x%x err %d\n",
+                               tbl_idx, err);
+               pfvf->lmt_base_addr = 0;
+       }
+}
+
 int rvu_set_channels_base(struct rvu *rvu)
 {
        struct rvu_hwinfo *hw = rvu->hw;
index 3cc3c6f..9b2dfbf 100644 (file)
@@ -1971,10 +1971,9 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
        return err;
 }
 
-static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
 {
        struct dentry *current_dir;
-       int err, lmac_id;
        char *buf;
 
        current_dir = filp->file->f_path.dentry->d_parent;
@@ -1982,17 +1981,87 @@ static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
        if (!buf)
                return -EINVAL;
 
-       err = kstrtoint(buf + 1, 10, &lmac_id);
-       if (!err) {
-               err = cgx_print_stats(filp, lmac_id);
-               if (err)
-                       return err;
-       }
+       return kstrtoint(buf + 1, 10, lmac_id);
+}
+
+static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+{
+       int lmac_id, err;
+
+       err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+       if (!err)
+               return cgx_print_stats(filp, lmac_id);
+
        return err;
 }
 
 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
 
+static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
+{
+       struct pci_dev *pdev = NULL;
+       void *cgxd = s->private;
+       char *bcast, *mcast;
+       u16 index, domain;
+       u8 dmac[ETH_ALEN];
+       struct rvu *rvu;
+       u64 cfg, mac;
+       int pf;
+
+       rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+                                            PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+       if (!rvu)
+               return -ENODEV;
+
+       pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
+       domain = 2;
+
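+       /* RVU PFs are enumerated in PCI domain 2 with bus number pf + 1 */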
+       pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
+       if (!pdev)
+               return 0;
+
+       cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
+       bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
+       mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
+
+       seq_puts(s,
+                "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
+       seq_printf(s, "%s  PF%d  %9s  %9s",
+                  dev_name(&pdev->dev), pf, bcast, mcast);
+       if (cfg & CGX_DMAC_CAM_ACCEPT)
+               seq_printf(s, "%12s\n\n", "UNICAST");
+       else
+               seq_printf(s, "%16s\n\n", "PROMISCUOUS");
+
+       seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");
+
+       for (index = 0 ; index < 32 ; index++) {
+               cfg = cgx_read_dmac_entry(cgxd, index);
+               /* Display enabled dmac entries associated with current lmac */
+               if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
+                   FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
+                       mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
+                       u64_to_ether_addr(mac, dmac);
+                       seq_printf(s, "%7d     %pM\n", index, dmac);
+               }
+       }
+
+       return 0;
+}
+
+static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
+{
+       int err, lmac_id;
+
+       err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+       if (!err)
+               return cgx_print_dmac_flt(filp, lmac_id);
+
+       return err;
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
+
 static void rvu_dbg_cgx_init(struct rvu *rvu)
 {
        struct mac_ops *mac_ops;
@@ -2029,6 +2098,9 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
 
                        debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
                                            cgx, &rvu_dbg_cgx_stat_fops);
+                       debugfs_create_file("mac_filter", 0600,
+                                           rvu->rvu_dbg.lmac, cgx,
+                                           &rvu_dbg_cgx_dmac_flt_fops);
                }
        }
 }
@@ -2041,9 +2113,6 @@ static void rvu_print_npc_mcam_info(struct seq_file *s,
        int entry_acnt, entry_ecnt;
        int cntr_acnt, cntr_ecnt;
 
-       /* Skip PF0 */
-       if (!pcifunc)
-               return;
        rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
                                          &entry_acnt, &entry_ecnt);
        rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
@@ -2226,7 +2295,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
                                         struct rvu_npc_mcam_rule *rule)
 {
-       if (rule->intf == NIX_INTF_TX) {
+       if (is_npc_intf_tx(rule->intf)) {
                switch (rule->tx_action.op) {
                case NIX_TX_ACTIONOP_DROP:
                        seq_puts(s, "\taction: Drop\n");
index 10a98bc..2688186 100644 (file)
@@ -1364,6 +1364,44 @@ static void rvu_health_reporters_destroy(struct rvu *rvu)
        rvu_nix_health_reporters_destroy(rvu_dl);
 }
 
+static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+       struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+       struct rvu *rvu = rvu_dl->rvu;
+       struct rvu_switch *rswitch;
+
+       rswitch = &rvu->rswitch;
+       *mode = rswitch->mode;
+
+       return 0;
+}
+
+static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+                                       struct netlink_ext_ack *extack)
+{
+       struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+       struct rvu *rvu = rvu_dl->rvu;
+       struct rvu_switch *rswitch;
+
+       rswitch = &rvu->rswitch;
+       switch (mode) {
+       case DEVLINK_ESWITCH_MODE_LEGACY:
+       case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+               if (rswitch->mode == mode)
+                       return 0;
+               rswitch->mode = mode;
+               if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+                       rvu_switch_enable(rvu);
+               else
+                       rvu_switch_disable(rvu);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
                                struct netlink_ext_ack *extack)
 {
@@ -1372,6 +1410,8 @@ static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req
 
 static const struct devlink_ops rvu_devlink_ops = {
        .info_get = rvu_devlink_info_get,
+       .eswitch_mode_get = rvu_devlink_eswitch_mode_get,
+       .eswitch_mode_set = rvu_devlink_eswitch_mode_set,
 };
 
 int rvu_register_dl(struct rvu *rvu)
@@ -1380,14 +1420,9 @@ int rvu_register_dl(struct rvu *rvu)
        struct devlink *dl;
        int err;
 
-       rvu_dl = kzalloc(sizeof(*rvu_dl), GFP_KERNEL);
-       if (!rvu_dl)
-               return -ENOMEM;
-
        dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink));
        if (!dl) {
                dev_warn(rvu->dev, "devlink_alloc failed\n");
-               kfree(rvu_dl);
                return -ENOMEM;
        }
 
@@ -1395,10 +1430,10 @@ int rvu_register_dl(struct rvu *rvu)
        if (err) {
                dev_err(rvu->dev, "devlink register failed with error %d\n", err);
                devlink_free(dl);
-               kfree(rvu_dl);
                return err;
        }
 
+       rvu_dl = devlink_priv(dl);
        rvu_dl->dl = dl;
        rvu_dl->rvu = rvu;
        rvu->rvu_dl = rvu_dl;
@@ -1417,5 +1452,4 @@ void rvu_unregister_dl(struct rvu *rvu)
        rvu_health_reporters_destroy(rvu);
        devlink_unregister(dl);
        devlink_free(dl);
-       kfree(rvu_dl);
 }
index d6f8210..4bfbbdf 100644 (file)
@@ -196,11 +196,22 @@ static void nix_rx_sync(struct rvu *rvu, int blkaddr)
 {
        int err;
 
-       /*Sync all in flight RX packets to LLC/DRAM */
+       /* Sync all in flight RX packets to LLC/DRAM */
        rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
        err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
        if (err)
-               dev_err(rvu->dev, "NIX RX software sync failed\n");
+               dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
+
+       /* SW_SYNC ensures all in-flight transactions are finished and pkts
+        * are written to LLC/DRAM; queues should be torn down only after a
+        * successful SW_SYNC. Due to a HW erratum, in some rare scenarios an
+        * existing transaction might still complete after the SW_SYNC
+        * operation. To ensure the operation is fully done, do the SW_SYNC
+        * twice.
+        */
+       rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
+       err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
+       if (err)
+               dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
 }
 
 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
@@ -298,6 +309,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
                                        rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
                pfvf->rx_chan_cnt = 1;
                pfvf->tx_chan_cnt = 1;
+               rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
                rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
                                              pfvf->rx_chan_base,
                                              pfvf->rx_chan_cnt);
@@ -346,6 +358,9 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
 
        /* Free and disable any MCAM entries used by this NIX LF */
        rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+
+       /* Disable DMAC filters used */
+       rvu_cgx_disable_dmac_entries(rvu, pcifunc);
 }
 
 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
@@ -1949,6 +1964,35 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
        pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
 }
 
+static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
+                              u16 pcifunc, struct nix_txsch *txsch)
+{
+       struct rvu_hwinfo *hw = rvu->hw;
+       int lbk_link_start, lbk_links;
+       u8 pf = rvu_get_pf(pcifunc);
+       int schq;
+
+       if (!is_pf_cgxmapped(rvu, pf))
+               return;
+
+       lbk_link_start = hw->cgx_links;
+
+       for (schq = 0; schq < txsch->schq.max; schq++) {
+               if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+                       continue;
+               /* Enable all LBK links with channel 63 by default so that
+                * packets can be sent to the LBK with an NPC TX MCAM rule
+                */
+               lbk_links = hw->lbk_links;
+               while (lbk_links--)
+                       rvu_write64(rvu, blkaddr,
+                                   NIX_AF_TL3_TL2X_LINKX_CFG(schq,
+                                                             lbk_link_start +
+                                                             lbk_links),
+                                   BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
+       }
+}
+
 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
                                    struct nix_txschq_config *req,
                                    struct msg_rsp *rsp)
@@ -2037,6 +2081,9 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
                rvu_write64(rvu, blkaddr, reg, regval);
        }
 
+       rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
+                          &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
+
        return 0;
 }
 
@@ -3177,6 +3224,8 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
        if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
                ether_addr_copy(pfvf->default_mac, req->mac_addr);
 
+       rvu_switch_update_rules(rvu, pcifunc);
+
        return 0;
 }
 
@@ -3805,7 +3854,6 @@ static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
                vlan = &nix_hw->txvlan;
                kfree(vlan->rsrc.bmap);
                mutex_destroy(&vlan->rsrc_lock);
-               devm_kfree(rvu->dev, vlan->entry2pfvf_map);
 
                mcast = &nix_hw->mcast;
                qmem_free(rvu->dev, mcast->mce_ctx);
@@ -3846,6 +3894,8 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
        pfvf = rvu_get_pfvf(rvu, pcifunc);
        set_bit(NIXLF_INITIALIZED, &pfvf->flags);
 
+       rvu_switch_update_rules(rvu, pcifunc);
+
        return rvu_cgx_start_stop_io(rvu, pcifunc, true);
 }
 
index 3612e0a..52b2554 100644 (file)
@@ -442,7 +442,8 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
        owner = mcam->entry2pfvf_map[index];
        target_func = (entry->action >> 4) & 0xffff;
        /* do nothing when target is LBK/PF or owner is not PF */
-       if (is_afvf(target_func) || (owner & RVU_PFVF_FUNC_MASK) ||
+       if (is_pffunc_af(owner) || is_afvf(target_func) ||
+           (owner & RVU_PFVF_FUNC_MASK) ||
            !(target_func & RVU_PFVF_FUNC_MASK))
                return;
 
@@ -468,6 +469,8 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
 {
        int bank = npc_get_bank(mcam, index);
        int kw = 0, actbank, actindex;
+       u8 tx_intf_mask = ~intf & 0x3;
+       u8 tx_intf = intf;
        u64 cam0, cam1;
 
        actbank = bank; /* Save bank id, to set action later on */
@@ -488,12 +491,21 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
         */
        for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
                /* Interface should be set in all banks */
+               if (is_npc_intf_tx(intf)) {
+                       /* For TX interfaces the last bit must be set and
+                        * the rest are don't care
+                        */
+                       tx_intf_mask = 0x1;
+                       tx_intf = intf & tx_intf_mask;
+                       tx_intf_mask = ~tx_intf & tx_intf_mask;
+               }
+
                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1),
-                           intf);
+                           tx_intf);
                rvu_write64(rvu, blkaddr,
                            NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0),
-                           ~intf & 0x3);
+                           tx_intf_mask);
 
                /* Set the match key */
                npc_get_keyword(entry, kw, &cam0, &cam1);
@@ -650,6 +662,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
        eth_broadcast_addr((u8 *)&req.mask.dmac);
        req.features = BIT_ULL(NPC_DMAC);
        req.channel = chan;
+       req.chan_mask = 0xFFFU;
        req.intf = pfvf->nix_rx_intf;
        req.op = action.op;
        req.hdr.pcifunc = 0; /* AF is requester */
@@ -799,6 +812,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
        eth_broadcast_addr((u8 *)&req.mask.dmac);
        req.features = BIT_ULL(NPC_DMAC);
        req.channel = chan;
+       req.chan_mask = 0xFFFU;
        req.intf = pfvf->nix_rx_intf;
        req.entry = index;
        req.hdr.pcifunc = 0; /* AF is requester */
@@ -1707,7 +1721,6 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
 {
        struct rvu_hwinfo *hw = rvu->hw;
        int num_pkinds, num_kpus, idx;
-       struct npc_pkind *pkind;
 
        /* Disable all KPUs and their entries */
        for (idx = 0; idx < hw->npc_kpus; idx++) {
@@ -1725,9 +1738,8 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
         * Check HW max count to avoid configuring junk or
         * writing to unsupported CSR addresses.
         */
-       pkind = &hw->pkind;
        num_pkinds = rvu->kpu.pkinds;
-       num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);
+       num_pkinds = min_t(int, hw->npc_pkinds, num_pkinds);
 
        for (idx = 0; idx < num_pkinds; idx++)
                npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true);
@@ -1745,6 +1757,8 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
        int nixlf_count = rvu_get_nixlf_count(rvu);
        struct npc_mcam *mcam = &rvu->hw->mcam;
        int rsvd, err;
+       u16 index;
+       int cntr;
        u64 cfg;
 
        /* Actual number of MCAM entries vary by entry size */
@@ -1845,6 +1859,14 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
        if (!mcam->entry2target_pffunc)
                goto free_mem;
 
+       for (index = 0; index < mcam->bmap_entries; index++) {
+               mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
+               mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
+       }
+
+       for (cntr = 0; cntr < mcam->counters.max; cntr++)
+               mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
+
        mutex_init(&mcam->lock);
 
        return 0;
@@ -1867,7 +1889,8 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
        if (npc_const1 & BIT_ULL(63))
                npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2);
 
-       pkind->rsrc.max = (npc_const1 >> 12) & 0xFFULL;
+       pkind->rsrc.max = NPC_UNRESERVED_PKIND_COUNT;
+       hw->npc_pkinds = (npc_const1 >> 12) & 0xFFULL;
        hw->npc_kpu_entries = npc_const1 & 0xFFFULL;
        hw->npc_kpus = (npc_const >> 8) & 0x1FULL;
        hw->npc_intfs = npc_const & 0xFULL;
@@ -1978,6 +2001,10 @@ int rvu_npc_init(struct rvu *rvu)
        err = rvu_alloc_bitmap(&pkind->rsrc);
        if (err)
                return err;
+       /* Reserve PKIND#0 for LBKs. The power-on reset value of LBK_CH_PKIND
+        * is '0', so there is no need to configure the PKIND for each LBK
+        * separately.
+        */
+       rvu_alloc_rsrc(&pkind->rsrc);
 
        /* Allocate mem for pkind to PF and channel mapping info */
        pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
@@ -2562,7 +2589,7 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
        }
 
        /* Alloc request from PFFUNC with no NIXLF attached should be denied */
-       if (!is_nixlf_attached(rvu, pcifunc))
+       if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
                return NPC_MCAM_ALLOC_DENIED;
 
        return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
@@ -2582,7 +2609,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
                return NPC_MCAM_INVALID_REQ;
 
        /* Free request from PFFUNC with no NIXLF attached, ignore */
-       if (!is_nixlf_attached(rvu, pcifunc))
+       if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
                return NPC_MCAM_INVALID_REQ;
 
        mutex_lock(&mcam->lock);
@@ -2594,7 +2621,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
        if (rc)
                goto exit;
 
-       mcam->entry2pfvf_map[req->entry] = 0;
+       mcam->entry2pfvf_map[req->entry] = NPC_MCAM_INVALID_MAP;
        mcam->entry2target_pffunc[req->entry] = 0x0;
        npc_mcam_clear_bit(mcam, req->entry);
        npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
@@ -2679,13 +2706,14 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
        else
                nix_intf = pfvf->nix_rx_intf;
 
-       if (npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) {
+       if (!is_pffunc_af(pcifunc) &&
+           npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) {
                rc = NPC_MCAM_INVALID_REQ;
                goto exit;
        }
 
-       if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
-                                   pcifunc)) {
+       if (!is_pffunc_af(pcifunc) &&
+           npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) {
                rc = NPC_MCAM_INVALID_REQ;
                goto exit;
        }
@@ -2836,7 +2864,7 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
                return NPC_MCAM_INVALID_REQ;
 
        /* If the request is from a PFFUNC with no NIXLF attached, ignore */
-       if (!is_nixlf_attached(rvu, pcifunc))
+       if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc))
                return NPC_MCAM_INVALID_REQ;
 
        /* Since list of allocated counter IDs needs to be sent to requester,
@@ -3081,7 +3109,7 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
        if (rc) {
                /* Free allocated MCAM entry */
                mutex_lock(&mcam->lock);
-               mcam->entry2pfvf_map[entry] = 0;
+               mcam->entry2pfvf_map[entry] = NPC_MCAM_INVALID_MAP;
                npc_mcam_clear_bit(mcam, entry);
                mutex_unlock(&mcam->lock);
                return rc;
index 6863314..5c01cf4 100644 (file)
@@ -910,14 +910,17 @@ static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
 
 static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                struct mcam_entry *entry,
-                               struct npc_install_flow_req *req, u16 target)
+                               struct npc_install_flow_req *req,
+                               u16 target, bool pf_set_vfs_mac)
 {
+       struct rvu_switch *rswitch = &rvu->rswitch;
        struct nix_rx_action action;
-       u64 chan_mask;
 
-       chan_mask = req->chan_mask ? req->chan_mask : ~0ULL;
-       npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, chan_mask, 0,
-                        NIX_INTF_RX);
+       if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac)
+               req->chan_mask = 0x0; /* Do not care channel */
+
+       npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, req->chan_mask,
+                        0, NIX_INTF_RX);
 
        *(u64 *)&action = 0x00;
        action.pf_func = target;
@@ -949,9 +952,16 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                struct npc_install_flow_req *req, u16 target)
 {
        struct nix_tx_action action;
+       u64 mask = ~0ULL;
+
+       /* If the AF is installing the rule then do not match the
+        * PF_FUNC in the send descriptor
+        */
+       if (is_pffunc_af(req->hdr.pcifunc))
+               mask = 0;
 
        npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target),
-                        0, ~0ULL, 0, NIX_INTF_TX);
+                        0, mask, 0, NIX_INTF_TX);
 
        *(u64 *)&action = 0x00;
        action.op = req->op;
@@ -1002,7 +1012,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
                        req->intf);
 
        if (is_npc_intf_rx(req->intf))
-               npc_update_rx_entry(rvu, pfvf, entry, req, target);
+               npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
        else
                npc_update_tx_entry(rvu, pfvf, entry, req, target);
 
@@ -1164,7 +1174,9 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
        if (err)
                return err;
 
-       if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
+       /* Skip channel validation if AF is installing */
+       if (!is_pffunc_af(req->hdr.pcifunc) &&
+           npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
                return -EINVAL;
 
        pfvf = rvu_get_pfvf(rvu, target);
@@ -1180,6 +1192,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
                eth_broadcast_addr((u8 *)&req->mask.dmac);
        }
 
+       /* For TX rules, proceed whether or not a NIXLF is attached */
        err = nix_get_nixlf(rvu, target, &nixlf, NULL);
        if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac)
                return -EINVAL;
index 76837d5..8b01ef6 100644 (file)
 #define RVU_AF_PFX_VF_BAR4_ADDR             (0x5400 | (a) << 4)
 #define RVU_AF_PFX_VF_BAR4_CFG              (0x5600 | (a) << 4)
 #define RVU_AF_PFX_LMTLINE_ADDR             (0x5800 | (a) << 4)
+#define RVU_AF_SMMU_ADDR_REQ               (0x6000)
+#define RVU_AF_SMMU_TXN_REQ                (0x6008)
+#define RVU_AF_SMMU_ADDR_RSP_STS           (0x6010)
+#define RVU_AF_SMMU_ADDR_TLN               (0x6018)
+#define RVU_AF_SMMU_TLN_FLIT1              (0x6030)
 
 /* Admin function's privileged PF/VF registers */
 #define RVU_PRIV_CONST                      (0x8000000)
 #define LBK_LINK_CFG_ID_MASK           GENMASK_ULL(11, 6)
 #define LBK_LINK_CFG_BASE_MASK         GENMASK_ULL(5, 0)
 
+/* APR */
+#define        APR_AF_LMT_CFG                  (0x000ull)
+#define        APR_AF_LMT_MAP_BASE             (0x008ull)
+#define        APR_AF_LMT_CTL                  (0x010ull)
+
 #endif /* RVU_REG_H */
index 14aa8e3..5bbe672 100644 (file)
@@ -35,7 +35,8 @@ enum rvu_block_addr_e {
        BLKADDR_NDC_NPA0        = 0xeULL,
        BLKADDR_NDC_NIX1_RX     = 0x10ULL,
        BLKADDR_NDC_NIX1_TX     = 0x11ULL,
-       BLK_COUNT               = 0x12ULL,
+       BLKADDR_APR             = 0x16ULL,
+       BLK_COUNT               = 0x17ULL,
 };
 
 /* RVU Block Type Enumeration */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
new file mode 100644 (file)
index 0000000..820adf3
--- /dev/null
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include <linux/bitfield.h>
+#include "rvu.h"
+
+static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc,
+                                     u16 chan_mask)
+{
+       struct npc_install_flow_req req = { 0 };
+       struct npc_install_flow_rsp rsp = { 0 };
+       struct rvu_pfvf *pfvf;
+
+       pfvf = rvu_get_pfvf(rvu, pcifunc);
+       /* If the pcifunc is not initialized then nothing to do.
+        * This same function will be called again via rvu_switch_update_rules
+        * after pcifunc is initialized.
+        */
+       if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
+               return 0;
+
+       ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
+       eth_broadcast_addr((u8 *)&req.mask.dmac);
+       req.hdr.pcifunc = 0; /* AF is requester */
+       req.vf = pcifunc;
+       req.features = BIT_ULL(NPC_DMAC);
+       req.channel = pfvf->rx_chan_base;
+       req.chan_mask = chan_mask;
+       req.intf = pfvf->nix_rx_intf;
+       req.op = NIX_RX_ACTION_DEFAULT;
+       req.default_rule = 1;
+
+       return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry)
+{
+       struct npc_install_flow_req req = { 0 };
+       struct npc_install_flow_rsp rsp = { 0 };
+       struct rvu_pfvf *pfvf;
+       u8 lbkid;
+
+       pfvf = rvu_get_pfvf(rvu, pcifunc);
+       /* If the pcifunc is not initialized then nothing to do.
+        * This same function will be called again via rvu_switch_update_rules
+        * after pcifunc is initialized.
+        */
+       if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
+               return 0;
+
+       lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
+       ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
+       eth_broadcast_addr((u8 *)&req.mask.dmac);
+       req.hdr.pcifunc = 0; /* AF is requester */
+       req.vf = pcifunc;
+       req.entry = entry;
+       req.features = BIT_ULL(NPC_DMAC);
+       req.intf = pfvf->nix_tx_intf;
+       req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
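+       /* For UCAST_CHAN the action index carries (lbkid << 8) | LBK channel */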
+       req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
+       req.set_cntr = 1;
+
+       return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+static int rvu_switch_install_rules(struct rvu *rvu)
+{
+       struct rvu_switch *rswitch = &rvu->rswitch;
+       u16 start = rswitch->start_entry;
+       struct rvu_hwinfo *hw = rvu->hw;
+       u16 pcifunc, entry = 0;
+       int pf, vf, numvfs;
+       int err;
+
+       for (pf = 1; pf < hw->total_pfs; pf++) {
+               if (!is_pf_cgxmapped(rvu, pf))
+                       continue;
+
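+               /* pcifunc: PF id in bits 15:10, function (VF id + 1) in bits 9:0 */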
+               pcifunc = pf << 10;
+               /* rvu_get_nix_blkaddr sets up the corresponding NIX block
+                * address and the NIX RX and TX interfaces for a pcifunc.
+                * Generally it is called during the attach of a pcifunc,
+                * but it is called here since we are pre-installing rules
+                * before the NIXLFs are attached.
+                */
+               rvu_get_nix_blkaddr(rvu, pcifunc);
+
+               /* An MCAM RX rule for a PF/VF already exists as a default
+                * unicast rule installed by the AF. Hence change the channel
+                * match in those rules to "don't care" so that packets with
+                * the required DMAC are accepted whether they are received
+                * from the LBK (from other PF/VFs in the system) or from the
+                * external world (the wire).
+                */
+               err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+               if (err) {
+                       dev_err(rvu->dev, "RX rule for PF%d failed(%d)\n",
+                               pf, err);
+                       return err;
+               }
+
+               err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry);
+               if (err) {
+                       dev_err(rvu->dev, "TX rule for PF%d failed(%d)\n",
+                               pf, err);
+                       return err;
+               }
+
+               rswitch->entry2pcifunc[entry++] = pcifunc;
+
+               rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+               for (vf = 0; vf < numvfs; vf++) {
+                       pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+                       rvu_get_nix_blkaddr(rvu, pcifunc);
+
+                       err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+                       if (err) {
+                               dev_err(rvu->dev,
+                                       "RX rule for PF%dVF%d failed(%d)\n",
+                                       pf, vf, err);
+                               return err;
+                       }
+
+                       err = rvu_switch_install_tx_rule(rvu, pcifunc,
+                                                        start + entry);
+                       if (err) {
+                               dev_err(rvu->dev,
+                                       "TX rule for PF%dVF%d failed(%d)\n",
+                                       pf, vf, err);
+                               return err;
+                       }
+
+                       rswitch->entry2pcifunc[entry++] = pcifunc;
+               }
+       }
+
+       return 0;
+}
+
+void rvu_switch_enable(struct rvu *rvu)
+{
+       struct npc_mcam_alloc_entry_req alloc_req = { 0 };
+       struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
+       struct npc_delete_flow_req uninstall_req = { 0 };
+       struct npc_mcam_free_entry_req free_req = { 0 };
+       struct rvu_switch *rswitch = &rvu->rswitch;
+       struct msg_rsp rsp;
+       int ret;
+
+       alloc_req.contig = true;
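+       /* One contiguous MCAM entry per CGX mapped PF/VF for its TX rule */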
+       alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
+       ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
+                                                   &alloc_rsp);
+       if (ret) {
+               dev_err(rvu->dev,
+                       "Unable to allocate MCAM entries\n");
+               goto exit;
+       }
+
+       if (alloc_rsp.count != alloc_req.count) {
+               dev_err(rvu->dev,
+                       "Unable to allocate %d MCAM entries, got %d\n",
+                       alloc_req.count, alloc_rsp.count);
+               goto free_entries;
+       }
+
+       rswitch->entry2pcifunc = kcalloc(alloc_req.count, sizeof(u16),
+                                        GFP_KERNEL);
+       if (!rswitch->entry2pcifunc)
+               goto free_entries;
+
+       rswitch->used_entries = alloc_rsp.count;
+       rswitch->start_entry = alloc_rsp.entry;
+
+       ret = rvu_switch_install_rules(rvu);
+       if (ret)
+               goto uninstall_rules;
+
+       return;
+
+uninstall_rules:
+       uninstall_req.start = rswitch->start_entry;
+       uninstall_req.end =  rswitch->start_entry + rswitch->used_entries - 1;
+       rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+       kfree(rswitch->entry2pcifunc);
+free_entries:
+       free_req.all = 1;
+       rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+exit:
+       return;
+}
+
+void rvu_switch_disable(struct rvu *rvu)
+{
+       struct npc_delete_flow_req uninstall_req = { 0 };
+       struct npc_mcam_free_entry_req free_req = { 0 };
+       struct rvu_switch *rswitch = &rvu->rswitch;
+       struct rvu_hwinfo *hw = rvu->hw;
+       int pf, vf, numvfs;
+       struct msg_rsp rsp;
+       u16 pcifunc;
+       int err;
+
+       if (!rswitch->used_entries)
+               return;
+
+       for (pf = 1; pf < hw->total_pfs; pf++) {
+               if (!is_pf_cgxmapped(rvu, pf))
+                       continue;
+
+               pcifunc = pf << 10;
+               err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
+               if (err)
+                       dev_err(rvu->dev,
+                               "Reverting RX rule for PF%d failed(%d)\n",
+                               pf, err);
+
+               rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+               for (vf = 0; vf < numvfs; vf++) {
+                       pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+                       err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
+                       if (err)
+                               dev_err(rvu->dev,
+                                       "Reverting RX rule for PF%dVF%d failed(%d)\n",
+                                       pf, vf, err);
+               }
+       }
+
+       uninstall_req.start = rswitch->start_entry;
+       uninstall_req.end =  rswitch->start_entry + rswitch->used_entries - 1;
+       free_req.all = 1;
+       rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+       rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
+       rswitch->used_entries = 0;
+       kfree(rswitch->entry2pcifunc);
+}
+
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc)
+{
+       struct rvu_switch *rswitch = &rvu->rswitch;
+       u32 max = rswitch->used_entries;
+       u16 entry;
+
+       if (!rswitch->used_entries)
+               return;
+
+       for (entry = 0; entry < max; entry++) {
+               if (rswitch->entry2pcifunc[entry] == pcifunc)
+                       break;
+       }
+
+       if (entry >= max)
+               return;
+
+       rvu_switch_install_tx_rule(rvu, pcifunc, rswitch->start_entry + entry);
+       rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
+}
index 457c947..3254b02 100644 (file)
@@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o
 obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o
 
 rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
-                    otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o
+               otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o
 rvu_nicvf-y := otx2_vf.o
 
 ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
index 1b08896..184de94 100644 (file)
@@ -22,69 +22,52 @@ static struct dev_hw_ops cn10k_hw_ops = {
        .refill_pool_ptrs = cn10k_refill_pool_ptrs,
 };
 
-int cn10k_pf_lmtst_init(struct otx2_nic *pf)
+int cn10k_lmtst_init(struct otx2_nic *pfvf)
 {
-       int size, num_lines;
-       u64 base;
 
-       if (!test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
-               pf->hw_ops = &otx2_hw_ops;
+       struct lmtst_tbl_setup_req *req;
+       int qcount, err;
+
+       if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+               pfvf->hw_ops = &otx2_hw_ops;
                return 0;
        }
 
-       pf->hw_ops = &cn10k_hw_ops;
-       base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
-                      (MBOX_SIZE * (pf->total_vfs + 1));
-
-       size = pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM) -
-              (MBOX_SIZE * (pf->total_vfs + 1));
-
-       pf->hw.lmt_base = ioremap(base, size);
+       pfvf->hw_ops = &cn10k_hw_ops;
+       qcount = pfvf->hw.max_queues;
+       /* LMTST lines allocation
+        * qcount = num_online_cpus();
+        * NPA = TX + RX + XDP.
+        * NIX = TX * 32 (For Burst SQE flush).
+        */
+       pfvf->tot_lmt_lines = (qcount * 3) + (qcount * 32);
+       pfvf->npa_lmt_lines = qcount * 3;
+       pfvf->nix_lmt_size =  LMT_BURST_SIZE * LMT_LINE_SIZE;
 
-       if (!pf->hw.lmt_base) {
-               dev_err(pf->dev, "Unable to map PF LMTST region\n");
+       mutex_lock(&pfvf->mbox.lock);
+       req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox);
+       if (!req) {
+               mutex_unlock(&pfvf->mbox.lock);
                return -ENOMEM;
        }
 
-       /* FIXME: Get the num of LMTST lines from LMT table */
-       pf->tot_lmt_lines = size / LMT_LINE_SIZE;
-       num_lines = (pf->tot_lmt_lines - NIX_LMTID_BASE) /
-                           pf->hw.tx_queues;
-       /* Number of LMT lines per SQ queues */
-       pf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
-
-       pf->nix_lmt_size = pf->nix_lmt_lines * LMT_LINE_SIZE;
-       return 0;
-}
+       req->use_local_lmt_region = true;
 
-int cn10k_vf_lmtst_init(struct otx2_nic *vf)
-{
-       int size, num_lines;
-
-       if (!test_bit(CN10K_LMTST, &vf->hw.cap_flag)) {
-               vf->hw_ops = &otx2_hw_ops;
-               return 0;
+       err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines,
+                        LMT_LINE_SIZE);
+       if (err) {
+               mutex_unlock(&pfvf->mbox.lock);
+               return err;
        }
+       pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base;
+       req->lmt_iova = (u64)pfvf->dync_lmt->iova;
 
-       vf->hw_ops = &cn10k_hw_ops;
-       size = pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM);
-       vf->hw.lmt_base = ioremap_wc(pci_resource_start(vf->pdev,
-                                                       PCI_MBOX_BAR_NUM),
-                                    size);
-       if (!vf->hw.lmt_base) {
-               dev_err(vf->dev, "Unable to map VF LMTST region\n");
-               return -ENOMEM;
-       }
+       err = otx2_sync_mbox_msg(&pfvf->mbox);
+       mutex_unlock(&pfvf->mbox.lock);
 
-       vf->tot_lmt_lines = size / LMT_LINE_SIZE;
-       /* LMTST lines per SQ */
-       num_lines = (vf->tot_lmt_lines - NIX_LMTID_BASE) /
-                           vf->hw.tx_queues;
-       vf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
-       vf->nix_lmt_size = vf->nix_lmt_lines * LMT_LINE_SIZE;
        return 0;
 }
-EXPORT_SYMBOL(cn10k_vf_lmtst_init);
+EXPORT_SYMBOL(cn10k_lmtst_init);
 
 int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
 {
@@ -93,9 +76,11 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
        struct otx2_snd_queue *sq;
 
        sq = &pfvf->qset.sq[qidx];
-       sq->lmt_addr = (__force u64 *)((u64)pfvf->hw.nix_lmt_base +
+       sq->lmt_addr = (u64 *)((u64)pfvf->hw.nix_lmt_base +
                               (qidx * pfvf->nix_lmt_size));
 
+       sq->lmt_id = pfvf->npa_lmt_lines + (qidx * LMT_BURST_SIZE);
+
        /* Get memory to put this msg */
        aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
        if (!aq)
@@ -158,15 +143,13 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
 
 void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
 {
-       struct otx2_nic *pfvf = dev;
-       int lmt_id = NIX_LMTID_BASE + (qidx * pfvf->nix_lmt_lines);
        u64 val = 0, tar_addr = 0;
 
        /* FIXME: val[0:10] LMT_ID.
         * [12:15] no of LMTST - 1 in the burst.
         * [19:63] data size of each LMTST in the burst except first.
         */
-       val = (lmt_id & 0x7FF);
+       val = (sq->lmt_id & 0x7FF);
        /* Target address for LMTST flush tells HW how many 128bit
         * words are present.
         * tar_addr[6:4] size of first LMTST - 1 in units of 128b.
index 71292a4..1a1ae33 100644 (file)
@@ -12,8 +12,7 @@
 void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
 void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
 int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
-int cn10k_pf_lmtst_init(struct otx2_nic *pf);
-int cn10k_vf_lmtst_init(struct otx2_nic *vf);
+int cn10k_lmtst_init(struct otx2_nic *pfvf);
 int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
 int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);
 int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf);
index cf7875d..70fcc1f 100644 (file)
@@ -210,6 +210,9 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
                /* update dmac field in vlan offload rule */
                if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
                        otx2_install_rxvlan_offload_flow(pfvf);
+               /* update dmac address in ntuple and DMAC filter list */
+               if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+                       otx2_dmacflt_update_pfmac_flow(pfvf);
        } else {
                return -EPERM;
        }
@@ -921,12 +924,14 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
                aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
                aq->cq.drop_ena = 1;
 
-               /* Enable receive CQ backpressure */
-               aq->cq.bp_ena = 1;
-               aq->cq.bpid = pfvf->bpid[0];
+               if (!is_otx2_lbkvf(pfvf->pdev)) {
+                       /* Enable receive CQ backpressure */
+                       aq->cq.bp_ena = 1;
+                       aq->cq.bpid = pfvf->bpid[0];
 
-               /* Set backpressure level is same as cq pass level */
-               aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+                       /* Set backpressure level is same as cq pass level */
+                       aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+               }
        }
 
        /* Fill AQ info */
@@ -1183,7 +1188,7 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
        aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
 
        /* Enable backpressure for RQ aura */
-       if (aura_id < pfvf->hw.rqpool_cnt) {
+       if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
                aq->aura.bp_ena = 0;
                aq->aura.nix0_bpid = pfvf->bpid[0];
                /* Set backpressure level for RQ's Aura */
index 234b330..8fd58cd 100644 (file)
@@ -218,8 +218,8 @@ struct otx2_hw {
        unsigned long           cap_flag;
 
 #define LMT_LINE_SIZE          128
-#define NIX_LMTID_BASE         72 /* RX + TX + XDP */
-       void __iomem            *lmt_base;
+#define LMT_BURST_SIZE         32 /* 32 LMTST lines for burst SQE flush */
+       u64                     *lmt_base;
        u64                     *npa_lmt_base;
        u64                     *nix_lmt_base;
 };
@@ -288,6 +288,9 @@ struct otx2_flow_config {
        u16                     tc_flower_offset;
        u16                     ntuple_max_flows;
        u16                     tc_max_flows;
+       u8                      dmacflt_max_flows;
+       u8                      *bmap_to_dmacindex;
+       unsigned long           dmacflt_bmap;
        struct list_head        flow_list;
 };
 
@@ -329,6 +332,7 @@ struct otx2_nic {
 #define OTX2_FLAG_TC_FLOWER_SUPPORT            BIT_ULL(11)
 #define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED   BIT_ULL(12)
 #define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED  BIT_ULL(13)
+#define OTX2_FLAG_DMACFLTR_SUPPORT             BIT_ULL(14)
        u64                     flags;
 
        struct otx2_qset        qset;
@@ -363,8 +367,9 @@ struct otx2_nic {
        /* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
        int                     nix_blkaddr;
        /* LMTST Lines info */
+       struct qmem             *dync_lmt;
        u16                     tot_lmt_lines;
-       u16                     nix_lmt_lines;
+       u16                     npa_lmt_lines;
        u32                     nix_lmt_size;
 
        struct otx2_ptp         *ptp;
@@ -833,4 +838,11 @@ int otx2_init_tc(struct otx2_nic *nic);
 void otx2_shutdown_tc(struct otx2_nic *nic);
 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
                  void *type_data);
+/* CGX/RPM DMAC filters support */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
 #endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
new file mode 100644 (file)
index 0000000..383a6b5
--- /dev/null
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "otx2_common.h"
+
+static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
+                              u8 *dmac_index)
+{
+       struct cgx_mac_addr_add_req *req;
+       struct cgx_mac_addr_add_rsp *rsp;
+       int err;
+
+       mutex_lock(&pf->mbox.lock);
+
+       req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox);
+       if (!req) {
+               mutex_unlock(&pf->mbox.lock);
+               return -ENOMEM;
+       }
+
+       ether_addr_copy(req->mac_addr, mac);
+       err = otx2_sync_mbox_msg(&pf->mbox);
+
+       if (!err) {
+               rsp = (struct cgx_mac_addr_add_rsp *)
+                        otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+               *dmac_index = rsp->index;
+       }
+
+       mutex_unlock(&pf->mbox.lock);
+       return err;
+}
+
+static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf)
+{
+       struct cgx_mac_addr_set_or_get *req;
+       int err;
+
+       mutex_lock(&pf->mbox.lock);
+
+       req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox);
+       if (!req) {
+               mutex_unlock(&pf->mbox.lock);
+               return -ENOMEM;
+       }
+
+       ether_addr_copy(req->mac_addr, pf->netdev->dev_addr);
+       err = otx2_sync_mbox_msg(&pf->mbox);
+
+       mutex_unlock(&pf->mbox.lock);
+       return err;
+}
+
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos)
+{
+       u8 *dmacindex;
+
+       /* Store dmacindex returned by CGX/RPM driver which will
+        * be used for macaddr update/remove
+        */
+       dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+       if (ether_addr_equal(mac, pf->netdev->dev_addr))
+               return otx2_dmacflt_add_pfmac(pf);
+       else
+               return otx2_dmacflt_do_add(pf, mac, dmacindex);
+}
+
+static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
+                                 u8 dmac_index)
+{
+       struct cgx_mac_addr_del_req *req;
+       int err;
+
+       mutex_lock(&pfvf->mbox.lock);
+       req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox);
+       if (!req) {
+               mutex_unlock(&pfvf->mbox.lock);
+               return -ENOMEM;
+       }
+
+       req->index = dmac_index;
+
+       err = otx2_sync_mbox_msg(&pfvf->mbox);
+       mutex_unlock(&pfvf->mbox.lock);
+
+       return err;
+}
+
+static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
+{
+       struct msg_req *req;
+       int err;
+
+       mutex_lock(&pf->mbox.lock);
+       req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox);
+       if (!req) {
+               mutex_unlock(&pf->mbox.lock);
+               return -ENOMEM;
+       }
+
+       err = otx2_sync_mbox_msg(&pf->mbox);
+
+       mutex_unlock(&pf->mbox.lock);
+       return err;
+}
+
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac,
+                       u8 bit_pos)
+{
+       u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+       if (ether_addr_equal(mac, pf->netdev->dev_addr))
+               return otx2_dmacflt_remove_pfmac(pf);
+       else
+               return otx2_dmacflt_do_remove(pf, mac, dmacindex);
+}
+
+/* CGX/RPM blocks support a maximum of 32 unicast DMAC entries. In a
+ * typical configuration the MAC block is associated with 4 LMACs, so
+ * each LMAC gets 8 DMAC entries.
+ */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf)
+{
+       struct cgx_max_dmac_entries_get_rsp *rsp;
+       struct msg_req *msg;
+       int err;
+
+       mutex_lock(&pf->mbox.lock);
+       msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox);
+
+       if (!msg) {
+               mutex_unlock(&pf->mbox.lock);
+               return -ENOMEM;
+       }
+
+       err = otx2_sync_mbox_msg(&pf->mbox);
+       if (err)
+               goto out;
+
+       rsp = (struct cgx_max_dmac_entries_get_rsp *)
+                    otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr);
+       pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters;
+
+out:
+       mutex_unlock(&pf->mbox.lock);
+       return err;
+}
+
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos)
+{
+       struct cgx_mac_addr_update_req *req;
+       int rc;
+
+       mutex_lock(&pf->mbox.lock);
+
+       req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox);
+
+       if (!req) {
+               mutex_unlock(&pf->mbox.lock);
+               return -ENOMEM;
+       }
+
+       ether_addr_copy(req->mac_addr, mac);
+       req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+       rc = otx2_sync_mbox_msg(&pf->mbox);
+
+       mutex_unlock(&pf->mbox.lock);
+       return rc;
+}
index 8df748e..b906a0e 100644 (file)
@@ -298,15 +298,14 @@ static int otx2_set_channels(struct net_device *dev,
        err = otx2_set_real_num_queues(dev, channel->tx_count,
                                       channel->rx_count);
        if (err)
-               goto fail;
+               return err;
 
        pfvf->hw.rx_queues = channel->rx_count;
        pfvf->hw.tx_queues = channel->tx_count;
        pfvf->qset.cq_cnt = pfvf->hw.tx_queues +  pfvf->hw.rx_queues;
 
-fail:
        if (if_up)
-               dev->netdev_ops->ndo_open(dev);
+               err = dev->netdev_ops->ndo_open(dev);
 
        netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
                    pfvf->hw.tx_queues, pfvf->hw.rx_queues);
@@ -410,7 +409,7 @@ static int otx2_set_ringparam(struct net_device *netdev,
        qs->rqe_cnt = rx_count;
 
        if (if_up)
-               netdev->netdev_ops->ndo_open(netdev);
+               return netdev->netdev_ops->ndo_open(netdev);
 
        return 0;
 }
index 8c97106..4d9de52 100644 (file)
@@ -18,6 +18,12 @@ struct otx2_flow {
        bool is_vf;
        u8 rss_ctx_id;
        int vf;
+       bool dmac_filter;
+};
+
+enum dmac_req {
+       DMAC_ADDR_UPDATE,
+       DMAC_ADDR_DEL
 };
 
 static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
@@ -219,6 +225,22 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
        if (!pf->mac_table)
                return -ENOMEM;
 
+       otx2_dmacflt_get_max_cnt(pf);
+
+       /* DMAC filters are not allocated */
+       if (!pf->flow_cfg->dmacflt_max_flows)
+               return 0;
+
+       pf->flow_cfg->bmap_to_dmacindex =
+                       devm_kzalloc(pf->dev, sizeof(u8) *
+                                    pf->flow_cfg->dmacflt_max_flows,
+                                    GFP_KERNEL);
+
+       if (!pf->flow_cfg->bmap_to_dmacindex)
+               return -ENOMEM;
+
+       pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;
+
        return 0;
 }
 
@@ -280,6 +302,12 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
 {
        struct otx2_nic *pf = netdev_priv(netdev);
 
+       if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+                         pf->flow_cfg->dmacflt_max_flows))
+               netdev_warn(netdev,
+                           "Add %pM to CGX/RPM DMAC filters list as well\n",
+                           mac);
+
        return otx2_do_add_macfilter(pf, mac);
 }
 
@@ -351,12 +379,22 @@ static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
        list_add(&flow->list, head);
 }
 
+static int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
+{
+       if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows ||
+           bitmap_weight(&flow_cfg->dmacflt_bmap,
+                         flow_cfg->dmacflt_max_flows))
+               return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows;
+       else
+               return flow_cfg->ntuple_max_flows;
+}
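
To make the combined location space concrete: assuming, say, ntuple_max_flows = 64 and dmacflt_max_flows = 8 (illustrative values, not taken from the hardware), ethtool locations 0..63 address MCAM ntuple rules while 64..71 address CGX/RPM DMAC filter slots 0..7, which is exactly the offset applied later in this diff by fsp->location = flow_cfg->ntuple_max_flows + flow->entry. A standalone arithmetic check:

    #include <stdio.h>

    int main(void)
    {
            const unsigned int ntuple_max_flows = 64;   /* assumed value */
            const unsigned int dmacflt_max_flows = 8;   /* assumed value */
            unsigned int entry = 3;                     /* a free DMAC slot */
            unsigned int location = ntuple_max_flows + entry;

            printf("DMAC slot %u is exposed as ethtool location %u (max %u)\n",
                   entry, location, ntuple_max_flows + dmacflt_max_flows - 1);
            return 0;   /* prints: DMAC slot 3 is exposed as ethtool location 67 (max 71) */
    }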
+
 int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
                  u32 location)
 {
        struct otx2_flow *iter;
 
-       if (location >= pfvf->flow_cfg->ntuple_max_flows)
+       if (location >= otx2_get_maxflows(pfvf->flow_cfg))
                return -EINVAL;
 
        list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
@@ -378,7 +416,7 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
        int idx = 0;
        int err = 0;
 
-       nfc->data = pfvf->flow_cfg->ntuple_max_flows;
+       nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
        while ((!err || err == -ENOENT) && idx < rule_cnt) {
                err = otx2_get_flow(pfvf, nfc, location);
                if (!err)
@@ -760,6 +798,32 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
        return 0;
 }
 
+static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
+                                       struct ethtool_rx_flow_spec *fsp)
+{
+       struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
+       struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
+       u64 ring_cookie = fsp->ring_cookie;
+       u32 flow_type;
+
+       if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
+               return false;
+
+       flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+
+       /* CGX/RPM block DMAC filtering is configured for whitelisting;
+        * check for an action other than DROP
+        */
+       if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
+           !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
+               if (is_zero_ether_addr(eth_mask->h_dest) &&
+                   is_valid_ether_addr(eth_hdr->h_dest))
+                       return true;
+       }
+
+       return false;
+}
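
Taken together, a rule is diverted to the CGX/RPM DMAC table only when all four criteria hold: DMAC filtering is supported, the flow type is ETHER_FLOW, the action steers to a local PF queue (neither a drop nor a VF ring), and the destination MAC is an exact match (zero mask, valid address). A user-visible trigger would be roughly "ethtool -U eth0 flow-type ether dst aa:bb:cc:dd:ee:01 action 2" (device name, MAC and queue illustrative). A hedged userspace sketch of a flow spec this classifier would accept, using only uapi types:

    #include <linux/ethtool.h>
    #include <string.h>

    /* build_dmac_spec() is hypothetical; ethtool_rx_flow_spec and
     * ETHER_FLOW are the real uapi definitions. */
    static void build_dmac_spec(struct ethtool_rx_flow_spec *fsp,
                                const unsigned char *mac)
    {
            memset(fsp, 0, sizeof(*fsp));
            fsp->flow_type = ETHER_FLOW;                /* ethernet header match */
            memcpy(fsp->h_u.ether_spec.h_dest, mac, 6); /* exact DMAC value */
            /* m_u.ether_spec.h_dest stays all-zero: exact match, per the check */
            fsp->ring_cookie = 2;                       /* queue 2, not DROP, vf == 0 */
    }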
+
 static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
 {
        u64 ring_cookie = flow->flow_spec.ring_cookie;
@@ -818,14 +882,46 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
        return err;
 }
 
+static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
+                                   struct otx2_flow *flow)
+{
+       struct otx2_flow *pf_mac;
+       struct ethhdr *eth_hdr;
+
+       pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
+       if (!pf_mac)
+               return -ENOMEM;
+
+       pf_mac->entry = 0;
+       pf_mac->dmac_filter = true;
+       pf_mac->location = pfvf->flow_cfg->ntuple_max_flows;
+       memcpy(&pf_mac->flow_spec, &flow->flow_spec,
+              sizeof(struct ethtool_rx_flow_spec));
+       pf_mac->flow_spec.location = pf_mac->location;
+
+       /* Copy PF mac address */
+       eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
+       ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);
+
+       /* Install DMAC filter with PF mac address */
+       otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);
+
+       otx2_add_flow_to_list(pfvf, pf_mac);
+       pfvf->flow_cfg->nr_flows++;
+       set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+
+       return 0;
+}
+
 int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
 {
        struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
        struct ethtool_rx_flow_spec *fsp = &nfc->fs;
        struct otx2_flow *flow;
+       struct ethhdr *eth_hdr;
        bool new = false;
+       int err = 0;
        u32 ring;
-       int err;
 
        ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
        if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
@@ -834,16 +930,15 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
        if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
                return -EINVAL;
 
-       if (fsp->location >= flow_cfg->ntuple_max_flows)
+       if (fsp->location >= otx2_get_maxflows(flow_cfg))
                return -EINVAL;
 
        flow = otx2_find_flow(pfvf, fsp->location);
        if (!flow) {
-               flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+               flow = kzalloc(sizeof(*flow), GFP_KERNEL);
                if (!flow)
                        return -ENOMEM;
                flow->location = fsp->location;
-               flow->entry = flow_cfg->flow_ent[flow->location];
                new = true;
        }
        /* struct copy */
@@ -852,7 +947,54 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
        if (fsp->flow_type & FLOW_RSS)
                flow->rss_ctx_id = nfc->rss_context;
 
-       err = otx2_add_flow_msg(pfvf, flow);
+       if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
+               eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+               /* Sync dmac filter table with updated fields */
+               if (flow->dmac_filter)
+                       return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
+                                                  flow->entry);
+
+               if (bitmap_full(&flow_cfg->dmacflt_bmap,
+                               flow_cfg->dmacflt_max_flows)) {
+                       netdev_warn(pfvf->netdev,
+                                   "Can't insert the rule %d as max allowed dmac filters are %d\n",
+                                   flow->location +
+                                   flow_cfg->dmacflt_max_flows,
+                                   flow_cfg->dmacflt_max_flows);
+                       err = -EINVAL;
+                       if (new)
+                               kfree(flow);
+                       return err;
+               }
+
+               /* Install PF mac address to DMAC filter list */
+               if (!test_bit(0, &flow_cfg->dmacflt_bmap))
+                       otx2_add_flow_with_pfmac(pfvf, flow);
+
+               flow->dmac_filter = true;
+               flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
+                                                 flow_cfg->dmacflt_max_flows);
+               fsp->location = flow_cfg->ntuple_max_flows + flow->entry;
+               flow->flow_spec.location = fsp->location;
+               flow->location = fsp->location;
+
+               set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+               otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
+
+       } else {
+               if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) {
+                       netdev_warn(pfvf->netdev,
+                                   "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n",
+                                   flow->location,
+                                   flow_cfg->ntuple_max_flows - 1);
+                       err = -EINVAL;
+               } else {
+                       flow->entry = flow_cfg->flow_ent[flow->location];
+                       err = otx2_add_flow_msg(pfvf, flow);
+               }
+       }
+
        if (err) {
                if (new)
                        kfree(flow);
@@ -890,20 +1032,70 @@ static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
        return err;
 }
 
+static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
+{
+       struct otx2_flow *iter;
+       struct ethhdr *eth_hdr;
+       bool found = false;
+
+       list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+               if (iter->dmac_filter && iter->entry == 0) {
+                       eth_hdr = &iter->flow_spec.h_u.ether_spec;
+                       if (req == DMAC_ADDR_DEL) {
+                               otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+                                                   0);
+                               clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+                               found = true;
+                       } else {
+                               ether_addr_copy(eth_hdr->h_dest,
+                                               pfvf->netdev->dev_addr);
+                               otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
+                       }
+                       break;
+               }
+       }
+
+       if (found) {
+               list_del(&iter->list);
+               kfree(iter);
+               pfvf->flow_cfg->nr_flows--;
+       }
+}
+
 int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
 {
        struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
        struct otx2_flow *flow;
        int err;
 
-       if (location >= flow_cfg->ntuple_max_flows)
+       if (location >= otx2_get_maxflows(flow_cfg))
                return -EINVAL;
 
        flow = otx2_find_flow(pfvf, location);
        if (!flow)
                return -ENOENT;
 
-       err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+       if (flow->dmac_filter) {
+               struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+               /* user not allowed to remove dmac filter with interface mac */
+               if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
+                       return -EPERM;
+
+               err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+                                         flow->entry);
+               clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+               /* If all user-added DMAC filters are removed, also delete the
+                * macfilter with the interface MAC address and configure the
+                * CGX/RPM block in promiscuous mode
+                */
+               if (bitmap_weight(&flow_cfg->dmacflt_bmap,
+                                 flow_cfg->dmacflt_max_flows) == 1)
+                       otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
+       } else {
+               err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+       }
+
        if (err)
                return err;
 
@@ -1100,3 +1292,22 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
        mutex_unlock(&pf->mbox.lock);
        return rsp_hdr->rc;
 }
+
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
+{
+       struct otx2_flow *iter;
+       struct ethhdr *eth_hdr;
+
+       list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
+               if (iter->dmac_filter) {
+                       eth_hdr = &iter->flow_spec.h_u.ether_spec;
+                       otx2_dmacflt_add(pf, eth_hdr->h_dest,
+                                        iter->entry);
+               }
+       }
+}
+
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
+{
+       otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
+}
index 59912f7..2c24944 100644 (file)
@@ -1110,6 +1110,11 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
        struct msg_req *msg;
        int err;
 
+       if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+                                   pf->flow_cfg->dmacflt_max_flows))
+               netdev_warn(pf->netdev,
+                           "CGX/RPM internal loopback might not work as DMAC filters are active\n");
+
        mutex_lock(&pf->mbox.lock);
        if (enable)
                msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
@@ -1533,10 +1538,10 @@ int otx2_open(struct net_device *netdev)
 
        if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
                /* Reserve LMT lines for NPA AURA batch free */
-               pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base;
+               pf->hw.npa_lmt_base = pf->hw.lmt_base;
                /* Reserve LMT lines for NIX TX */
-               pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base +
-                                     (NIX_LMTID_BASE * LMT_LINE_SIZE));
+               pf->hw.nix_lmt_base = (u64 *)((u64)pf->hw.npa_lmt_base +
+                                     (pf->npa_lmt_lines * LMT_LINE_SIZE));
        }
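
The reserved-line math above is plain offset arithmetic: the NIX LMT region starts npa_lmt_lines lines past the NPA base. For illustration, assuming LMT_LINE_SIZE is 128 bytes and npa_lmt_lines is 8 (assumed values), the NIX base lands 1024 bytes past the NPA base:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint64_t LMT_LINE_SIZE = 128;         /* assumed line size in bytes */
            uint64_t npa_lmt_base = 0x840000000000ULL;  /* hypothetical base address */
            uint64_t npa_lmt_lines = 8;                 /* assumed NPA reservation */
            uint64_t nix_lmt_base = npa_lmt_base + npa_lmt_lines * LMT_LINE_SIZE;

            printf("NIX LMT base = 0x%llx (NPA base + %llu bytes)\n",
                   (unsigned long long)nix_lmt_base,
                   (unsigned long long)(npa_lmt_lines * LMT_LINE_SIZE));
            return 0;
    }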
 
        err = otx2_init_hw_resources(pf);
@@ -1644,6 +1649,10 @@ int otx2_open(struct net_device *netdev)
        /* Restore pause frame settings */
        otx2_config_pause_frm(pf);
 
+       /* Install DMAC Filters */
+       if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+               otx2_dmacflt_reinstall_flows(pf);
+
        err = otx2_rxtx_enable(pf, true);
        if (err)
                goto err_tx_stop_queues;
@@ -1653,6 +1662,7 @@ int otx2_open(struct net_device *netdev)
 err_tx_stop_queues:
        netif_tx_stop_all_queues(netdev);
        netif_carrier_off(netdev);
+       pf->flags |= OTX2_FLAG_INTF_DOWN;
 err_free_cints:
        otx2_free_cints(pf, qidx);
        vec = pci_irq_vector(pf->pdev,
@@ -1680,6 +1690,10 @@ int otx2_stop(struct net_device *netdev)
        struct otx2_rss_info *rss;
        int qidx, vec, wrk;
 
+       /* If the DOWN flag is set, resources are already freed */
+       if (pf->flags & OTX2_FLAG_INTF_DOWN)
+               return 0;
+
        netif_carrier_off(netdev);
        netif_tx_stop_all_queues(netdev);
 
@@ -2526,7 +2540,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (err)
                goto err_detach_rsrc;
 
-       err = cn10k_pf_lmtst_init(pf);
+       err = cn10k_lmtst_init(pf);
        if (err)
                goto err_detach_rsrc;
 
@@ -2630,8 +2644,8 @@ err_del_mcam_entries:
 err_ptp_destroy:
        otx2_ptp_destroy(pf);
 err_detach_rsrc:
-       if (hw->lmt_base)
-               iounmap(hw->lmt_base);
+       if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+               qmem_free(pf->dev, pf->dync_lmt);
        otx2_detach_resources(&pf->mbox);
 err_disable_mbox_intr:
        otx2_disable_mbox_intr(pf);
@@ -2772,9 +2786,8 @@ static void otx2_remove(struct pci_dev *pdev)
        otx2_mcam_flow_del(pf);
        otx2_shutdown_tc(pf);
        otx2_detach_resources(&pf->mbox);
-       if (pf->hw.lmt_base)
-               iounmap(pf->hw.lmt_base);
-
+       if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+               qmem_free(pf->dev, pf->dync_lmt);
        otx2_disable_mbox_intr(pf);
        otx2_pfaf_mbox_destroy(pf);
        pci_free_irq_vectors(pf->pdev);
index 905fc02..972b202 100644 (file)
@@ -288,7 +288,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
        struct otx2_nic *priv;
        u32 burst, mark = 0;
        u8 nr_police = 0;
-       bool pps;
+       bool pps = false;
        u64 rate;
        int i;
 
index 52486c1..2f144e2 100644 (file)
@@ -83,6 +83,7 @@ struct otx2_snd_queue {
        u16                     num_sqbs;
        u16                     sqe_thresh;
        u8                      sqe_per_sqb;
+       u32                     lmt_id;
        u64                      io_addr;
        u64                     *aura_fc_addr;
        u64                     *lmt_addr;
index 13a908f..a8bee5a 100644 (file)
@@ -609,7 +609,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (err)
                goto err_detach_rsrc;
 
-       err = cn10k_vf_lmtst_init(vf);
+       err = cn10k_lmtst_init(vf);
        if (err)
                goto err_detach_rsrc;
 
@@ -667,8 +667,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 err_unreg_netdev:
        unregister_netdev(netdev);
 err_detach_rsrc:
-       if (hw->lmt_base)
-               iounmap(hw->lmt_base);
+       if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+               qmem_free(vf->dev, vf->dync_lmt);
        otx2_detach_resources(&vf->mbox);
 err_disable_mbox_intr:
        otx2vf_disable_mbox_intr(vf);
@@ -700,10 +700,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
                destroy_workqueue(vf->otx2_wq);
        otx2vf_disable_mbox_intr(vf);
        otx2_detach_resources(&vf->mbox);
-
-       if (vf->hw.lmt_base)
-               iounmap(vf->hw.lmt_base);
-
+       if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+               qmem_free(vf->dev, vf->dync_lmt);
        otx2vf_vfaf_mbox_destroy(vf);
        pci_free_irq_vectors(vf->pdev);
        pci_set_drvdata(pdev, NULL);
index 00c8465..28ac469 100644 (file)
@@ -3535,6 +3535,7 @@ slave_start:
 
                if (!SRIOV_VALID_STATE(dev->flags)) {
                        mlx4_err(dev, "Invalid SRIOV state\n");
+                       err = -EINVAL;
                        goto err_close;
                }
        }
index ceebfc2..def2156 100644 (file)
@@ -500,10 +500,7 @@ static int next_phys_dev(struct device *dev, const void *data)
        return 1;
 }
 
-/* This function is called with two flows:
- * 1. During initialization of mlx5_core_dev and we don't need to lock it.
- * 2. During LAG configure stage and caller holds &mlx5_intf_mutex.
- */
+/* Must be called with intf_mutex held */
 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
 {
        struct auxiliary_device *adev;
index 150c8e8..2cbf18c 100644 (file)
@@ -471,6 +471,15 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
        param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
 }
 
+static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+{
+       bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
+               MLX5_CAP_GEN(mdev, relaxed_ordering_write);
+
+       return ro && params->lro_en ?
+               MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
+}
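
The helper collapses to a two-input decision: end padding is dropped (MLX5_WQ_END_PAD_MODE_NONE) only when PCIe relaxed ordering is usable and LRO is enabled; every other combination keeps MLX5_WQ_END_PAD_MODE_ALIGN. A standalone enumeration of the truth table (enum values are stand-ins, not the real mlx5 constants):

    #include <stdio.h>
    #include <stdbool.h>

    enum { PAD_ALIGN, PAD_NONE };   /* stand-ins for MLX5_WQ_END_PAD_MODE_* */

    static int end_pad_mode(bool relaxed_ordering, bool lro_en)
    {
            return (relaxed_ordering && lro_en) ? PAD_NONE : PAD_ALIGN;
    }

    int main(void)
    {
            for (int ro = 0; ro <= 1; ro++)
                    for (int lro = 0; lro <= 1; lro++)
                            printf("ro=%d lro=%d -> %s\n", ro, lro,
                                   end_pad_mode(ro, lro) == PAD_NONE ? "NONE" : "ALIGN");
            return 0;   /* only ro=1 lro=1 prints NONE */
    }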
+
 int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
                         struct mlx5e_params *params,
                         struct mlx5e_xsk_param *xsk,
@@ -508,7 +517,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
        }
 
        MLX5_SET(wq, wq, wq_type,          params->rq_wq_type);
-       MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+       MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
        MLX5_SET(wq, wq, log_wq_stride,
                 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
        MLX5_SET(wq, wq, pd,               mdev->mlx5e_res.hw_objs.pdn);
index 778e229..efef4ad 100644 (file)
@@ -482,8 +482,11 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
                params->log_sq_size = orig->log_sq_size;
                mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
        }
-       if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+       /* RQ */
+       if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+               params->vlan_strip_disable = orig->vlan_strip_disable;
                mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
+       }
 }
 
 static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
@@ -494,7 +497,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
        int err;
 
        rq->wq_type      = params->rq_wq_type;
-       rq->pdev         = mdev->device;
+       rq->pdev         = c->pdev;
        rq->netdev       = priv->netdev;
        rq->priv         = priv;
        rq->clock        = &mdev->clock;
index 86ab4e8..7f94508 100644 (file)
@@ -37,7 +37,7 @@ static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params
        struct mlx5e_priv *priv = t->priv;
 
        rq->wq_type      = params->rq_wq_type;
-       rq->pdev         = mdev->device;
+       rq->pdev         = t->pdev;
        rq->netdev       = priv->netdev;
        rq->priv         = priv;
        rq->clock        = &mdev->clock;
index d09e655..37c4408 100644 (file)
@@ -3384,7 +3384,7 @@ static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool en
 
 static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
 {
-       int err = 0;
+       int err;
        int i;
 
        for (i = 0; i < chs->num; i++) {
@@ -3392,6 +3392,8 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
                if (err)
                        return err;
        }
+       if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state))
+               return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd);
 
        return 0;
 }
@@ -3829,6 +3831,24 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
        return 0;
 }
 
+static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev,
+                                                      netdev_features_t features)
+{
+       features &= ~NETIF_F_HW_TLS_RX;
+       if (netdev->features & NETIF_F_HW_TLS_RX)
+               netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
+
+       features &= ~NETIF_F_HW_TLS_TX;
+       if (netdev->features & NETIF_F_HW_TLS_TX)
+               netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
+
+       features &= ~NETIF_F_NTUPLE;
+       if (netdev->features & NETIF_F_NTUPLE)
+               netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
+
+       return features;
+}
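
The extracted helper repeats the same "clear the bit, warn if it was set" stanza once per feature. A hedged sketch of how that stanza could be factored further; the macro name is hypothetical, while NETIF_F_* flags and netdev_warn() are the real kernel APIs:

    #define fix_feature(netdev, features, flag, name)                          \
            do {                                                               \
                    (features) &= ~(flag);                                     \
                    if ((netdev)->features & (flag))                           \
                            netdev_warn(netdev,                                \
                                        "Disabling %s, not supported in switchdev mode\n", \
                                        name);                                 \
            } while (0)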
+
 static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
                                            netdev_features_t features)
 {
@@ -3860,15 +3880,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
                        netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
        }
 
-       if (mlx5e_is_uplink_rep(priv)) {
-               features &= ~NETIF_F_HW_TLS_RX;
-               if (netdev->features & NETIF_F_HW_TLS_RX)
-                       netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
-
-               features &= ~NETIF_F_HW_TLS_TX;
-               if (netdev->features & NETIF_F_HW_TLS_TX)
-                       netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
-       }
+       if (mlx5e_is_uplink_rep(priv))
+               features = mlx5e_fix_uplink_rep_features(netdev, features);
 
        mutex_unlock(&priv->state_lock);
 
@@ -4859,6 +4872,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
        if (MLX5_CAP_ETH(mdev, scatter_fcs))
                netdev->hw_features |= NETIF_F_RXFCS;
 
+       if (mlx5_qos_is_supported(mdev))
+               netdev->hw_features |= NETIF_F_HW_TC;
+
        netdev->features          = netdev->hw_features;
 
        /* Defaults */
@@ -4879,8 +4895,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
                netdev->hw_features      |= NETIF_F_NTUPLE;
 #endif
        }
-       if (mlx5_qos_is_supported(mdev))
-               netdev->features |= NETIF_F_HW_TC;
 
        netdev->features         |= NETIF_F_HIGHDMA;
        netdev->features         |= NETIF_F_HW_VLAN_STAG_FILTER;
index 629a61e..d273758 100644 (file)
@@ -452,12 +452,32 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
 static
 struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
 {
+       struct mlx5_core_dev *mdev;
        struct net_device *netdev;
        struct mlx5e_priv *priv;
 
-       netdev = __dev_get_by_index(net, ifindex);
+       netdev = dev_get_by_index(net, ifindex);
+       if (!netdev)
+               return ERR_PTR(-ENODEV);
+
        priv = netdev_priv(netdev);
-       return priv->mdev;
+       mdev = priv->mdev;
+       dev_put(netdev);
+
+       /* Mirred tc action holds a refcount on the ifindex net_device (see
+        * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
+        * after dev_put(netdev), while we're in the context of adding a tc flow.
+        *
+        * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
+        * stored in a hairpin object, which exists until all flows that refer
+        * to it are removed.
+        *
+        * On the other hand, after a hairpin object has been created, the peer net_device may
+        * be removed/unbound while there are still some hairpin flows that are using it. This
+        * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to
+        * NETDEV_UNREGISTER event of the peer net_device.
+        */
+       return mdev;
 }
 
 static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
@@ -666,6 +686,10 @@ mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params
 
        func_mdev = priv->mdev;
        peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+       if (IS_ERR(peer_mdev)) {
+               err = PTR_ERR(peer_mdev);
+               goto create_pair_err;
+       }
 
        pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
        if (IS_ERR(pair)) {
@@ -804,6 +828,11 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
        int err;
 
        peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+       if (IS_ERR(peer_mdev)) {
+               NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
+               return PTR_ERR(peer_mdev);
+       }
+
        if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
                NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
                return -EOPNOTSUPP;
index 48cac5b..d562edf 100644 (file)
@@ -636,7 +636,7 @@ struct esw_vport_tbl_namespace {
 };
 
 struct mlx5_vport_tbl_attr {
-       u16 chain;
+       u32 chain;
        u16 prio;
        u16 vport;
        const struct esw_vport_tbl_namespace *vport_ns;
index 7579f34..011e766 100644 (file)
@@ -382,10 +382,11 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
 {
        dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
-       dest[dest_idx].vport.vhca_id =
-               MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
-       if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+       if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+               dest[dest_idx].vport.vhca_id =
+                       MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
                dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+       }
        if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
                if (pkt_reformat) {
                        flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@@ -2367,6 +2368,9 @@ static int mlx5_esw_offloads_devcom_event(int event,
 
        switch (event) {
        case ESW_OFFLOADS_DEVCOM_PAIR:
+               if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev)
+                       break;
+
                if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
                    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
                        break;
index d7bf0a3..c0697e1 100644 (file)
@@ -1024,17 +1024,19 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
 static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
                              struct fs_prio *prio)
 {
-       struct mlx5_flow_table *next_ft;
+       struct mlx5_flow_table *next_ft, *first_ft;
        int err = 0;
 
        /* Connect_prev_fts and update_root_ft_create are mutually exclusive */
 
-       if (list_empty(&prio->node.children)) {
+       first_ft = list_first_entry_or_null(&prio->node.children,
+                                           struct mlx5_flow_table, node.list);
+       if (!first_ft || first_ft->level > ft->level) {
                err = connect_prev_fts(dev, ft, prio);
                if (err)
                        return err;
 
-               next_ft = find_next_chained_ft(prio);
+               next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
                err = connect_fwd_rules(dev, ft, next_ft);
                if (err)
                        return err;
@@ -2120,7 +2122,7 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft)
                                node.list) == ft))
                return 0;
 
-       next_ft = find_next_chained_ft(prio);
+       next_ft = find_next_ft(ft);
        err = connect_fwd_rules(dev, next_ft, ft);
        if (err)
                return err;
index 9ff163c..9abeb80 100644 (file)
@@ -626,8 +626,16 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
        }
        fw_reporter_ctx.err_synd = health->synd;
        fw_reporter_ctx.miss_counter = health->miss_counter;
-       devlink_health_report(health->fw_fatal_reporter,
-                             "FW fatal error reported", &fw_reporter_ctx);
+       if (devlink_health_report(health->fw_fatal_reporter,
+                                 "FW fatal error reported", &fw_reporter_ctx) == -ECANCELED) {
+               /* If recovery wasn't performed, due to grace period,
+                * unload the driver. This ensures that the driver
+                * closes all its resources and it is not subjected to
+                * requests from the kernel.
+                */
+               mlx5_core_err(dev, "Driver is in error state. Unloading\n");
+               mlx5_unload_one(dev);
+       }
 }
 
 static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
index a80419d..7bdbb2d 100644 (file)
@@ -2,6 +2,8 @@ config SPARX5_SWITCH
        tristate "Sparx5 switch driver"
        depends on NET_SWITCHDEV
        depends on HAS_IOMEM
+       depends on OF
+       depends on ARCH_SPARX5 || COMPILE_TEST
        select PHYLINK
        select PHY_SPARX5_SERDES
        select RESET_CONTROLLER
index 5249b64..49def69 100644 (file)
@@ -540,10 +540,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
        SET_NETDEV_DEV(ndev, &pdev->dev);
 
        ret = register_netdev(ndev);
-       if (ret) {
-               free_netdev(ndev);
+       if (ret)
                goto init_fail;
-       }
 
        netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n",
                   __func__, ndev->irq, ndev->dev_addr);
index 3e89e34..e9d260d 100644 (file)
@@ -1298,6 +1298,7 @@ static int ocelot_netdevice_lag_leave(struct net_device *dev,
 }
 
 static int ocelot_netdevice_changeupper(struct net_device *dev,
+                                       struct net_device *brport_dev,
                                        struct netdev_notifier_changeupper_info *info)
 {
        struct netlink_ext_ack *extack;
@@ -1307,11 +1308,11 @@ static int ocelot_netdevice_changeupper(struct net_device *dev,
 
        if (netif_is_bridge_master(info->upper_dev)) {
                if (info->linking)
-                       err = ocelot_netdevice_bridge_join(dev, dev,
+                       err = ocelot_netdevice_bridge_join(dev, brport_dev,
                                                           info->upper_dev,
                                                           extack);
                else
-                       err = ocelot_netdevice_bridge_leave(dev, dev,
+                       err = ocelot_netdevice_bridge_leave(dev, brport_dev,
                                                            info->upper_dev);
        }
        if (netif_is_lag_master(info->upper_dev)) {
@@ -1346,7 +1347,7 @@ ocelot_netdevice_lag_changeupper(struct net_device *dev,
                if (ocelot_port->bond != dev)
                        return NOTIFY_OK;
 
-               err = ocelot_netdevice_changeupper(lower, info);
+               err = ocelot_netdevice_changeupper(lower, dev, info);
                if (err)
                        return notifier_from_errno(err);
        }
@@ -1385,7 +1386,7 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
                struct netdev_notifier_changeupper_info *info = ptr;
 
                if (ocelot_netdevice_dev_check(dev))
-                       return ocelot_netdevice_changeupper(dev, info);
+                       return ocelot_netdevice_changeupper(dev, dev, info);
 
                if (netif_is_lag_master(dev))
                        return ocelot_netdevice_lag_changeupper(dev, info);
index 273d529..062bb2d 100644 (file)
@@ -1141,20 +1141,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
                nfp_fl_ct_clean_flow_entry(ct_entry);
                kfree(ct_map_ent);
 
-               /* If this is the last pre_ct_rule it means that it is
-                * very likely that the nft table will be cleaned up next,
-                * as this happens on the removal of the last act_ct flow.
-                * However we cannot deregister the callback on the removal
-                * of the last nft flow as this runs into a deadlock situation.
-                * So deregister the callback on removal of the last pre_ct flow
-                * and remove any remaining nft flow entries. We also cannot
-                * save this state and delete the callback later since the
-                * nft table would already have been freed at that time.
-                */
                if (!zt->pre_ct_count) {
-                       nf_flow_table_offload_del_cb(zt->nft,
-                                                    nfp_fl_ct_handle_nft_flow,
-                                                    zt);
                        zt->nft = NULL;
                        nfp_fl_ct_clean_nft_entries(zt);
                }
@@ -1172,6 +1159,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
                                       nfp_ct_map_params);
                nfp_fl_ct_clean_flow_entry(ct_map_ent->ct_entry);
                kfree(ct_map_ent);
+               break;
        default:
                break;
        }
index af3a536..e795fa6 100644 (file)
@@ -29,7 +29,7 @@ static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
                                      */
 };
 
-static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
+static void ionic_lif_rx_mode(struct ionic_lif *lif);
 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
 static void ionic_link_status_check(struct ionic_lif *lif);
@@ -53,7 +53,19 @@ static void ionic_dim_work(struct work_struct *work)
        cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
        qcq = container_of(dim, struct ionic_qcq, dim);
        new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
-       qcq->intr.dim_coal_hw = new_coal ? new_coal : 1;
+       new_coal = new_coal ? new_coal : 1;
+
+       if (qcq->intr.dim_coal_hw != new_coal) {
+               unsigned int qi = qcq->cq.bound_q->index;
+               struct ionic_lif *lif = qcq->q.lif;
+
+               qcq->intr.dim_coal_hw = new_coal;
+
+               ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+                                    lif->rxqcqs[qi]->intr.index,
+                                    qcq->intr.dim_coal_hw);
+       }
+
        dim->state = DIM_START_MEASURE;
 }
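
The reworked ionic_dim_work() turns dim_coal_hw into a cached last-written value: the ionic_intr_coal_init() register write now happens only when net_dim proposes a different coalescing value, rather than on every interrupt-moderation pass. A generic sketch of the write-on-change idiom with hypothetical names:

    /* Hedged sketch: cache the last value pushed to hardware and skip
     * redundant writes; struct coal_cache and hw_write() are hypothetical. */
    struct coal_cache {
            unsigned int last_hw_val;
    };

    static void coal_update(struct coal_cache *c, unsigned int new_val,
                            void (*hw_write)(unsigned int))
    {
            if (new_val == 0)
                    new_val = 1;            /* mirror the "new_coal ? : 1" floor above */
            if (c->last_hw_val != new_val) {
                    c->last_hw_val = new_val;
                    hw_write(new_val);      /* touch the register only on change */
            }
    }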
 
@@ -77,7 +89,7 @@ static void ionic_lif_deferred_work(struct work_struct *work)
 
                switch (w->type) {
                case IONIC_DW_TYPE_RX_MODE:
-                       ionic_lif_rx_mode(lif, w->rx_mode);
+                       ionic_lif_rx_mode(lif);
                        break;
                case IONIC_DW_TYPE_RX_ADDR_ADD:
                        ionic_lif_addr_add(lif, w->addr);
@@ -1301,10 +1313,8 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
        return 0;
 }
 
-static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
-                         bool can_sleep)
+static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
 {
-       struct ionic_deferred_work *work;
        unsigned int nmfilters;
        unsigned int nufilters;
 
@@ -1330,97 +1340,46 @@ static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
                        lif->nucast--;
        }
 
-       if (!can_sleep) {
-               work = kzalloc(sizeof(*work), GFP_ATOMIC);
-               if (!work)
-                       return -ENOMEM;
-               work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
-                                  IONIC_DW_TYPE_RX_ADDR_DEL;
-               memcpy(work->addr, addr, ETH_ALEN);
-               netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
-                          add ? "add" : "del", addr);
-               ionic_lif_deferred_enqueue(&lif->deferred, work);
-       } else {
-               netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
-                          add ? "add" : "del", addr);
-               if (add)
-                       return ionic_lif_addr_add(lif, addr);
-               else
-                       return ionic_lif_addr_del(lif, addr);
-       }
+       netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
+                  add ? "add" : "del", addr);
+       if (add)
+               return ionic_lif_addr_add(lif, addr);
+       else
+               return ionic_lif_addr_del(lif, addr);
 
        return 0;
 }
 
 static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
 {
-       return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_SLEEP);
-}
-
-static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr)
-{
-       return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_NOT_SLEEP);
+       return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR);
 }
 
 static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
 {
-       return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_SLEEP);
+       return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR);
 }
 
-static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr)
+static void ionic_lif_rx_mode(struct ionic_lif *lif)
 {
-       return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_NOT_SLEEP);
-}
-
-static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
-{
-       struct ionic_admin_ctx ctx = {
-               .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
-               .cmd.rx_mode_set = {
-                       .opcode = IONIC_CMD_RX_MODE_SET,
-                       .lif_index = cpu_to_le16(lif->index),
-                       .rx_mode = cpu_to_le16(rx_mode),
-               },
-       };
+       struct net_device *netdev = lif->netdev;
+       unsigned int nfilters;
+       unsigned int nd_flags;
        char buf[128];
-       int err;
+       u16 rx_mode;
        int i;
 #define REMAIN(__x) (sizeof(buf) - (__x))
 
-       i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
-                     lif->rx_mode, rx_mode);
-       if (rx_mode & IONIC_RX_MODE_F_UNICAST)
-               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
-       if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
-               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
-       if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
-               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
-       if (rx_mode & IONIC_RX_MODE_F_PROMISC)
-               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
-       if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
-               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
-       netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
-
-       err = ionic_adminq_post_wait(lif, &ctx);
-       if (err)
-               netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
-                           rx_mode, err);
-       else
-               lif->rx_mode = rx_mode;
-}
+       mutex_lock(&lif->config_lock);
 
-static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
-{
-       struct ionic_lif *lif = netdev_priv(netdev);
-       struct ionic_deferred_work *work;
-       unsigned int nfilters;
-       unsigned int rx_mode;
+       /* grab the flags once for local use */
+       nd_flags = netdev->flags;
 
        rx_mode = IONIC_RX_MODE_F_UNICAST;
-       rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
-       rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
-       rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
-       rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
+       rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
+       rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
+       rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
+       rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
 
        /* sync unicast addresses
         * next check to see if we're in an overflow state
@@ -1429,49 +1388,83 @@ static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
         *       we remove our overflow flag and check the netdev flags
         *       to see if we can disable NIC PROMISC
         */
-       if (can_sleep)
-               __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
-       else
-               __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
+       __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
        nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
        if (netdev_uc_count(netdev) + 1 > nfilters) {
                rx_mode |= IONIC_RX_MODE_F_PROMISC;
                lif->uc_overflow = true;
        } else if (lif->uc_overflow) {
                lif->uc_overflow = false;
-               if (!(netdev->flags & IFF_PROMISC))
+               if (!(nd_flags & IFF_PROMISC))
                        rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
        }
 
        /* same for multicast */
-       if (can_sleep)
-               __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
-       else
-               __dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
+       __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
        nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
        if (netdev_mc_count(netdev) > nfilters) {
                rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
                lif->mc_overflow = true;
        } else if (lif->mc_overflow) {
                lif->mc_overflow = false;
-               if (!(netdev->flags & IFF_ALLMULTI))
+               if (!(nd_flags & IFF_ALLMULTI))
                        rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
        }
 
+       i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
+                     lif->rx_mode, rx_mode);
+       if (rx_mode & IONIC_RX_MODE_F_UNICAST)
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
+       if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
+       if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
+       if (rx_mode & IONIC_RX_MODE_F_PROMISC)
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
+       if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
+       if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
+       netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);
+
        if (lif->rx_mode != rx_mode) {
-               if (!can_sleep) {
-                       work = kzalloc(sizeof(*work), GFP_ATOMIC);
-                       if (!work) {
-                               netdev_err(lif->netdev, "rxmode change dropped\n");
-                               return;
-                       }
-                       work->type = IONIC_DW_TYPE_RX_MODE;
-                       work->rx_mode = rx_mode;
-                       netdev_dbg(lif->netdev, "deferred: rx_mode\n");
-                       ionic_lif_deferred_enqueue(&lif->deferred, work);
-               } else {
-                       ionic_lif_rx_mode(lif, rx_mode);
+               struct ionic_admin_ctx ctx = {
+                       .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+                       .cmd.rx_mode_set = {
+                               .opcode = IONIC_CMD_RX_MODE_SET,
+                               .lif_index = cpu_to_le16(lif->index),
+                       },
+               };
+               int err;
+
+               ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
+               err = ionic_adminq_post_wait(lif, &ctx);
+               if (err)
+                       netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
+                                   rx_mode, err);
+               else
+                       lif->rx_mode = rx_mode;
+       }
+
+       mutex_unlock(&lif->config_lock);
+}
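
The debug string above uses the common scnprintf()/REMAIN() idiom: each append starts at offset i and is bounded by the space left in buf, and scnprintf() (unlike snprintf()) returns the number of bytes actually written, so i can never run past the buffer. A standalone userspace analogue with snprintf(), which behaves identically as long as nothing truncates (flag values are illustrative, not the IONIC_RX_MODE_F_* constants):

    #include <stdio.h>

    #define REMAIN(x) (sizeof(buf) - (x))

    int main(void)
    {
            char buf[128];
            unsigned int rx_mode = 0x1 | 0x2 | 0x4;   /* illustrative flag bits */
            int i;

            i = snprintf(buf, sizeof(buf), "rx_mode 0x%04x:", rx_mode);
            if (rx_mode & 0x1)
                    i += snprintf(&buf[i], REMAIN(i), " UNICAST");
            if (rx_mode & 0x2)
                    i += snprintf(&buf[i], REMAIN(i), " MULTICAST");
            if (rx_mode & 0x4)
                    i += snprintf(&buf[i], REMAIN(i), " BROADCAST");
            printf("%s\n", buf);   /* rx_mode 0x0007: UNICAST MULTICAST BROADCAST */
            return 0;
    }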
+
+static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
+{
+       struct ionic_lif *lif = netdev_priv(netdev);
+       struct ionic_deferred_work *work;
+
+       if (!can_sleep) {
+               work = kzalloc(sizeof(*work), GFP_ATOMIC);
+               if (!work) {
+                       netdev_err(lif->netdev, "rxmode change dropped\n");
+                       return;
                }
+               work->type = IONIC_DW_TYPE_RX_MODE;
+               netdev_dbg(lif->netdev, "deferred: rx_mode\n");
+               ionic_lif_deferred_enqueue(&lif->deferred, work);
+       } else {
+               ionic_lif_rx_mode(lif);
        }
 }
 
@@ -3058,6 +3051,7 @@ void ionic_lif_deinit(struct ionic_lif *lif)
        ionic_lif_qcq_deinit(lif, lif->notifyqcq);
        ionic_lif_qcq_deinit(lif, lif->adminqcq);
 
+       mutex_destroy(&lif->config_lock);
        mutex_destroy(&lif->queue_lock);
        ionic_lif_reset(lif);
 }
@@ -3185,7 +3179,7 @@ static int ionic_station_set(struct ionic_lif *lif)
                 */
                if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
                                      netdev->dev_addr))
-                       ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
+                       ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR);
        } else {
                /* Update the netdev mac with the device's mac */
                memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
@@ -3202,7 +3196,7 @@ static int ionic_station_set(struct ionic_lif *lif)
 
        netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
                   netdev->dev_addr);
-       ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
+       ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR);
 
        return 0;
 }
@@ -3225,6 +3219,7 @@ int ionic_lif_init(struct ionic_lif *lif)
 
        lif->hw_index = le16_to_cpu(comp.hw_index);
        mutex_init(&lif->queue_lock);
+       mutex_init(&lif->config_lock);
 
        /* now that we have the hw_index we can figure out our doorbell page */
        lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
index 346506f..69ab59f 100644 (file)
@@ -108,7 +108,6 @@ struct ionic_deferred_work {
        struct list_head list;
        enum ionic_deferred_work_type type;
        union {
-               unsigned int rx_mode;
                u8 addr[ETH_ALEN];
                u8 fw_status;
        };
@@ -179,6 +178,7 @@ struct ionic_lif {
        unsigned int index;
        unsigned int hw_index;
        struct mutex queue_lock;        /* lock for queue structures */
+       struct mutex config_lock;       /* lock for config actions */
        spinlock_t adminq_lock;         /* lock for AdminQ operations */
        struct ionic_qcq *adminqcq;
        struct ionic_qcq *notifyqcq;
@@ -199,7 +199,7 @@ struct ionic_lif {
        unsigned int nrxq_descs;
        u32 rx_copybreak;
        u64 rxq_features;
-       unsigned int rx_mode;
+       u16 rx_mode;
        u64 hw_features;
        bool registered;
        bool mc_overflow;
@@ -302,7 +302,7 @@ int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
 int ionic_lif_size(struct ionic *ionic);
 
 #if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
-int ionic_lif_hwstamp_replay(struct ionic_lif *lif);
+void ionic_lif_hwstamp_replay(struct ionic_lif *lif);
 int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr);
 int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr);
 ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter);
@@ -311,10 +311,7 @@ void ionic_lif_unregister_phc(struct ionic_lif *lif);
 void ionic_lif_alloc_phc(struct ionic_lif *lif);
 void ionic_lif_free_phc(struct ionic_lif *lif);
 #else
-static inline int ionic_lif_hwstamp_replay(struct ionic_lif *lif)
-{
-       return -EOPNOTSUPP;
-}
+static inline void ionic_lif_hwstamp_replay(struct ionic_lif *lif) {}
 
 static inline int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
 {
index a87c87e..6e2403c 100644 (file)
@@ -188,6 +188,9 @@ int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
        struct hwtstamp_config config;
        int err;
 
+       if (!lif->phc || !lif->phc->ptp)
+               return -EOPNOTSUPP;
+
        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;
 
@@ -203,15 +206,16 @@ int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
        return 0;
 }
 
-int ionic_lif_hwstamp_replay(struct ionic_lif *lif)
+void ionic_lif_hwstamp_replay(struct ionic_lif *lif)
 {
        int err;
 
+       if (!lif->phc || !lif->phc->ptp)
+               return;
+
        err = ionic_lif_hwstamp_set_ts_config(lif, NULL);
        if (err)
                netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err);
-
-       return err;
 }
 
 int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr)
index 0893488..0887019 100644 (file)
@@ -274,12 +274,11 @@ static void ionic_rx_clean(struct ionic_queue *q,
                }
        }
 
-       if (likely(netdev->features & NETIF_F_RXCSUM)) {
-               if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
-                       skb->ip_summed = CHECKSUM_COMPLETE;
-                       skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
-                       stats->csum_complete++;
-               }
+       if (likely(netdev->features & NETIF_F_RXCSUM) &&
+           (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
+               skb->ip_summed = CHECKSUM_COMPLETE;
+               skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
+               stats->csum_complete++;
        } else {
                stats->csum_none++;
        }
@@ -451,11 +450,12 @@ void ionic_rx_empty(struct ionic_queue *q)
        q->tail_idx = 0;
 }
 
-static void ionic_dim_update(struct ionic_qcq *qcq)
+static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
 {
        struct dim_sample dim_sample;
        struct ionic_lif *lif;
        unsigned int qi;
+       u64 pkts, bytes;
 
        if (!qcq->intr.dim_coal_hw)
                return;
@@ -463,14 +463,23 @@ static void ionic_dim_update(struct ionic_qcq *qcq)
        lif = qcq->q.lif;
        qi = qcq->cq.bound_q->index;
 
-       ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
-                            lif->rxqcqs[qi]->intr.index,
-                            qcq->intr.dim_coal_hw);
+       switch (napi_mode) {
+       case IONIC_LIF_F_TX_DIM_INTR:
+               pkts = lif->txqstats[qi].pkts;
+               bytes = lif->txqstats[qi].bytes;
+               break;
+       case IONIC_LIF_F_RX_DIM_INTR:
+               pkts = lif->rxqstats[qi].pkts;
+               bytes = lif->rxqstats[qi].bytes;
+               break;
+       default:
+               pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
+               bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
+               break;
+       }
 
        dim_update_sample(qcq->cq.bound_intr->rearm_count,
-                         lif->txqstats[qi].pkts,
-                         lif->txqstats[qi].bytes,
-                         &dim_sample);
+                         pkts, bytes, &dim_sample);
 
        net_dim(&qcq->dim, dim_sample);
 }
@@ -491,7 +500,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
                                     ionic_tx_service, NULL, NULL);
 
        if (work_done < budget && napi_complete_done(napi, work_done)) {
-               ionic_dim_update(qcq);
+               ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
                flags |= IONIC_INTR_CRED_UNMASK;
                cq->bound_intr->rearm_count++;
        }
@@ -530,7 +539,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
                ionic_rx_fill(cq->bound_q);
 
        if (work_done < budget && napi_complete_done(napi, work_done)) {
-               ionic_dim_update(qcq);
+               ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
                flags |= IONIC_INTR_CRED_UNMASK;
                cq->bound_intr->rearm_count++;
        }
@@ -576,7 +585,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
                ionic_rx_fill(rxcq->bound_q);
 
        if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
-               ionic_dim_update(qcq);
+               ionic_dim_update(qcq, 0);
                flags |= IONIC_INTR_CRED_UNMASK;
                rxcq->bound_intr->rearm_count++;
        }
index c59b72c..a2e4dfb 100644 (file)
@@ -831,7 +831,7 @@ int qede_configure_vlan_filters(struct qede_dev *edev)
 int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
 {
        struct qede_dev *edev = netdev_priv(dev);
-       struct qede_vlan *vlan = NULL;
+       struct qede_vlan *vlan;
        int rc = 0;
 
        DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
@@ -842,7 +842,7 @@ int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
                if (vlan->vid == vid)
                        break;
 
-       if (!vlan || (vlan->vid != vid)) {
+       if (list_entry_is_head(vlan, &edev->vlan_list, list)) {
                DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                           "Vlan isn't configured\n");
                goto out;
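
The qede change fixes a classic list_for_each_entry() pitfall: when the loop runs to completion without a break, the cursor never becomes NULL; it points at bogus memory computed from the list head itself, so the old "if (!vlan || vlan->vid != vid)" test could read garbage. list_entry_is_head() is the supported not-found check. A hedged kernel-style sketch of the pattern, where struct item and find_item() are hypothetical and the list helpers are the real <linux/list.h> ones:

    struct item {
            struct list_head list;
            u16 id;
    };

    static struct item *find_item(struct list_head *head, u16 id)
    {
            struct item *it;

            list_for_each_entry(it, head, list)
                    if (it->id == id)
                            break;

            /* On full traversal 'it' is derived from 'head', not NULL,
             * so compare against the head rather than testing the pointer.
             */
            if (list_entry_is_head(it, head, list))
                    return NULL;

            return it;
    }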
index 2376b27..c00ad57 100644 (file)
@@ -154,7 +154,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
                                      "driver lock acquired\n");
                        return 1;
                }
-               ssleep(1);
+               mdelay(1000);
        } while (++i < 10);
 
        netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
@@ -3274,7 +3274,7 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
                if ((value & ISP_CONTROL_SR) == 0)
                        break;
 
-               ssleep(1);
+               mdelay(1000);
        } while ((--max_wait_time));
 
        /*
@@ -3310,7 +3310,7 @@ static int ql_adapter_reset(struct ql3_adapter *qdev)
                                                   ispControlStatus);
                        if ((value & ISP_CONTROL_FSR) == 0)
                                break;
-                       ssleep(1);
+                       mdelay(1000);
                } while ((--max_wait_time));
        }
        if (max_wait_time == 0)
index 8543bf3..ad655f0 100644 (file)
@@ -735,12 +735,13 @@ static int emac_remove(struct platform_device *pdev)
 
        put_device(&adpt->phydev->mdio.dev);
        mdiobus_unregister(adpt->mii_bus);
-       free_netdev(netdev);
 
        if (adpt->phy.digital)
                iounmap(adpt->phy.digital);
        iounmap(adpt->phy.base);
 
+       free_netdev(netdev);
+
        return 0;
 }
 
index f744557..c7af5bc 100644 (file)
@@ -5084,7 +5084,8 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
        new_bus->priv = tp;
        new_bus->parent = &pdev->dev;
        new_bus->irq[0] = PHY_MAC_INTERRUPT;
-       snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev));
+       snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x",
+                pci_domain_nr(pdev->bus), pci_dev_id(pdev));
 
        new_bus->read = r8169_mdio_read_reg;
        new_bus->write = r8169_mdio_write_reg;
index 86a1eb0..80e62ca 100644 (file)
@@ -864,7 +864,7 @@ enum GECMR_BIT {
 
 /* The Ethernet AVB descriptor definitions. */
 struct ravb_desc {
-       __le16 ds;              /* Descriptor size */
+       __le16 ds;      /* Descriptor size */
        u8 cc;          /* Content control MSBs (reserved) */
        u8 die_dt;      /* Descriptor interrupt enable and type */
        __le32 dptr;    /* Descriptor pointer */
index 69c50f8..8053970 100644 (file)
@@ -920,7 +920,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
        if (ravb_rx(ndev, &quota, q))
                goto out;
 
-       /* Processing RX Descriptor Ring */
+       /* Processing TX Descriptor Ring */
        spin_lock_irqsave(&priv->lock, flags);
        /* Clear TX interrupt */
        ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
index a3ca406..e5b0d79 100644 (file)
@@ -152,6 +152,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
         * maximum size.
         */
        tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
+       tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
        n_xdp_tx = num_possible_cpus();
        n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
 
@@ -169,6 +170,8 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
                netif_err(efx, drv, efx->net_dev,
                          "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
                          n_xdp_ev, n_channels, max_channels);
+               netif_err(efx, drv, efx->net_dev,
+                         "XDP_TX and XDP_REDIRECT will not work on this interface");
                efx->n_xdp_channels = 0;
                efx->xdp_tx_per_channel = 0;
                efx->xdp_tx_queue_count = 0;
@@ -176,12 +179,14 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
                netif_err(efx, drv, efx->net_dev,
                          "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
                          n_xdp_tx, n_channels, efx->max_vis);
+               netif_err(efx, drv, efx->net_dev,
+                         "XDP_TX and XDP_REDIRECT will not work on this interface");
                efx->n_xdp_channels = 0;
                efx->xdp_tx_per_channel = 0;
                efx->xdp_tx_queue_count = 0;
        } else {
                efx->n_xdp_channels = n_xdp_ev;
-               efx->xdp_tx_per_channel = EFX_MAX_TXQ_PER_CHANNEL;
+               efx->xdp_tx_per_channel = tx_per_ev;
                efx->xdp_tx_queue_count = n_xdp_tx;
                n_channels += n_xdp_ev;
                netif_dbg(efx, drv, efx->net_dev,
@@ -891,18 +896,20 @@ int efx_set_channels(struct efx_nic *efx)
                        if (efx_channel_is_xdp_tx(channel)) {
                                efx_for_each_channel_tx_queue(tx_queue, channel) {
                                        tx_queue->queue = next_queue++;
-                                       netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
-                                                 channel->channel, tx_queue->label,
-                                                 xdp_queue_number, tx_queue->queue);
+
                                        /* We may have a few left-over XDP TX
                                         * queues owing to xdp_tx_queue_count
                                         * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
                                         * We still allocate and probe those
                                         * TXQs, but never use them.
                                         */
-                                       if (xdp_queue_number < efx->xdp_tx_queue_count)
+                                       if (xdp_queue_number < efx->xdp_tx_queue_count) {
+                                               netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
+                                                         channel->channel, tx_queue->label,
+                                                         xdp_queue_number, tx_queue->queue);
                                                efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
-                                       xdp_queue_number++;
+                                               xdp_queue_number++;
+                                       }
                                }
                        } else {
                                efx_for_each_channel_tx_queue(tx_queue, channel) {
@@ -914,8 +921,7 @@ int efx_set_channels(struct efx_nic *efx)
                        }
                }
        }
-       if (xdp_queue_number)
-               efx->xdp_tx_queue_count = xdp_queue_number;
+       WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);
 
        rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
        if (rc)
index ca9c00b..cff87de 100644 (file)
@@ -443,7 +443,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
 #endif
 
        /* setup various bits in PCI command register */
-       ret = pci_enable_device(pci_dev);
+       ret = pcim_enable_device(pci_dev);
        if(ret) return ret;
 
        i = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
@@ -469,7 +469,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
        ioaddr = pci_iomap(pci_dev, 0, 0);
        if (!ioaddr) {
                ret = -ENOMEM;
-               goto err_out_cleardev;
+               goto err_out;
        }
 
        sis_priv = netdev_priv(net_dev);
@@ -581,8 +581,6 @@ err_unmap_tx:
                          sis_priv->tx_ring_dma);
 err_out_unmap:
        pci_iounmap(pci_dev, ioaddr);
-err_out_cleardev:
-       pci_release_regions(pci_dev);
  err_out:
        free_netdev(net_dev);
        return ret;
@@ -2499,7 +2497,6 @@ static void sis900_remove(struct pci_dev *pci_dev)
                          sis_priv->tx_ring_dma);
        pci_iounmap(pci_dev, sis_priv->ioaddr);
        free_netdev(net_dev);
-       pci_release_regions(pci_dev);
 }
 
 static int __maybe_unused sis900_suspend(struct device *dev)
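
Switching to pcim_enable_device() moves the device into managed (devres) mode: the enable state and any regions or mappings acquired afterwards are released automatically when probe fails or the driver unbinds, which is why the explicit pci_release_regions() calls and the err_out_cleardev label can simply disappear. A minimal sketch, assuming a hypothetical driver:

        #include <linux/pci.h>

        static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        {
                int rc;

                rc = pcim_enable_device(pdev);  /* devres-managed enable */
                if (rc)
                        return rc;

                /*
                 * On any later failure, just return the error: devres
                 * disables the device and releases its resources for us,
                 * both here and at remove() time.
                 */
                return 0;
        }
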
index e108b0d..4c9a37d 100644 (file)
@@ -49,9 +49,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
 {
        struct plat_stmmacenet_data *plat;
        struct stmmac_resources res;
-       bool mdio = false;
-       int ret, i;
        struct device_node *np;
+       int ret, i, phy_mode;
+       bool mdio = false;
 
        np = dev_of_node(&pdev->dev);
 
@@ -108,10 +108,11 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
        if (plat->bus_id < 0)
                plat->bus_id = pci_dev_id(pdev);
 
-       plat->phy_interface = device_get_phy_mode(&pdev->dev);
-       if (plat->phy_interface < 0)
+       phy_mode = device_get_phy_mode(&pdev->dev);
+       if (phy_mode < 0)
                dev_err(&pdev->dev, "phy_mode not found\n");
 
+       plat->phy_interface = phy_mode;
        plat->interface = PHY_INTERFACE_MODE_GMII;
 
        pci_set_master(pdev);
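
The temporary int above (and the analogous stmmac_probe_config_dt() change further down) guards against an enum signedness trap: plat->phy_interface is a phy_interface_t enum, which the compiler may represent as unsigned, so storing a negative errno there and then testing it with < 0 can be folded away. A sketch of the trap under that assumption, with a hypothetical probe helper:

        enum mode { MODE_A, MODE_B };   /* compiler may pick an unsigned type */

        int probe_mode(void);           /* hypothetical: returns mode or -errno */

        static int get_mode(enum mode *out)
        {
                int v = probe_mode();

                /*
                 * Assigning v straight to an enum field and testing
                 * (field < 0) is unreliable: if the enum is unsigned,
                 * the comparison can be folded to always-false.
                 * Validate while the value is still a signed int:
                 */
                if (v < 0)
                        return v;

                *out = v;
                return 0;
        }
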
index 67ba083..b217453 100644 (file)
@@ -1249,6 +1249,7 @@ const struct stmmac_ops dwmac410_ops = {
        .config_l3_filter = dwmac4_config_l3_filter,
        .config_l4_filter = dwmac4_config_l4_filter,
        .est_configure = dwmac5_est_configure,
+       .est_irq_status = dwmac5_est_irq_status,
        .fpe_configure = dwmac5_fpe_configure,
        .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
        .fpe_irq_status = dwmac5_fpe_irq_status,
@@ -1300,6 +1301,7 @@ const struct stmmac_ops dwmac510_ops = {
        .config_l3_filter = dwmac4_config_l3_filter,
        .config_l4_filter = dwmac4_config_l4_filter,
        .est_configure = dwmac5_est_configure,
+       .est_irq_status = dwmac5_est_irq_status,
        .fpe_configure = dwmac5_fpe_configure,
        .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
        .fpe_irq_status = dwmac5_fpe_irq_status,
index e735134..fcdb1d2 100644 (file)
@@ -349,6 +349,9 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue);
 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue);
 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue);
 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags);
+struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
+                                          ktime_t current_time,
+                                          u64 cycle_time);
 
 #if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
 void stmmac_selftest_run(struct net_device *dev,
index 8d9d6ec..7b8404a 100644 (file)
@@ -7171,6 +7171,7 @@ int stmmac_suspend(struct device *dev)
                                     priv->plat->rx_queues_to_use, false);
 
                stmmac_fpe_handshake(priv, false);
+               stmmac_fpe_stop_wq(priv);
        }
 
        priv->speed = SPEED_UNKNOWN;
index 072eff8..5ca7108 100644 (file)
@@ -397,6 +397,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
        struct device_node *np = pdev->dev.of_node;
        struct plat_stmmacenet_data *plat;
        struct stmmac_dma_cfg *dma_cfg;
+       int phy_mode;
        void *ret;
        int rc;
 
@@ -412,10 +413,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
                eth_zero_addr(mac);
        }
 
-       plat->phy_interface = device_get_phy_mode(&pdev->dev);
-       if (plat->phy_interface < 0)
-               return ERR_PTR(plat->phy_interface);
+       phy_mode = device_get_phy_mode(&pdev->dev);
+       if (phy_mode < 0)
+               return ERR_PTR(phy_mode);
 
+       plat->phy_interface = phy_mode;
        plat->interface = stmmac_of_get_mac_mode(np);
        if (plat->interface < 0)
                plat->interface = plat->phy_interface;
index 4e86cdf..580cc03 100644 (file)
@@ -62,7 +62,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
        u32 sec, nsec;
        u32 quotient, reminder;
        int neg_adj = 0;
-       bool xmac;
+       bool xmac, est_rst = false;
+       int ret;
 
        xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
 
@@ -75,10 +76,48 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
        sec = quotient;
        nsec = reminder;
 
+       /* If EST is enabled, disable it before adjusting the PTP time. */
+       if (priv->plat->est && priv->plat->est->enable) {
+               est_rst = true;
+               mutex_lock(&priv->plat->est->lock);
+               priv->plat->est->enable = false;
+               stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+                                    priv->plat->clk_ptp_rate);
+               mutex_unlock(&priv->plat->est->lock);
+       }
+
        spin_lock_irqsave(&priv->ptp_lock, flags);
        stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac);
        spin_unlock_irqrestore(&priv->ptp_lock, flags);
 
+       /* Calculate the new basetime and re-configure EST after the PTP time adjustment. */
+       if (est_rst) {
+               struct timespec64 current_time, time;
+               ktime_t current_time_ns, basetime;
+               u64 cycle_time;
+
+               mutex_lock(&priv->plat->est->lock);
+               priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
+               current_time_ns = timespec64_to_ktime(current_time);
+               time.tv_nsec = priv->plat->est->btr_reserve[0];
+               time.tv_sec = priv->plat->est->btr_reserve[1];
+               basetime = timespec64_to_ktime(time);
+               cycle_time = priv->plat->est->ctr[1] * NSEC_PER_SEC +
+                            priv->plat->est->ctr[0];
+               time = stmmac_calc_tas_basetime(basetime,
+                                               current_time_ns,
+                                               cycle_time);
+
+               priv->plat->est->btr[0] = (u32)time.tv_nsec;
+               priv->plat->est->btr[1] = (u32)time.tv_sec;
+               priv->plat->est->enable = true;
+               ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+                                          priv->plat->clk_ptp_rate);
+               mutex_unlock(&priv->plat->est->lock);
+               if (ret)
+                       netdev_err(priv->dev, "failed to configure EST\n");
+       }
+
        return 0;
 }
 
index 92dab60..4f3b643 100644 (file)
@@ -711,12 +711,35 @@ static int tc_setup_cls(struct stmmac_priv *priv,
        return ret;
 }
 
+struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
+                                          ktime_t current_time,
+                                          u64 cycle_time)
+{
+       struct timespec64 time;
+
+       if (ktime_after(old_base_time, current_time)) {
+               time = ktime_to_timespec64(old_base_time);
+       } else {
+               s64 n;
+               ktime_t base_time;
+
+               n = div64_s64(ktime_sub_ns(current_time, old_base_time),
+                             cycle_time);
+               base_time = ktime_add_ns(old_base_time,
+                                        (n + 1) * cycle_time);
+
+               time = ktime_to_timespec64(base_time);
+       }
+
+       return time;
+}
+
 static int tc_setup_taprio(struct stmmac_priv *priv,
                           struct tc_taprio_qopt_offload *qopt)
 {
        u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
        struct plat_stmmacenet_data *plat = priv->plat;
-       struct timespec64 time, current_time;
+       struct timespec64 time, current_time, qopt_time;
        ktime_t current_time_ns;
        bool fpe = false;
        int i, ret = 0;
@@ -773,14 +796,18 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
                                         GFP_KERNEL);
                if (!plat->est)
                        return -ENOMEM;
+
+               mutex_init(&priv->plat->est->lock);
        } else {
                memset(plat->est, 0, sizeof(*plat->est));
        }
 
        size = qopt->num_entries;
 
+       mutex_lock(&priv->plat->est->lock);
        priv->plat->est->gcl_size = size;
        priv->plat->est->enable = qopt->enable;
+       mutex_unlock(&priv->plat->est->lock);
 
        for (i = 0; i < size; i++) {
                s64 delta_ns = qopt->entries[i].interval;
@@ -811,32 +838,28 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
                priv->plat->est->gcl[i] = delta_ns | (gates << wid);
        }
 
+       mutex_lock(&priv->plat->est->lock);
        /* Adjust for real system time */
        priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
        current_time_ns = timespec64_to_ktime(current_time);
-       if (ktime_after(qopt->base_time, current_time_ns)) {
-               time = ktime_to_timespec64(qopt->base_time);
-       } else {
-               ktime_t base_time;
-               s64 n;
-
-               n = div64_s64(ktime_sub_ns(current_time_ns, qopt->base_time),
-                             qopt->cycle_time);
-               base_time = ktime_add_ns(qopt->base_time,
-                                        (n + 1) * qopt->cycle_time);
-
-               time = ktime_to_timespec64(base_time);
-       }
+       time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns,
+                                       qopt->cycle_time);
 
        priv->plat->est->btr[0] = (u32)time.tv_nsec;
        priv->plat->est->btr[1] = (u32)time.tv_sec;
 
+       qopt_time = ktime_to_timespec64(qopt->base_time);
+       priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
+       priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec;
+
        ctr = qopt->cycle_time;
        priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
        priv->plat->est->ctr[1] = (u32)ctr;
 
-       if (fpe && !priv->dma_cap.fpesel)
+       if (fpe && !priv->dma_cap.fpesel) {
+               mutex_unlock(&priv->plat->est->lock);
                return -EOPNOTSUPP;
+       }
 
        /* Actual FPE register configuration will be done after FPE handshake
         * is success.
@@ -845,6 +868,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
 
        ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
                                   priv->plat->clk_ptp_rate);
+       mutex_unlock(&priv->plat->est->lock);
        if (ret) {
                netdev_err(priv->dev, "failed to configure EST\n");
                goto disable;
@@ -860,9 +884,11 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
        return 0;
 
 disable:
+       mutex_lock(&priv->plat->est->lock);
        priv->plat->est->enable = false;
        stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
                             priv->plat->clk_ptp_rate);
+       mutex_unlock(&priv->plat->est->lock);
 
        priv->plat->fpe_cfg->enable = false;
        stmmac_fpe_configure(priv, priv->ioaddr,
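
The stmmac_calc_tas_basetime() helper factored out above implements the TAS base-time adjustment: if the configured base time is already in the future it is used as-is; otherwise it is advanced by whole cycles to the first slot boundary after the current time, i.e. new_base = base + (floor((now - base) / cycle) + 1) * cycle. A userspace model of the arithmetic (hypothetical, nanosecond units; not the kernel helper itself):

        #include <stdint.h>
        #include <stdio.h>

        static int64_t next_base(int64_t base, int64_t now, int64_t cycle)
        {
                if (base > now)
                        return base;            /* already in the future */
                return base + ((now - base) / cycle + 1) * cycle;
        }

        int main(void)
        {
                /* base 1000, cycle 300, now 1950 -> 1000 + 4*300 = 2200 */
                printf("%lld\n", (long long)next_base(1000, 1950, 300));
                return 0;
        }
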
index 74e7486..860644d 100644 (file)
@@ -8191,8 +8191,9 @@ static int niu_pci_vpd_fetch(struct niu *np, u32 start)
                err = niu_pci_vpd_scan_props(np, here, end);
                if (err < 0)
                        return err;
+               /* err == 1 is not an error */
                if (err == 1)
-                       return -EINVAL;
+                       return 0;
        }
        return 0;
 }
index 0b2ce4b..e0cb713 100644 (file)
@@ -313,9 +313,8 @@ static void tlan_remove_one(struct pci_dev *pdev)
        pci_release_regions(pdev);
 #endif
 
-       free_netdev(dev);
-
        cancel_work_sync(&priv->tlan_tqueue);
+       free_netdev(dev);
 }
 
 static void tlan_start(struct net_device *dev)
index 99d4d94..a6fb88f 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/kernel.h>
 #include <linux/ptp_clock_kernel.h>
 #include <linux/soc/ixp4xx/cpu.h>
+#include <linux/module.h>
+#include <mach/ixp4xx-regs.h>
 
 #include "ixp46x_ts.h"
 
index 14f0705..0de2c45 100644 (file)
@@ -1504,9 +1504,8 @@ err_out_resource:
        release_mem_region(start, len);
 
 err_out_kfree:
-       free_netdev(dev);
-
        pr_err("%s: initialization failure, aborting!\n", fp->name);
+       free_netdev(dev);
        return ret;
 }
 
index 3811f1b..b80ed2f 100644 (file)
@@ -85,7 +85,7 @@ static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs,
                                       u32 *mykey, u32 *mysalt)
 {
        const char aes_gcm_name[] = "rfc4106(gcm(aes))";
-       struct net_device *dev = xs->xso.dev;
+       struct net_device *dev = xs->xso.real_dev;
        unsigned char *key_data;
        char *alg_name = NULL;
        int key_len;
@@ -134,7 +134,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs)
        u16 sa_idx;
        int ret;
 
-       dev = xs->xso.dev;
+       dev = xs->xso.real_dev;
        ns = netdev_priv(dev);
        ipsec = &ns->ipsec;
 
@@ -194,7 +194,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs)
 
 static void nsim_ipsec_del_sa(struct xfrm_state *xs)
 {
-       struct netdevsim *ns = netdev_priv(xs->xso.dev);
+       struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
        struct nsim_ipsec *ipsec = &ns->ipsec;
        u16 sa_idx;
 
@@ -211,7 +211,7 @@ static void nsim_ipsec_del_sa(struct xfrm_state *xs)
 
 static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
 {
-       struct netdevsim *ns = netdev_priv(xs->xso.dev);
+       struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
        struct nsim_ipsec *ipsec = &ns->ipsec;
 
        ipsec->ok++;
index 7bf3011..83aea5c 100644 (file)
@@ -288,7 +288,7 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
        if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) {
                if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E ||
                    BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810 ||
-                   BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E)
+                   BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54811)
                        val |= BCM54XX_SHD_SCR3_RXCTXC_DIS;
                else
                        val |= BCM54XX_SHD_SCR3_TRDDAPD;
index bbbc6ac..53a4334 100644 (file)
@@ -78,6 +78,11 @@ enum {
        /* Temperature read register (88E2110 only) */
        MV_PCS_TEMP             = 0x8042,
 
+       /* Number of ports on the device */
+       MV_PCS_PORT_INFO        = 0xd00d,
+       MV_PCS_PORT_INFO_NPORTS_MASK    = 0x0380,
+       MV_PCS_PORT_INFO_NPORTS_SHIFT   = 7,
+
        /* These registers appear at 0x800X and 0xa00X - the 0xa00X control
         * registers appear to set themselves to the 0x800X when AN is
         * restarted, but status registers appear readable from either.
@@ -966,6 +971,30 @@ static const struct mv3310_chip mv2111_type = {
 #endif
 };
 
+static int mv3310_get_number_of_ports(struct phy_device *phydev)
+{
+       int ret;
+
+       ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PORT_INFO);
+       if (ret < 0)
+               return ret;
+
+       ret &= MV_PCS_PORT_INFO_NPORTS_MASK;
+       ret >>= MV_PCS_PORT_INFO_NPORTS_SHIFT;
+
+       return ret + 1;
+}
+
+static int mv3310_match_phy_device(struct phy_device *phydev)
+{
+       return mv3310_get_number_of_ports(phydev) == 1;
+}
+
+static int mv3340_match_phy_device(struct phy_device *phydev)
+{
+       return mv3310_get_number_of_ports(phydev) == 4;
+}
+
 static int mv211x_match_phy_device(struct phy_device *phydev, bool has_5g)
 {
        int val;
@@ -994,7 +1023,8 @@ static int mv2111_match_phy_device(struct phy_device *phydev)
 static struct phy_driver mv3310_drivers[] = {
        {
                .phy_id         = MARVELL_PHY_ID_88X3310,
-               .phy_id_mask    = MARVELL_PHY_ID_88X33X0_MASK,
+               .phy_id_mask    = MARVELL_PHY_ID_MASK,
+               .match_phy_device = mv3310_match_phy_device,
                .name           = "mv88x3310",
                .driver_data    = &mv3310_type,
                .get_features   = mv3310_get_features,
@@ -1011,8 +1041,9 @@ static struct phy_driver mv3310_drivers[] = {
                .set_loopback   = genphy_c45_loopback,
        },
        {
-               .phy_id         = MARVELL_PHY_ID_88X3340,
-               .phy_id_mask    = MARVELL_PHY_ID_88X33X0_MASK,
+               .phy_id         = MARVELL_PHY_ID_88X3310,
+               .phy_id_mask    = MARVELL_PHY_ID_MASK,
+               .match_phy_device = mv3340_match_phy_device,
                .name           = "mv88x3340",
                .driver_data    = &mv3340_type,
                .get_features   = mv3310_get_features,
@@ -1069,8 +1100,7 @@ static struct phy_driver mv3310_drivers[] = {
 module_phy_driver(mv3310_drivers);
 
 static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
-       { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_88X33X0_MASK },
-       { MARVELL_PHY_ID_88X3340, MARVELL_PHY_ID_88X33X0_MASK },
+       { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_MASK },
        { MARVELL_PHY_ID_88E2110, MARVELL_PHY_ID_MASK },
        { },
 };
index aec97b0..2c11521 100644 (file)
@@ -701,6 +701,7 @@ static int ax88772_init_phy(struct usbnet *dev)
                return ret;
        }
 
+       phy_suspend(priv->phydev);
        priv->phydev->mac_managed_pm = 1;
 
        phy_attached_info(priv->phydev);
index 6300683..dec96e8 100644 (file)
@@ -2495,7 +2495,7 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
                           hso_net_init);
        if (!net) {
                dev_err(&interface->dev, "Unable to create ethernet device\n");
-               goto exit;
+               goto err_hso_dev;
        }
 
        hso_net = netdev_priv(net);
@@ -2508,13 +2508,13 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
                                      USB_DIR_IN);
        if (!hso_net->in_endp) {
                dev_err(&interface->dev, "Can't find BULK IN endpoint\n");
-               goto exit;
+               goto err_net;
        }
        hso_net->out_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK,
                                       USB_DIR_OUT);
        if (!hso_net->out_endp) {
                dev_err(&interface->dev, "Can't find BULK OUT endpoint\n");
-               goto exit;
+               goto err_net;
        }
        SET_NETDEV_DEV(net, &interface->dev);
        SET_NETDEV_DEVTYPE(net, &hso_type);
@@ -2523,18 +2523,18 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
        for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
                hso_net->mux_bulk_rx_urb_pool[i] = usb_alloc_urb(0, GFP_KERNEL);
                if (!hso_net->mux_bulk_rx_urb_pool[i])
-                       goto exit;
+                       goto err_mux_bulk_rx;
                hso_net->mux_bulk_rx_buf_pool[i] = kzalloc(MUX_BULK_RX_BUF_SIZE,
                                                           GFP_KERNEL);
                if (!hso_net->mux_bulk_rx_buf_pool[i])
-                       goto exit;
+                       goto err_mux_bulk_rx;
        }
        hso_net->mux_bulk_tx_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!hso_net->mux_bulk_tx_urb)
-               goto exit;
+               goto err_mux_bulk_rx;
        hso_net->mux_bulk_tx_buf = kzalloc(MUX_BULK_TX_BUF_SIZE, GFP_KERNEL);
        if (!hso_net->mux_bulk_tx_buf)
-               goto exit;
+               goto err_free_tx_urb;
 
        add_net_device(hso_dev);
 
@@ -2542,7 +2542,7 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
        result = register_netdev(net);
        if (result) {
                dev_err(&interface->dev, "Failed to register device\n");
-               goto exit;
+               goto err_free_tx_buf;
        }
 
        hso_log_port(hso_dev);
@@ -2550,8 +2550,21 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
        hso_create_rfkill(hso_dev, interface);
 
        return hso_dev;
-exit:
-       hso_free_net_device(hso_dev, true);
+
+err_free_tx_buf:
+       remove_net_device(hso_dev);
+       kfree(hso_net->mux_bulk_tx_buf);
+err_free_tx_urb:
+       usb_free_urb(hso_net->mux_bulk_tx_urb);
+err_mux_bulk_rx:
+       for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
+               usb_free_urb(hso_net->mux_bulk_rx_urb_pool[i]);
+               kfree(hso_net->mux_bulk_rx_buf_pool[i]);
+       }
+err_net:
+       free_netdev(net);
+err_hso_dev:
+       kfree(hso_dev);
        return NULL;
 }
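
The reworked error handling above replaces a single catch-all label (which re-walked a half-constructed device and could free things that were never allocated) with the kernel's layered-goto pattern: one label per acquired resource, unwinding in strict reverse order. A generic sketch with hypothetical names (BUF_SZ, register_thing()):

        #include <linux/slab.h>

        #define BUF_SZ 64                       /* illustrative size */

        struct ctx { void *buf; };

        int register_thing(struct ctx *c);      /* hypothetical, may fail */

        static struct ctx *ctx_create(void)
        {
                struct ctx *c = kzalloc(sizeof(*c), GFP_KERNEL);

                if (!c)
                        return NULL;

                c->buf = kzalloc(BUF_SZ, GFP_KERNEL);
                if (!c->buf)
                        goto err_free_ctx;

                if (register_thing(c))
                        goto err_free_buf;

                return c;

        err_free_buf:
                kfree(c->buf);
        err_free_ctx:
                kfree(c);
                return NULL;
        }

Each label frees exactly what was acquired before the jump, so no path touches an uninitialized or already-freed resource.
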
 
index 1692d3b..e09b107 100644 (file)
@@ -1552,7 +1552,8 @@ static int
 rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex,
                  u32 advertising);
 
-static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
+static int __rtl8152_set_mac_address(struct net_device *netdev, void *p,
+                                    bool in_resume)
 {
        struct r8152 *tp = netdev_priv(netdev);
        struct sockaddr *addr = p;
@@ -1561,9 +1562,11 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
        if (!is_valid_ether_addr(addr->sa_data))
                goto out1;
 
-       ret = usb_autopm_get_interface(tp->intf);
-       if (ret < 0)
-               goto out1;
+       if (!in_resume) {
+               ret = usb_autopm_get_interface(tp->intf);
+               if (ret < 0)
+                       goto out1;
+       }
 
        mutex_lock(&tp->control);
 
@@ -1575,11 +1578,17 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
 
        mutex_unlock(&tp->control);
 
-       usb_autopm_put_interface(tp->intf);
+       if (!in_resume)
+               usb_autopm_put_interface(tp->intf);
 out1:
        return ret;
 }
 
+static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
+{
+       return __rtl8152_set_mac_address(netdev, p, false);
+}
+
 /* Devices containing proper chips can support a persistent
  * host system provided MAC address.
  * Examples of this are Dell TB15 and Dell WD15 docks
@@ -1698,7 +1707,7 @@ static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa)
        return ret;
 }
 
-static int set_ethernet_addr(struct r8152 *tp)
+static int set_ethernet_addr(struct r8152 *tp, bool in_resume)
 {
        struct net_device *dev = tp->netdev;
        struct sockaddr sa;
@@ -1711,7 +1720,7 @@ static int set_ethernet_addr(struct r8152 *tp)
        if (tp->version == RTL_VER_01)
                ether_addr_copy(dev->dev_addr, sa.sa_data);
        else
-               ret = rtl8152_set_mac_address(dev, &sa);
+               ret = __rtl8152_set_mac_address(dev, &sa, in_resume);
 
        return ret;
 }
@@ -6763,9 +6772,10 @@ static int rtl8152_close(struct net_device *netdev)
                tp->rtl_ops.down(tp);
 
                mutex_unlock(&tp->control);
+       }
 
+       if (!res)
                usb_autopm_put_interface(tp->intf);
-       }
 
        free_all_mem(tp);
 
@@ -8443,7 +8453,7 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
        clear_bit(SELECTIVE_SUSPEND, &tp->flags);
        tp->rtl_ops.init(tp);
        queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
-       set_ethernet_addr(tp);
+       set_ethernet_addr(tp, true);
        return rtl8152_resume(intf);
 }
 
@@ -9644,7 +9654,7 @@ static int rtl8152_probe(struct usb_interface *intf,
        tp->rtl_fw.retry = true;
 #endif
        queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
-       set_ethernet_addr(tp);
+       set_ethernet_addr(tp, false);
 
        usb_set_intfdata(intf, tp);
 
index 8a58a2f..56c3f85 100644 (file)
@@ -1771,6 +1771,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 {
        struct scatterlist *sgs[4], hdr, stat;
        unsigned out_num = 0, tmp;
+       int ret;
 
        /* Caller should know better */
        BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
@@ -1790,7 +1791,12 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
        sgs[out_num] = &stat;
 
        BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
-       virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
+       ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
+       if (ret < 0) {
+               dev_warn(&vi->vdev->dev,
+                        "Failed to add sgs for command vq: %d\n.", ret);
+               return false;
+       }
 
        if (unlikely(!virtqueue_kick(vi->cvq)))
                return vi->ctrl->status == VIRTIO_NET_OK;
index c0bd9cb..1b483cf 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Linux driver for VMware's vmxnet3 ethernet NIC.
  *
- * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
 
 
 #include "vmxnet3_int.h"
+#include <net/vxlan.h>
+#include <net/geneve.h>
+
+#define VXLAN_UDP_PORT 8472
 
 struct vmxnet3_stat_desc {
        char desc[ETH_GSTRING_LEN];
@@ -262,6 +266,8 @@ netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
        if (VMXNET3_VERSION_GE_4(adapter) &&
            skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) {
                u8 l4_proto = 0;
+               u16 port;
+               struct udphdr *udph;
 
                switch (vlan_get_protocol(skb)) {
                case htons(ETH_P_IP):
@@ -274,8 +280,20 @@ netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
                        return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
                }
 
-               if (l4_proto != IPPROTO_UDP)
+               switch (l4_proto) {
+               case IPPROTO_UDP:
+                       udph = udp_hdr(skb);
+                       port = be16_to_cpu(udph->dest);
+                       /* Check if offloaded port is supported */
+                       if (port != GENEVE_UDP_PORT &&
+                           port != IANA_VXLAN_UDP_PORT &&
+                           port != VXLAN_UDP_PORT) {
+                               return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+                       }
+                       break;
+               default:
                        return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+               }
        }
        return features;
 }
index 349ca18..c54fdae 100644 (file)
@@ -364,19 +364,19 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
        return -EINVAL;
 }
 
-static int __init mod_init(void)
+static int __init hdlc_cisco_init(void)
 {
        register_hdlc_protocol(&proto);
        return 0;
 }
 
-static void __exit mod_exit(void)
+static void __exit hdlc_cisco_exit(void)
 {
        unregister_hdlc_protocol(&proto);
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_cisco_init);
+module_exit(hdlc_cisco_exit);
 
 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
 MODULE_DESCRIPTION("Cisco HDLC protocol support for generic HDLC");
index 72250fe..25e3564 100644 (file)
@@ -1279,19 +1279,19 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
        return -EINVAL;
 }
 
-static int __init mod_init(void)
+static int __init hdlc_fr_init(void)
 {
        register_hdlc_protocol(&proto);
        return 0;
 }
 
-static void __exit mod_exit(void)
+static void __exit hdlc_fr_exit(void)
 {
        unregister_hdlc_protocol(&proto);
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_fr_init);
+module_exit(hdlc_fr_exit);
 
 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
 MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
index 834be2a..b81ecf4 100644 (file)
@@ -705,20 +705,20 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
        return -EINVAL;
 }
 
-static int __init mod_init(void)
+static int __init hdlc_ppp_init(void)
 {
        skb_queue_head_init(&tx_queue);
        register_hdlc_protocol(&proto);
        return 0;
 }
 
-static void __exit mod_exit(void)
+static void __exit hdlc_ppp_exit(void)
 {
        unregister_hdlc_protocol(&proto);
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_ppp_init);
+module_exit(hdlc_ppp_exit);
 
 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
 MODULE_DESCRIPTION("PPP protocol support for generic HDLC");
index 388fcc0..54d2849 100644 (file)
@@ -90,7 +90,7 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
 }
 
 
-static int __init mod_init(void)
+static int __init hdlc_raw_init(void)
 {
        register_hdlc_protocol(&proto);
        return 0;
@@ -98,14 +98,14 @@ static int __init mod_init(void)
 
 
 
-static void __exit mod_exit(void)
+static void __exit hdlc_raw_exit(void)
 {
        unregister_hdlc_protocol(&proto);
 }
 
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_raw_init);
+module_exit(hdlc_raw_exit);
 
 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
 MODULE_DESCRIPTION("Raw HDLC protocol support for generic HDLC");
index c70a518..9275962 100644 (file)
@@ -110,7 +110,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
 }
 
 
-static int __init mod_init(void)
+static int __init hdlc_eth_init(void)
 {
        register_hdlc_protocol(&proto);
        return 0;
@@ -118,14 +118,14 @@ static int __init mod_init(void)
 
 
 
-static void __exit mod_exit(void)
+static void __exit hdlc_eth_exit(void)
 {
        unregister_hdlc_protocol(&proto);
 }
 
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_eth_init);
+module_exit(hdlc_eth_exit);
 
 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
 MODULE_DESCRIPTION("Ethernet encapsulation support for generic HDLC");
index d2bf72b..9b7ebf8 100644 (file)
@@ -365,19 +365,19 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
        return -EINVAL;
 }
 
-static int __init mod_init(void)
+static int __init hdlc_x25_init(void)
 {
        register_hdlc_protocol(&proto);
        return 0;
 }
 
-static void __exit mod_exit(void)
+static void __exit hdlc_x25_exit(void)
 {
        unregister_hdlc_protocol(&proto);
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_x25_init);
+module_exit(hdlc_x25_exit);
 
 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
 MODULE_DESCRIPTION("X.25 protocol support for generic HDLC");
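
The renames above (this and the preceding hdlc_* hunks) give each protocol module a unique init/exit symbol: because the functions are static, every module otherwise shows up as an indistinguishable mod_init in System.map, stack traces, and initcall_debug output. A hedged sketch of the convention, with a hypothetical module name:

        #include <linux/module.h>

        static int __init hdlc_foo_init(void)   /* unique, greppable name */
        {
                return 0;
        }

        static void __exit hdlc_foo_exit(void)
        {
        }

        module_init(hdlc_foo_init);
        module_exit(hdlc_foo_exit);
        MODULE_LICENSE("GPL");
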
index 7fd2104..63ec140 100644 (file)
@@ -389,6 +389,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
        case WLAN_CIPHER_SUITE_WEP104:
                if (!mvif->wep_sta)
                        return -EOPNOTSUPP;
+               break;
        case WLAN_CIPHER_SUITE_TKIP:
        case WLAN_CIPHER_SUITE_CCMP:
        case WLAN_CIPHER_SUITE_CCMP_256:
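
The added break above stops WEP keys from unintentionally falling into the TKIP/CCMP handling. Where falling through really is intended, kernel code annotates it with the fallthrough keyword so -Wimplicit-fallthrough stays quiet; a sketch with hypothetical ciphers and helpers:

        #include <linux/compiler.h>
        #include <linux/errno.h>

        void init_tkip_countermeasures(void);   /* hypothetical */
        void install_key(void);                 /* hypothetical */

        static int set_key(int cipher, int have_sta)
        {
                switch (cipher) {
                case 1: /* WEP-like: standalone, must not fall through */
                        if (!have_sta)
                                return -EOPNOTSUPP;
                        break;
                case 2: /* TKIP-like: extra step, then shared path */
                        init_tkip_countermeasures();
                        fallthrough;    /* intentional, silences the warning */
                case 3: /* CCMP-like */
                        install_key();
                        break;
                default:
                        return -EOPNOTSUPP;
                }
                return 0;
        }
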
index c2c4dc1..cd690c6 100644 (file)
@@ -931,7 +931,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
        ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
        if (ret) {
                dev_dbg(dev->mt76.dev, "Firmware is already downloaded\n");
-               return -EIO;
+               goto fw_loaded;
        }
 
        ret = mt7921_load_patch(dev);
@@ -949,6 +949,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
                return -EIO;
        }
 
+fw_loaded:
        mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
 
 #ifdef CONFIG_PM
index 1df9595..514f2c1 100644 (file)
@@ -136,6 +136,29 @@ static struct ieee80211_supported_band band_5ghz = {
 /* Assigned at module init. Guaranteed locally-administered and unicast. */
 static u8 fake_router_bssid[ETH_ALEN] __ro_after_init = {};
 
+static void virt_wifi_inform_bss(struct wiphy *wiphy)
+{
+       u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
+       struct cfg80211_bss *informed_bss;
+       static const struct {
+               u8 tag;
+               u8 len;
+               u8 ssid[8];
+       } __packed ssid = {
+               .tag = WLAN_EID_SSID,
+               .len = 8,
+               .ssid = "VirtWifi",
+       };
+
+       informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
+                                          CFG80211_BSS_FTYPE_PRESP,
+                                          fake_router_bssid, tsf,
+                                          WLAN_CAPABILITY_ESS, 0,
+                                          (void *)&ssid, sizeof(ssid),
+                                          DBM_TO_MBM(-50), GFP_KERNEL);
+       cfg80211_put_bss(wiphy, informed_bss);
+}
+
 /* Called with the rtnl lock held. */
 static int virt_wifi_scan(struct wiphy *wiphy,
                          struct cfg80211_scan_request *request)
@@ -156,28 +179,13 @@ static int virt_wifi_scan(struct wiphy *wiphy,
 /* Acquires and releases the rdev BSS lock. */
 static void virt_wifi_scan_result(struct work_struct *work)
 {
-       struct {
-               u8 tag;
-               u8 len;
-               u8 ssid[8];
-       } __packed ssid = {
-               .tag = WLAN_EID_SSID, .len = 8, .ssid = "VirtWifi",
-       };
-       struct cfg80211_bss *informed_bss;
        struct virt_wifi_wiphy_priv *priv =
                container_of(work, struct virt_wifi_wiphy_priv,
                             scan_result.work);
        struct wiphy *wiphy = priv_to_wiphy(priv);
        struct cfg80211_scan_info scan_info = { .aborted = false };
-       u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
 
-       informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
-                                          CFG80211_BSS_FTYPE_PRESP,
-                                          fake_router_bssid, tsf,
-                                          WLAN_CAPABILITY_ESS, 0,
-                                          (void *)&ssid, sizeof(ssid),
-                                          DBM_TO_MBM(-50), GFP_KERNEL);
-       cfg80211_put_bss(wiphy, informed_bss);
+       virt_wifi_inform_bss(wiphy);
 
        /* Schedules work which acquires and releases the rtnl lock. */
        cfg80211_scan_done(priv->scan_request, &scan_info);
@@ -225,10 +233,12 @@ static int virt_wifi_connect(struct wiphy *wiphy, struct net_device *netdev,
        if (!could_schedule)
                return -EBUSY;
 
-       if (sme->bssid)
+       if (sme->bssid) {
                ether_addr_copy(priv->connect_requested_bss, sme->bssid);
-       else
+       } else {
+               virt_wifi_inform_bss(wiphy);
                eth_zero_addr(priv->connect_requested_bss);
+       }
 
        wiphy_debug(wiphy, "connect\n");
 
@@ -241,11 +251,13 @@ static void virt_wifi_connect_complete(struct work_struct *work)
        struct virt_wifi_netdev_priv *priv =
                container_of(work, struct virt_wifi_netdev_priv, connect.work);
        u8 *requested_bss = priv->connect_requested_bss;
-       bool has_addr = !is_zero_ether_addr(requested_bss);
        bool right_addr = ether_addr_equal(requested_bss, fake_router_bssid);
        u16 status = WLAN_STATUS_SUCCESS;
 
-       if (!priv->is_up || (has_addr && !right_addr))
+       if (is_zero_ether_addr(requested_bss))
+               requested_bss = NULL;
+
+       if (!priv->is_up || (requested_bss && !right_addr))
                status = WLAN_STATUS_UNSPECIFIED_FAILURE;
        else
                priv->is_connected = true;
index 46f76e8..0a472ce 100644 (file)
@@ -24,15 +24,7 @@ int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
                return -EIO;
        }
 
-       /* check for the interafce id
-        * if if_id 1 to 8 then create IP MUX channel sessions.
-        * To start MUX session from 0 as network interface id would start
-        * from 1 so map it to if_id = if_id - 1
-        */
-       if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END)
-               return ipc_mux_open_session(ipc_imem->mux, if_id - 1);
-
-       return -EINVAL;
+       return ipc_mux_open_session(ipc_imem->mux, if_id);
 }
 
 /* Release a net link to CP. */
@@ -41,7 +33,7 @@ void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
 {
        if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
            if_id <= IP_MUX_SESSION_END)
-               ipc_mux_close_session(ipc_imem->mux, if_id - 1);
+               ipc_mux_close_session(ipc_imem->mux, if_id);
 }
 
 /* Tasklet call to do uplink transfer. */
@@ -83,13 +75,8 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
                goto out;
        }
 
-       if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END)
-               /* Route the UL packet through IP MUX Layer */
-               ret = ipc_mux_ul_trigger_encode(ipc_imem->mux,
-                                               if_id - 1, skb);
-       else
-               dev_err(ipc_imem->dev,
-                       "invalid if_id %d: ", if_id);
+       /* Route the UL packet through IP MUX Layer */
+       ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
 out:
        return ret;
 }
index fd356da..2007fe2 100644 (file)
 #define BOOT_CHECK_DEFAULT_TIMEOUT 400
 
 /* IP MUX channel range */
-#define IP_MUX_SESSION_START 1
-#define IP_MUX_SESSION_END 8
+#define IP_MUX_SESSION_START 0
+#define IP_MUX_SESSION_END 7
 
 /* Default IP MUX channel */
-#define IP_MUX_SESSION_DEFAULT 1
+#define IP_MUX_SESSION_DEFAULT 0
 
 /**
  * ipc_imem_sys_port_open - Open a port link to CP.
index e634ffc..562de27 100644 (file)
@@ -288,7 +288,7 @@ static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
        /* Pass the packet to the netif layer. */
        dest_skb->priority = service_class;
 
-       return ipc_wwan_receive(wwan, dest_skb, false, if_id + 1);
+       return ipc_wwan_receive(wwan, dest_skb, false, if_id);
 }
 
 /* Decode Flow Credit Table in the block */
index 2229d75..d12188f 100644 (file)
@@ -37,7 +37,7 @@ void ipc_uevent_send(struct device *dev, char *uevent)
 
        /* Store the device and event information */
        info->dev = dev;
-       snprintf(info->uevent, MAX_UEVENT_LEN, "%s: %s", dev_name(dev), uevent);
+       snprintf(info->uevent, MAX_UEVENT_LEN, "IOSM_EVENT=%s", uevent);
 
        /* Schedule uevent in process context using work queue */
        schedule_work(&info->work);
index c999c64..b2357ad 100644 (file)
@@ -107,6 +107,7 @@ static int ipc_wwan_link_transmit(struct sk_buff *skb,
 {
        struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
        struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
+       unsigned int len = skb->len;
        int if_id = priv->if_id;
        int ret;
 
@@ -123,6 +124,8 @@ static int ipc_wwan_link_transmit(struct sk_buff *skb,
 
        /* Return code of zero is success */
        if (ret == 0) {
+               netdev->stats.tx_packets++;
+               netdev->stats.tx_bytes += len;
                ret = NETDEV_TX_OK;
        } else if (ret == -EBUSY) {
                ret = NETDEV_TX_BUSY;
@@ -140,7 +143,8 @@ exit:
                        ret);
 
        dev_kfree_skb_any(skb);
-       return ret;
+       netdev->stats.tx_dropped++;
+       return NETDEV_TX_OK;
 }
 
 /* Ops structure for wwan net link */
@@ -158,6 +162,7 @@ static void ipc_wwan_setup(struct net_device *iosm_dev)
        iosm_dev->priv_flags |= IFF_NO_QUEUE;
 
        iosm_dev->type = ARPHRD_NONE;
+       iosm_dev->mtu = ETH_DATA_LEN;
        iosm_dev->min_mtu = ETH_MIN_MTU;
        iosm_dev->max_mtu = ETH_MAX_MTU;
 
@@ -252,8 +257,8 @@ int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg,
 
        skb->pkt_type = PACKET_HOST;
 
-       if (if_id < (IP_MUX_SESSION_START - 1) ||
-           if_id > (IP_MUX_SESSION_END - 1)) {
+       if (if_id < IP_MUX_SESSION_START ||
+           if_id > IP_MUX_SESSION_END) {
                ret = -EINVAL;
                goto free;
        }
index 3e16c31..674a81d 100644 (file)
@@ -984,6 +984,8 @@ static void wwan_create_default_link(struct wwan_device *wwandev,
                goto unlock;
        }
 
+       rtnl_configure_link(dev, NULL); /* Link initialized, notify new link */
+
 unlock:
        rtnl_unlock();
 
index a9864fc..dd27c85 100644 (file)
@@ -192,8 +192,7 @@ static void nfcsim_recv_wq(struct work_struct *work)
 
                if (!IS_ERR(skb))
                        dev_kfree_skb(skb);
-
-               skb = ERR_PTR(-ENODEV);
+               return;
        }
 
        dev->cb(dev->nfc_digital_dev, dev->arg, skb);
index eb5d7a5..e3e72b8 100644 (file)
@@ -423,7 +423,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
        if (IS_ERR(tfm)) {
                ret = PTR_ERR(tfm);
                dev_err(&fw_info->ndev->nfc_dev->dev,
-                       "Cannot allocate shash (code=%d)\n", ret);
+                       "Cannot allocate shash (code=%pe)\n", tfm);
                goto out;
        }
 
index 11779be..dfd9dec 100644 (file)
@@ -900,7 +900,10 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
                cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
        cmnd->write_zeroes.length =
                cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
-       cmnd->write_zeroes.control = 0;
+       if (nvme_ns_has_pi(ns))
+               cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
+       else
+               cmnd->write_zeroes.control = 0;
        return BLK_STS_OK;
 }
 
@@ -3807,6 +3810,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 
 static void nvme_ns_remove(struct nvme_ns *ns)
 {
+       bool last_path = false;
+
        if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
                return;
 
@@ -3815,8 +3820,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 
        mutex_lock(&ns->ctrl->subsys->lock);
        list_del_rcu(&ns->siblings);
-       if (list_empty(&ns->head->list))
-               list_del_init(&ns->head->entry);
        mutex_unlock(&ns->ctrl->subsys->lock);
 
        synchronize_rcu(); /* guarantee not available in head->list */
@@ -3836,7 +3839,15 @@ static void nvme_ns_remove(struct nvme_ns *ns)
        list_del_init(&ns->list);
        up_write(&ns->ctrl->namespaces_rwsem);
 
-       nvme_mpath_check_last_path(ns);
+       /* Synchronize with nvme_init_ns_head() */
+       mutex_lock(&ns->head->subsys->lock);
+       if (list_empty(&ns->head->list)) {
+               list_del_init(&ns->head->entry);
+               last_path = true;
+       }
+       mutex_unlock(&ns->head->subsys->lock);
+       if (last_path)
+               nvme_mpath_shutdown_disk(ns->head);
        nvme_put_ns(ns);
 }
 
index 0ea5298..3f32c5e 100644 (file)
@@ -760,14 +760,21 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
 #endif
 }
 
-void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
 {
        if (!head->disk)
                return;
+       kblockd_schedule_work(&head->requeue_work);
        if (head->disk->flags & GENHD_FL_UP) {
                nvme_cdev_del(&head->cdev, &head->cdev_device);
                del_gendisk(head->disk);
        }
+}
+
+void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+{
+       if (!head->disk)
+               return;
        blk_set_queue_dying(head->disk->queue);
        /* make sure all pending bios are cleaned up */
        kblockd_schedule_work(&head->requeue_work);
index 18ef8dd..5cd1fa3 100644 (file)
@@ -716,14 +716,7 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
 void nvme_mpath_stop(struct nvme_ctrl *ctrl);
 bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
 void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
-
-static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
-{
-       struct nvme_ns_head *head = ns->head;
-
-       if (head->disk && list_empty(&head->list))
-               kblockd_schedule_work(&head->requeue_work);
-}
+void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
 
 static inline void nvme_trace_bio_complete(struct request *req)
 {
@@ -772,7 +765,7 @@ static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
 static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
 {
 }
-static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
 {
 }
 static inline void nvme_trace_bio_complete(struct request *req)
index d3c5086..5185208 100644 (file)
@@ -1554,6 +1554,28 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
        wmb(); /* ensure the first interrupt sees the initialization */
 }
 
+/*
+ * Try getting shutdown_lock while setting up IO queues.
+ */
+static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
+{
+       /*
+        * Give up if the lock is being held by nvme_dev_disable.
+        */
+       if (!mutex_trylock(&dev->shutdown_lock))
+               return -ENODEV;
+
+       /*
+        * Controller is in the wrong state, fail early.
+        */
+       if (dev->ctrl.state != NVME_CTRL_CONNECTING) {
+               mutex_unlock(&dev->shutdown_lock);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
 {
        struct nvme_dev *dev = nvmeq->dev;
@@ -1582,8 +1604,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
                goto release_cq;
 
        nvmeq->cq_vector = vector;
-       nvme_init_queue(nvmeq, qid);
 
+       result = nvme_setup_io_queues_trylock(dev);
+       if (result)
+               return result;
+       nvme_init_queue(nvmeq, qid);
        if (!polled) {
                result = queue_request_irq(nvmeq);
                if (result < 0)
@@ -1591,10 +1616,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
        }
 
        set_bit(NVMEQ_ENABLED, &nvmeq->flags);
+       mutex_unlock(&dev->shutdown_lock);
        return result;
 
 release_sq:
        dev->online_queues--;
+       mutex_unlock(&dev->shutdown_lock);
        adapter_delete_sq(dev, qid);
 release_cq:
        adapter_delete_cq(dev, qid);
@@ -2167,7 +2194,18 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        if (nr_io_queues == 0)
                return 0;
 
-       clear_bit(NVMEQ_ENABLED, &adminq->flags);
+       /*
+        * Free IRQ resources as soon as NVMEQ_ENABLED bit transitions
+        * from set to unset. If there is a window before it is truly
+        * freed, pci_free_irq_vectors() jumping into this window will
+        * crash. Also take the lock to avoid racing with
+        * pci_free_irq_vectors() in the nvme_dev_disable() path.
+        */
+       result = nvme_setup_io_queues_trylock(dev);
+       if (result)
+               return result;
+       if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
+               pci_free_irq(pdev, 0, adminq);
 
        if (dev->cmb_use_sqes) {
                result = nvme_cmb_qdepth(dev, nr_io_queues,
@@ -2183,14 +2221,17 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
                result = nvme_remap_bar(dev, size);
                if (!result)
                        break;
-               if (!--nr_io_queues)
-                       return -ENOMEM;
+               if (!--nr_io_queues) {
+                       result = -ENOMEM;
+                       goto out_unlock;
+               }
        } while (1);
        adminq->q_db = dev->dbs;
 
  retry:
        /* Deregister the admin queue's interrupt */
-       pci_free_irq(pdev, 0, adminq);
+       if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
+               pci_free_irq(pdev, 0, adminq);
 
        /*
         * If we enable msix early due to not intx, disable it again before
@@ -2199,8 +2240,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        pci_free_irq_vectors(pdev);
 
        result = nvme_setup_irqs(dev, nr_io_queues);
-       if (result <= 0)
-               return -EIO;
+       if (result <= 0) {
+               result = -EIO;
+               goto out_unlock;
+       }
 
        dev->num_vecs = result;
        result = max(result - 1, 1);
@@ -2214,8 +2257,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
         */
        result = queue_request_irq(adminq);
        if (result)
-               return result;
+               goto out_unlock;
        set_bit(NVMEQ_ENABLED, &adminq->flags);
+       mutex_unlock(&dev->shutdown_lock);
 
        result = nvme_create_io_queues(dev);
        if (result || dev->online_queues < 2)
@@ -2224,6 +2268,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        if (dev->online_queues - 1 < dev->max_qid) {
                nr_io_queues = dev->online_queues - 1;
                nvme_disable_io_queues(dev);
+               result = nvme_setup_io_queues_trylock(dev);
+               if (result)
+                       return result;
                nvme_suspend_io_queues(dev);
                goto retry;
        }
@@ -2232,6 +2279,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
                                        dev->io_queues[HCTX_TYPE_READ],
                                        dev->io_queues[HCTX_TYPE_POLL]);
        return 0;
+out_unlock:
+       mutex_unlock(&dev->shutdown_lock);
+       return result;
 }
 
 static void nvme_del_queue_end(struct request *req, blk_status_t error)
@@ -2581,7 +2631,9 @@ static void nvme_reset_work(struct work_struct *work)
        bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
        int result;
 
-       if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
+       if (dev->ctrl.state != NVME_CTRL_RESETTING) {
+               dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
+                        dev->ctrl.state);
                result = -ENODEV;
                goto out;
        }
@@ -2962,7 +3014,6 @@ static void nvme_remove(struct pci_dev *pdev)
        if (!pci_device_is_present(pdev)) {
                nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
                nvme_dev_disable(dev, true);
-               nvme_dev_remove_admin(dev);
        }
 
        flush_work(&dev->ctrl.reset_work);
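
The nvme_setup_io_queues_trylock() helper introduced above is a standard shape for serializing a setup path against teardown without deadlocking: try the teardown lock, bail out if teardown (here nvme_dev_disable(), which holds shutdown_lock) already owns it, then re-check the state under the lock before touching IRQ resources. A generic sketch with hypothetical types:

        #include <linux/mutex.h>
        #include <linux/errno.h>

        enum state { STATE_CONNECTING, STATE_DELETING };        /* hypothetical */

        struct dev_state {
                struct mutex teardown_lock;     /* held across teardown */
                enum state state;
        };

        static int setup_under_teardown_lock(struct dev_state *st)
        {
                if (!mutex_trylock(&st->teardown_lock))
                        return -ENODEV;         /* teardown already running */

                if (st->state != STATE_CONNECTING) {
                        mutex_unlock(&st->teardown_lock);
                        return -ENODEV;         /* state changed under us */
                }

                /* Safe to register/free IRQs here: teardown is excluded. */

                mutex_unlock(&st->teardown_lock);
                return 0;
        }

Using trylock rather than mutex_lock() is the point: setup gives way to a concurrent shutdown instead of blocking on it and deadlocking.
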
index 12acfe0..8cb15ee 100644 (file)
@@ -123,7 +123,6 @@ struct nvme_tcp_ctrl {
        struct blk_mq_tag_set   admin_tag_set;
        struct sockaddr_storage addr;
        struct sockaddr_storage src_addr;
-       struct net_device       *ndev;
        struct nvme_ctrl        ctrl;
 
        struct work_struct      err_work;
@@ -2533,8 +2532,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
        }
 
        if (opts->mask & NVMF_OPT_HOST_IFACE) {
-               ctrl->ndev = dev_get_by_name(&init_net, opts->host_iface);
-               if (!ctrl->ndev) {
+               if (!__dev_get_by_name(&init_net, opts->host_iface)) {
                        pr_err("invalid interface passed: %s\n",
                               opts->host_iface);
                        ret = -ENODEV;
index daaf700..35bac7a 100644 (file)
@@ -56,7 +56,7 @@ TRACE_EVENT(nvme_setup_cmd,
                __field(u8, fctype)
                __field(u16, cid)
                __field(u32, nsid)
-               __field(u64, metadata)
+               __field(bool, metadata)
                __array(u8, cdw10, 24)
            ),
            TP_fast_assign(
@@ -66,13 +66,13 @@ TRACE_EVENT(nvme_setup_cmd,
                __entry->flags = cmd->common.flags;
                __entry->cid = cmd->common.command_id;
                __entry->nsid = le32_to_cpu(cmd->common.nsid);
-               __entry->metadata = le64_to_cpu(cmd->common.metadata);
+               __entry->metadata = !!blk_integrity_rq(req);
                __entry->fctype = cmd->fabrics.fctype;
                __assign_disk_name(__entry->disk, req->rq_disk);
                memcpy(__entry->cdw10, &cmd->common.cdw10,
                        sizeof(__entry->cdw10));
            ),
-           TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+           TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%x, cmd=(%s %s)",
                      __entry->ctrl_id, __print_disk_name(__entry->disk),
                      __entry->qid, __entry->cid, __entry->nsid,
                      __entry->flags, __entry->metadata,
index 9bab073..d32fbfc 100644 (file)
@@ -230,8 +230,8 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
                        break;
                }
                /* If arch decided it can't, fall through... */
-#endif /* HAVE_PCI_MMAP */
                fallthrough;
+#endif /* HAVE_PCI_MMAP */
        default:
                ret = -EINVAL;
                break;
index b9da58e..3481479 100644 (file)
 #define AMD_PMC_RESULT_CMD_UNKNOWN           0xFE
 #define AMD_PMC_RESULT_FAILED                0xFF
 
+/* FCH SSC Registers */
+#define FCH_S0I3_ENTRY_TIME_L_OFFSET   0x30
+#define FCH_S0I3_ENTRY_TIME_H_OFFSET   0x34
+#define FCH_S0I3_EXIT_TIME_L_OFFSET    0x38
+#define FCH_S0I3_EXIT_TIME_H_OFFSET    0x3C
+#define FCH_SSC_MAPPING_SIZE           0x800
+#define FCH_BASE_PHY_ADDR_LOW          0xFED81100
+#define FCH_BASE_PHY_ADDR_HIGH         0x00000000
+
+/* SMU Message Definitions */
+#define SMU_MSG_GETSMUVERSION          0x02
+#define SMU_MSG_LOG_GETDRAM_ADDR_HI    0x04
+#define SMU_MSG_LOG_GETDRAM_ADDR_LO    0x05
+#define SMU_MSG_LOG_START              0x06
+#define SMU_MSG_LOG_RESET              0x07
+#define SMU_MSG_LOG_DUMP_DATA          0x08
+#define SMU_MSG_GET_SUP_CONSTRAINTS    0x09
 /* List of supported CPU ids */
 #define AMD_CPU_ID_RV                  0x15D0
 #define AMD_CPU_ID_RN                  0x1630
 #define AMD_CPU_ID_PCO                 AMD_CPU_ID_RV
 #define AMD_CPU_ID_CZN                 AMD_CPU_ID_RN
+#define AMD_CPU_ID_YC                  0x14B5
 
-#define AMD_SMU_FW_VERSION             0x0
 #define PMC_MSG_DELAY_MIN_US           100
 #define RESPONSE_REGISTER_LOOP_MAX     200
 
+#define SOC_SUBSYSTEM_IP_MAX   12
+#define DELAY_MIN_US           2000
+#define DELAY_MAX_US           3000
 enum amd_pmc_def {
        MSG_TEST = 0x01,
        MSG_OS_HINT_PCO,
        MSG_OS_HINT_RN,
 };
 
+struct amd_pmc_bit_map {
+       const char *name;
+       u32 bit_mask;
+};
+
+static const struct amd_pmc_bit_map soc15_ip_blk[] = {
+       {"DISPLAY",     BIT(0)},
+       {"CPU",         BIT(1)},
+       {"GFX",         BIT(2)},
+       {"VDD",         BIT(3)},
+       {"ACP",         BIT(4)},
+       {"VCN",         BIT(5)},
+       {"ISP",         BIT(6)},
+       {"NBIO",        BIT(7)},
+       {"DF",          BIT(8)},
+       {"USB0",        BIT(9)},
+       {"USB1",        BIT(10)},
+       {"LAPIC",       BIT(11)},
+       {}
+};
+
 struct amd_pmc_dev {
        void __iomem *regbase;
-       void __iomem *smu_base;
+       void __iomem *smu_virt_addr;
+       void __iomem *fch_virt_addr;
        u32 base_addr;
        u32 cpu_id;
+       u32 active_ips;
        struct device *dev;
+       struct mutex lock; /* serializes SMU command submission */
 #if IS_ENABLED(CONFIG_DEBUG_FS)
        struct dentry *dbgfs_dir;
 #endif /* CONFIG_DEBUG_FS */
 };
 
 static struct amd_pmc_dev pmc;
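+/* forward declaration: amd_pmc_setup_smu_logging() calls this before its definition */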
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret);
 
 static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
 {
@@ -85,18 +130,77 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
        iowrite32(val, dev->regbase + reg_offset);
 }
 
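+/*
+ * Layout of the metrics table the SMU keeps in DRAM; it is mapped in
+ * amd_pmc_setup_smu_logging() and copied out with memcpy_fromio().
+ */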
+struct smu_metrics {
+       u32 table_version;
+       u32 hint_count;
+       u32 s0i3_cyclecount;
+       u32 timein_s0i2;
+       u64 timeentering_s0i3_lastcapture;
+       u64 timeentering_s0i3_totaltime;
+       u64 timeto_resume_to_os_lastcapture;
+       u64 timeto_resume_to_os_totaltime;
+       u64 timein_s0i3_lastcapture;
+       u64 timein_s0i3_totaltime;
+       u64 timein_swdrips_lastcapture;
+       u64 timein_swdrips_totaltime;
+       u64 timecondition_notmet_lastcapture[SOC_SUBSYSTEM_IP_MAX];
+       u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX];
+} __packed;
+
 #ifdef CONFIG_DEBUG_FS
 static int smu_fw_info_show(struct seq_file *s, void *unused)
 {
        struct amd_pmc_dev *dev = s->private;
-       u32 value;
+       struct smu_metrics table;
+       int idx;
+
+       if (dev->cpu_id == AMD_CPU_ID_PCO)
+               return -EINVAL;
+
+       memcpy_fromio(&table, dev->smu_virt_addr, sizeof(struct smu_metrics));
+
+       seq_puts(s, "\n=== SMU Statistics ===\n");
+       seq_printf(s, "Table Version: %d\n", table.table_version);
+       seq_printf(s, "Hint Count: %d\n", table.hint_count);
+       seq_printf(s, "S0i3 Cycle Count: %d\n", table.s0i3_cyclecount);
+       seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
+       seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
+
+       seq_puts(s, "\n=== Active time (in us) ===\n");
+       for (idx = 0 ; idx < SOC_SUBSYSTEM_IP_MAX ; idx++) {
+               if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
+                       seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
+                                  table.timecondition_notmet_lastcapture[idx]);
+       }
 
-       value = ioread32(dev->smu_base + AMD_SMU_FW_VERSION);
-       seq_printf(s, "SMU FW Info: %x\n", value);
        return 0;
 }
 DEFINE_SHOW_ATTRIBUTE(smu_fw_info);
 
+static int s0ix_stats_show(struct seq_file *s, void *unused)
+{
+       struct amd_pmc_dev *dev = s->private;
+       u64 entry_time, exit_time, residency;
+
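+       /* each FCH timestamp is 64-bit, exposed as high/low 32-bit registers */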
+       entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET);
+       entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET);
+
+       exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET);
+       exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET);
+
+       /* Timestamps tick at 48 MHz; divide by 48 to convert to microseconds */
+       residency = exit_time - entry_time;
+       do_div(residency, 48);
+
+       seq_puts(s, "=== S0ix statistics ===\n");
+       seq_printf(s, "S0ix Entry Time: %lld\n", entry_time);
+       seq_printf(s, "S0ix Exit Time: %lld\n", exit_time);
+       seq_printf(s, "Residency Time: %lld\n", residency);
+
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
+
 static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
 {
        debugfs_remove_recursive(dev->dbgfs_dir);
@@ -107,6 +211,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
        dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
        debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev,
                            &smu_fw_info_fops);
+       debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
+                           &s0ix_stats_fops);
 }
 #else
 static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
@@ -118,6 +224,32 @@ static inline void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
 }
 #endif /* CONFIG_DEBUG_FS */
 
+static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
+{
+       u32 phys_addr_low, phys_addr_hi;
+       u64 smu_phys_addr;
+
+       if (dev->cpu_id == AMD_CPU_ID_PCO)
+               return -EINVAL;
+
+       /* Get the list of active IP blocks from the SMU */
+       amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, 1);
+
+       /* Get the DRAM address of the SMU metrics table */
+       amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, 1);
+       amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, 1);
+       smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);
+
+       dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr, sizeof(struct smu_metrics));
+       if (!dev->smu_virt_addr)
+               return -ENOMEM;
+
+       /* Start the logging */
+       amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, 0);
+
+       return 0;
+}
+
 static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
 {
        u32 value;
@@ -132,19 +264,19 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
        dev_dbg(dev->dev, "AMD_PMC_REGISTER_MESSAGE:%x\n", value);
 }
 
-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret)
 {
        int rc;
-       u8 msg;
        u32 val;
 
+       mutex_lock(&dev->lock);
        /* Wait until we get a valid response */
        rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
-                               val, val > 0, PMC_MSG_DELAY_MIN_US,
+                               val, val != 0, PMC_MSG_DELAY_MIN_US,
                                PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
        if (rc) {
                dev_err(dev->dev, "failed to talk to SMU\n");
-               return rc;
+               goto out_unlock;
        }
 
        /* Write zero to response register */
@@ -154,34 +286,91 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set)
        amd_pmc_reg_write(dev, AMD_PMC_REGISTER_ARGUMENT, set);
 
        /* Write message ID to message ID register */
-       msg = (dev->cpu_id == AMD_CPU_ID_RN) ? MSG_OS_HINT_RN : MSG_OS_HINT_PCO;
        amd_pmc_reg_write(dev, AMD_PMC_REGISTER_MESSAGE, msg);
-       return 0;
+
+       /* Wait for the response to the command we just sent */
+       rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMC_REGISTER_RESPONSE,
+                               val, val != 0, PMC_MSG_DELAY_MIN_US,
+                               PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+       if (rc) {
+               dev_err(dev->dev, "SMU response timed out\n");
+               goto out_unlock;
+       }
+
+       switch (val) {
+       case AMD_PMC_RESULT_OK:
+               if (ret) {
+                       /* PMFW may need more time to post the result data */
+                       usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
+                       *data = amd_pmc_reg_read(dev, AMD_PMC_REGISTER_ARGUMENT);
+               }
+               break;
+       case AMD_PMC_RESULT_CMD_REJECT_BUSY:
+               dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
+               rc = -EBUSY;
+               goto out_unlock;
+       case AMD_PMC_RESULT_CMD_UNKNOWN:
+               dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
+               rc = -EINVAL;
+               goto out_unlock;
+       case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
+       case AMD_PMC_RESULT_FAILED:
+       default:
+               dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
+               rc = -EIO;
+               goto out_unlock;
+       }
+
+out_unlock:
+       mutex_unlock(&dev->lock);
+       amd_pmc_dump_registers(dev);
+       return rc;
+}
+
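+/* Map the CPU id to the OS_HINT message understood by its PMFW */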
+static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
+{
+       switch (dev->cpu_id) {
+       case AMD_CPU_ID_PCO:
+               return MSG_OS_HINT_PCO;
+       case AMD_CPU_ID_RN:
+       case AMD_CPU_ID_YC:
+               return MSG_OS_HINT_RN;
+       }
+       return -EINVAL;
 }
 
 static int __maybe_unused amd_pmc_suspend(struct device *dev)
 {
        struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
        int rc;
+       u8 msg;
+
+       /* Reset and start SMU logging to capture this cycle's s0i3 stats */
+       amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_RESET, 0);
+       amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_START, 0);
 
-       rc = amd_pmc_send_cmd(pdev, 1);
+       msg = amd_pmc_get_os_hint(pdev);
+       rc = amd_pmc_send_cmd(pdev, 1, NULL, msg, 0);
        if (rc)
                dev_err(pdev->dev, "suspend failed\n");
 
-       amd_pmc_dump_registers(pdev);
-       return 0;
+       return rc;
 }
 
 static int __maybe_unused amd_pmc_resume(struct device *dev)
 {
        struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
        int rc;
+       u8 msg;
+
+       /* Let SMU know that we are looking for stats */
+       amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
 
-       rc = amd_pmc_send_cmd(pdev, 0);
+       msg = amd_pmc_get_os_hint(pdev);
+       rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
        if (rc)
                dev_err(pdev->dev, "resume failed\n");
 
-       amd_pmc_dump_registers(pdev);
        return 0;
 }
 
@@ -190,6 +379,7 @@ static const struct dev_pm_ops amd_pmc_pm_ops = {
 };
 
 static const struct pci_device_id pmc_pci_ids[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
@@ -201,9 +391,8 @@ static int amd_pmc_probe(struct platform_device *pdev)
 {
        struct amd_pmc_dev *dev = &pmc;
        struct pci_dev *rdev;
-       u32 base_addr_lo;
-       u32 base_addr_hi;
-       u64 base_addr;
+       u32 base_addr_lo, base_addr_hi;
+       u64 base_addr, fch_phys_addr;
        int err;
        u32 val;
 
@@ -248,16 +437,25 @@ static int amd_pmc_probe(struct platform_device *pdev)
        pci_dev_put(rdev);
        base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
 
-       dev->smu_base = devm_ioremap(dev->dev, base_addr, AMD_PMC_MAPPING_SIZE);
-       if (!dev->smu_base)
-               return -ENOMEM;
-
        dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET,
                                    AMD_PMC_MAPPING_SIZE);
        if (!dev->regbase)
                return -ENOMEM;
 
-       amd_pmc_dump_registers(dev);
+       mutex_init(&dev->lock);
+
+       /* Use FCH registers to get the S0ix stats */
+       base_addr_lo = FCH_BASE_PHY_ADDR_LOW;
+       base_addr_hi = FCH_BASE_PHY_ADDR_HIGH;
+       fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+       dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE);
+       if (!dev->fch_virt_addr)
+               return -ENOMEM;
+
+       /* Use SMU to get the s0i3 debug stats */
+       err = amd_pmc_setup_smu_logging(dev);
+       if (err)
+               dev_err(dev->dev, "SMU debugging info not supported on this platform\n");
 
        platform_set_drvdata(pdev, dev);
        amd_pmc_dbgfs_register(dev);
@@ -269,11 +467,14 @@ static int amd_pmc_remove(struct platform_device *pdev)
        struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
 
        amd_pmc_dbgfs_unregister(dev);
+       mutex_destroy(&dev->lock);
        return 0;
 }
 
 static const struct acpi_device_id amd_pmc_acpi_ids[] = {
        {"AMDI0005", 0},
+       {"AMDI0006", 0},
+       {"AMDI0007", 0},
        {"AMD0004", 0},
        { }
 };
index 5529d7b..fbb224a 100644 (file)
@@ -141,6 +141,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
 
 static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
index 078648a..e5fbe01 100644 (file)
@@ -25,6 +25,7 @@ static const struct acpi_device_id intel_hid_ids[] = {
        {"INT33D5", 0},
        {"INTC1051", 0},
        {"INTC1054", 0},
+       {"INTC1070", 0},
        {"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
index 3671b5d..6cfed44 100644 (file)
@@ -571,6 +571,11 @@ static ssize_t current_value_store(struct kobject *kobj,
        else
                ret = tlmi_save_bios_settings("");
 
+       if (!ret && !tlmi_priv.pending_changes) {
+               tlmi_priv.pending_changes = true;
+               /* let userland know it may need to check reboot pending again */
+               kobject_uevent(&tlmi_priv.class_dev->kobj, KOBJ_CHANGE);
+       }
 out:
        kfree(auth_str);
        kfree(set_str);
@@ -647,6 +652,14 @@ static struct kobj_type tlmi_pwd_setting_ktype = {
        .sysfs_ops      = &tlmi_kobj_sysfs_ops,
 };
 
+static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr,
+                                  char *buf)
+{
+       return sprintf(buf, "%d\n", tlmi_priv.pending_changes);
+}
+
+static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);
+
 /* ---- Initialisation --------------------------------------------------------- */
 static void tlmi_release_attr(void)
 {
@@ -659,6 +672,7 @@ static void tlmi_release_attr(void)
                        kobject_put(&tlmi_priv.setting[i]->kobj);
                }
        }
+       sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr);
        kset_unregister(tlmi_priv.attribute_kset);
 
        /* Authentication structures */
@@ -709,8 +723,8 @@ static int tlmi_sysfs_init(void)
 
                /* Build attribute */
                tlmi_priv.setting[i]->kobj.kset = tlmi_priv.attribute_kset;
-               ret = kobject_init_and_add(&tlmi_priv.setting[i]->kobj, &tlmi_attr_setting_ktype,
-                               NULL, "%s", tlmi_priv.setting[i]->display_name);
+               ret = kobject_add(&tlmi_priv.setting[i]->kobj, NULL,
+                                 "%s", tlmi_priv.setting[i]->display_name);
                if (ret)
                        goto fail_create_attr;
 
@@ -719,6 +733,10 @@ static int tlmi_sysfs_init(void)
                        goto fail_create_attr;
        }
 
+       ret = sysfs_create_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr);
+       if (ret)
+               goto fail_create_attr;
+
        /* Create authentication entries */
        tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL,
                                                                &tlmi_priv.class_dev->kobj);
@@ -727,8 +745,7 @@ static int tlmi_sysfs_init(void)
                goto fail_create_attr;
        }
        tlmi_priv.pwd_admin->kobj.kset = tlmi_priv.authentication_kset;
-       ret = kobject_init_and_add(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype,
-                       NULL, "%s", "Admin");
+       ret = kobject_add(&tlmi_priv.pwd_admin->kobj, NULL, "%s", "Admin");
        if (ret)
                goto fail_create_attr;
 
@@ -737,8 +754,7 @@ static int tlmi_sysfs_init(void)
                goto fail_create_attr;
 
        tlmi_priv.pwd_power->kobj.kset = tlmi_priv.authentication_kset;
-       ret = kobject_init_and_add(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype,
-                       NULL, "%s", "System");
+       ret = kobject_add(&tlmi_priv.pwd_power->kobj, NULL, "%s", "System");
        if (ret)
                goto fail_create_attr;
 
@@ -818,6 +834,7 @@ static int tlmi_analyze(void)
                                pr_info("Error retrieving possible values for %d : %s\n",
                                                i, setting->display_name);
                }
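+               /* init the kobject now; kobject_add() follows in tlmi_sysfs_init() */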
+               kobject_init(&setting->kobj, &tlmi_attr_setting_ktype);
                tlmi_priv.setting[i] = setting;
                tlmi_priv.settings_count++;
                kfree(item);
@@ -844,10 +861,12 @@ static int tlmi_analyze(void)
        if (pwdcfg.password_state & TLMI_PAP_PWD)
                tlmi_priv.pwd_admin->valid = true;
 
+       kobject_init(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype);
+
        tlmi_priv.pwd_power = kzalloc(sizeof(struct tlmi_pwd_setting), GFP_KERNEL);
        if (!tlmi_priv.pwd_power) {
                ret = -ENOMEM;
-               goto fail_clear_attr;
+               goto fail_free_pwd_admin;
        }
        strscpy(tlmi_priv.pwd_power->kbdlang, "us", TLMI_LANG_MAXLEN);
        tlmi_priv.pwd_power->encoding = TLMI_ENCODING_ASCII;
@@ -859,11 +878,19 @@ static int tlmi_analyze(void)
        if (pwdcfg.password_state & TLMI_POP_PWD)
                tlmi_priv.pwd_power->valid = true;
 
+       kobject_init(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype);
+
        return 0;
 
+fail_free_pwd_admin:
+       kfree(tlmi_priv.pwd_admin);
 fail_clear_attr:
-       for (i = 0; i < TLMI_SETTINGS_COUNT; ++i)
-               kfree(tlmi_priv.setting[i]);
+       for (i = 0; i < TLMI_SETTINGS_COUNT; ++i) {
+               if (tlmi_priv.setting[i]) {
+                       kfree(tlmi_priv.setting[i]->possible_values);
+                       kfree(tlmi_priv.setting[i]);
+               }
+       }
        return ret;
 }
 
index 6fa8da7..eb59884 100644 (file)
@@ -60,6 +60,7 @@ struct think_lmi {
        bool can_get_bios_selections;
        bool can_set_bios_password;
        bool can_get_password_settings;
+       bool pending_changes;
 
        struct tlmi_attr_setting *setting[TLMI_SETTINGS_COUNT];
        struct device *class_dev;
index b010e4c..11c60a2 100644 (file)
@@ -78,7 +78,7 @@ static int wl_add(struct acpi_device *device)
 
        err = wireless_input_setup();
        if (err)
-               pr_err("Failed to setup hp wireless hotkeys\n");
+               pr_err("Failed to setup wireless hotkeys\n");
 
        return err;
 }
index 3d45ed0..a6ebdb2 100644 (file)
@@ -1728,6 +1728,7 @@ static void ab8500_fg_algorithm_calibrate(struct ab8500_fg *di)
                break;
        case AB8500_FG_CALIB_WAIT:
                dev_dbg(di->dev, "Calibration WFI\n");
+               break;
        default:
                break;
        }
@@ -2224,6 +2225,7 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
                                        queue_work(di->fg_wq, &di->fg_work);
                                        break;
                                }
+                               break;
                        default:
                                break;
                        }
index a17849b..b72826c 100644 (file)
@@ -1150,6 +1150,7 @@ static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
                                default:
                                        break;
                                }
+                               break;
                        default:
                                break;
                        }
index 8673d17..28a6fe3 100644 (file)
@@ -3,7 +3,7 @@
 # Makefile for PTP 1588 clock support.
 #
 
-ptp-y                                  := ptp_clock.o ptp_chardev.o ptp_sysfs.o
+ptp-y                                  := ptp_clock.o ptp_chardev.o ptp_sysfs.o ptp_vclock.o
 ptp_kvm-$(CONFIG_X86)                  := ptp_kvm_x86.o ptp_kvm_common.o
 ptp_kvm-$(CONFIG_HAVE_ARM_SMCCC)       := ptp_kvm_arm.o ptp_kvm_common.o
 obj-$(CONFIG_PTP_1588_CLOCK)           += ptp.o
index a23a37a..4dfc52e 100644 (file)
 #define PTP_PPS_EVENT PPS_CAPTUREASSERT
 #define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)
 
+struct class *ptp_class;
+
 /* private globals */
 
 static dev_t ptp_devt;
-static struct class *ptp_class;
 
 static DEFINE_IDA(ptp_clocks_map);
 
@@ -76,6 +77,11 @@ static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp
 {
        struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
 
+       if (ptp_vclock_in_use(ptp)) {
+               pr_err("ptp: virtual clock in use\n");
+               return -EBUSY;
+       }
+
        return  ptp->info->settime64(ptp->info, tp);
 }
 
@@ -97,6 +103,11 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
        struct ptp_clock_info *ops;
        int err = -EOPNOTSUPP;
 
+       if (ptp_vclock_in_use(ptp)) {
+               pr_err("ptp: virtual clock in use\n");
+               return -EBUSY;
+       }
+
        ops = ptp->info;
 
        if (tx->modes & ADJ_SETOFFSET) {
@@ -161,6 +172,7 @@ static void ptp_clock_release(struct device *dev)
        ptp_cleanup_pin_groups(ptp);
        mutex_destroy(&ptp->tsevq_mux);
        mutex_destroy(&ptp->pincfg_mux);
+       mutex_destroy(&ptp->n_vclocks_mux);
        ida_simple_remove(&ptp_clocks_map, ptp->index);
        kfree(ptp);
 }
@@ -185,6 +197,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
 {
        struct ptp_clock *ptp;
        int err = 0, index, major = MAJOR(ptp_devt);
+       size_t size;
 
        if (info->n_alarm > PTP_MAX_ALARMS)
                return ERR_PTR(-EINVAL);
@@ -208,6 +221,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
        spin_lock_init(&ptp->tsevq.lock);
        mutex_init(&ptp->tsevq_mux);
        mutex_init(&ptp->pincfg_mux);
+       mutex_init(&ptp->n_vclocks_mux);
        init_waitqueue_head(&ptp->tsev_wq);
 
        if (ptp->info->do_aux_work) {
@@ -218,7 +232,22 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
                        pr_err("failed to create ptp aux_worker %d\n", err);
                        goto kworker_err;
                }
-               ptp->pps_source->lookup_cookie = ptp;
+       }
+
+       /* A PTP virtual clock is registered with its physical clock as parent */
+       if (parent && parent->class && parent->class->name &&
+           strcmp(parent->class->name, "ptp") == 0)
+               ptp->is_virtual_clock = true;
+
+       if (!ptp->is_virtual_clock) {
+               ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS;
+
+               size = sizeof(int) * ptp->max_vclocks;
+               ptp->vclock_index = kzalloc(size, GFP_KERNEL);
+               if (!ptp->vclock_index) {
+                       err = -ENOMEM;
+                       goto no_mem_for_vclocks;
+               }
        }
 
        err = ptp_populate_pin_groups(ptp);
@@ -238,6 +267,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
                        pr_err("failed to register pps source\n");
                        goto no_pps;
                }
+               ptp->pps_source->lookup_cookie = ptp;
        }
 
        /* Initialize a new device of our class in our clock structure. */
@@ -265,11 +295,14 @@ no_clock:
 no_pps:
        ptp_cleanup_pin_groups(ptp);
 no_pin_groups:
+       kfree(ptp->vclock_index);
+no_mem_for_vclocks:
        if (ptp->kworker)
                kthread_destroy_worker(ptp->kworker);
 kworker_err:
        mutex_destroy(&ptp->tsevq_mux);
        mutex_destroy(&ptp->pincfg_mux);
+       mutex_destroy(&ptp->n_vclocks_mux);
        ida_simple_remove(&ptp_clocks_map, index);
 no_slot:
        kfree(ptp);
@@ -280,9 +313,16 @@ EXPORT_SYMBOL(ptp_clock_register);
 
 int ptp_clock_unregister(struct ptp_clock *ptp)
 {
+       if (ptp_vclock_in_use(ptp)) {
+               pr_err("ptp: virtual clock in use\n");
+               return -EBUSY;
+       }
+
        ptp->defunct = 1;
        wake_up_interruptible(&ptp->tsev_wq);
 
+       kfree(ptp->vclock_index);
+
        if (ptp->kworker) {
                kthread_cancel_delayed_work_sync(&ptp->aux_work);
                kthread_destroy_worker(ptp->kworker);
index 6b97155..dba6be4 100644 (file)
@@ -18,6 +18,7 @@
 
 #define PTP_MAX_TIMESTAMPS 128
 #define PTP_BUF_TIMESTAMPS 30
+#define PTP_DEFAULT_MAX_VCLOCKS 20
 
 struct timestamp_event_queue {
        struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS];
@@ -46,6 +47,24 @@ struct ptp_clock {
        const struct attribute_group *pin_attr_groups[2];
        struct kthread_worker *kworker;
        struct kthread_delayed_work aux_work;
+       unsigned int max_vclocks;
+       unsigned int n_vclocks;
+       int *vclock_index;
+       struct mutex n_vclocks_mux; /* protect concurrent n_vclocks access */
+       bool is_virtual_clock;
+};
+
+#define info_to_vclock(d) container_of((d), struct ptp_vclock, info)
+#define cc_to_vclock(d) container_of((d), struct ptp_vclock, cc)
+#define dw_to_vclock(d) container_of((d), struct ptp_vclock, refresh_work)
+
+struct ptp_vclock {
+       struct ptp_clock *pclock;
+       struct ptp_clock_info info;
+       struct ptp_clock *clock;
+       struct cyclecounter cc;
+       struct timecounter tc;
+       spinlock_t lock;        /* protects tc/cc */
 };
 
 /*
@@ -61,6 +80,24 @@ static inline int queue_cnt(struct timestamp_event_queue *q)
        return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt;
 }
 
+/* Check if ptp virtual clock is in use */
+static inline bool ptp_vclock_in_use(struct ptp_clock *ptp)
+{
+       bool in_use = false;
+
+       if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
+               return true;
+
+       if (!ptp->is_virtual_clock && ptp->n_vclocks)
+               in_use = true;
+
+       mutex_unlock(&ptp->n_vclocks_mux);
+
+       return in_use;
+}
+
+extern struct class *ptp_class;
+
 /*
  * see ptp_chardev.c
  */
@@ -89,4 +126,6 @@ extern const struct attribute_group *ptp_groups[];
 int ptp_populate_pin_groups(struct ptp_clock *ptp);
 void ptp_cleanup_pin_groups(struct ptp_clock *ptp);
 
+struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock);
+void ptp_vclock_unregister(struct ptp_vclock *vclock);
 #endif
index be076a9..b3d96b7 100644 (file)
@@ -3,6 +3,7 @@
  * PTP 1588 clock support - sysfs interface.
  *
  * Copyright (C) 2010 OMICRON electronics GmbH
+ * Copyright 2021 NXP
  */
 #include <linux/capability.h>
 #include <linux/slab.h>
@@ -148,6 +149,159 @@ out:
 }
 static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store);
 
+static int unregister_vclock(struct device *dev, void *data)
+{
+       struct ptp_clock *ptp = dev_get_drvdata(dev);
+       struct ptp_clock_info *info = ptp->info;
+       struct ptp_vclock *vclock;
+       u8 *num = data;
+
+       vclock = info_to_vclock(info);
+       dev_info(dev->parent, "delete virtual clock ptp%d\n",
+                vclock->clock->index);
+
+       ptp_vclock_unregister(vclock);
+       (*num)--;
+
+       /* A nonzero return stops device_for_each_child_reverse(); not an error */
+       if (*num == 0)
+               return -EINVAL;
+
+       return 0;
+}
+
+static ssize_t n_vclocks_show(struct device *dev,
+                             struct device_attribute *attr, char *page)
+{
+       struct ptp_clock *ptp = dev_get_drvdata(dev);
+       ssize_t size;
+
+       if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
+               return -ERESTARTSYS;
+
+       size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->n_vclocks);
+
+       mutex_unlock(&ptp->n_vclocks_mux);
+
+       return size;
+}
+
+static ssize_t n_vclocks_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t count)
+{
+       struct ptp_clock *ptp = dev_get_drvdata(dev);
+       struct ptp_vclock *vclock;
+       int err = -EINVAL;
+       u32 num, i;
+
+       if (kstrtou32(buf, 0, &num))
+               return err;
+
+       if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
+               return -ERESTARTSYS;
+
+       if (num > ptp->max_vclocks) {
+               dev_err(dev, "max value is %d\n", ptp->max_vclocks);
+               goto out;
+       }
+
+       /* Need to create more vclocks */
+       if (num > ptp->n_vclocks) {
+               for (i = 0; i < num - ptp->n_vclocks; i++) {
+                       vclock = ptp_vclock_register(ptp);
+                       if (!vclock)
+                               goto out;
+
+                       *(ptp->vclock_index + ptp->n_vclocks + i) =
+                               vclock->clock->index;
+
+                       dev_info(dev, "new virtual clock ptp%d\n",
+                                vclock->clock->index);
+               }
+       }
+
+       /* Need to delete vclocks */
+       if (num < ptp->n_vclocks) {
+               i = ptp->n_vclocks - num;
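+               /* the callback decrements i and halts the walk once it reaches zero */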
+               device_for_each_child_reverse(dev, &i,
+                                             unregister_vclock);
+
+               for (i = 1; i <= ptp->n_vclocks - num; i++)
+                       *(ptp->vclock_index + ptp->n_vclocks - i) = -1;
+       }
+
+       if (num == 0)
+               dev_info(dev, "only physical clock in use now\n");
+       else
+               dev_info(dev, "guarantee physical clock free running\n");
+
+       ptp->n_vclocks = num;
+       mutex_unlock(&ptp->n_vclocks_mux);
+
+       return count;
+out:
+       mutex_unlock(&ptp->n_vclocks_mux);
+       return err;
+}
+static DEVICE_ATTR_RW(n_vclocks);
+
+static ssize_t max_vclocks_show(struct device *dev,
+                               struct device_attribute *attr, char *page)
+{
+       struct ptp_clock *ptp = dev_get_drvdata(dev);
+       ssize_t size;
+
+       size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->max_vclocks);
+
+       return size;
+}
+
+static ssize_t max_vclocks_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       struct ptp_clock *ptp = dev_get_drvdata(dev);
+       unsigned int *vclock_index;
+       int err = -EINVAL;
+       size_t size;
+       u32 max;
+
+       if (kstrtou32(buf, 0, &max) || max == 0)
+               return -EINVAL;
+
+       if (max == ptp->max_vclocks)
+               return count;
+
+       if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
+               return -ERESTARTSYS;
+
+       if (max < ptp->n_vclocks)
+               goto out;
+
+       size = sizeof(int) * max;
+       vclock_index = kzalloc(size, GFP_KERNEL);
+       if (!vclock_index) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       size = sizeof(int) * ptp->n_vclocks;
+       memcpy(vclock_index, ptp->vclock_index, size);
+
+       kfree(ptp->vclock_index);
+       ptp->vclock_index = vclock_index;
+       ptp->max_vclocks = max;
+
+       mutex_unlock(&ptp->n_vclocks_mux);
+
+       return count;
+out:
+       mutex_unlock(&ptp->n_vclocks_mux);
+       return err;
+}
+static DEVICE_ATTR_RW(max_vclocks);
+
 static struct attribute *ptp_attrs[] = {
        &dev_attr_clock_name.attr,
 
@@ -162,6 +316,8 @@ static struct attribute *ptp_attrs[] = {
        &dev_attr_fifo.attr,
        &dev_attr_period.attr,
        &dev_attr_pps_enable.attr,
+       &dev_attr_n_vclocks.attr,
+       &dev_attr_max_vclocks.attr,
        NULL
 };
 
@@ -183,6 +339,10 @@ static umode_t ptp_is_attribute_visible(struct kobject *kobj,
        } else if (attr == &dev_attr_pps_enable.attr) {
                if (!info->pps)
                        mode = 0;
+       } else if (attr == &dev_attr_n_vclocks.attr ||
+                  attr == &dev_attr_max_vclocks.attr) {
+               if (ptp->is_virtual_clock)
+                       mode = 0;
        }
 
        return mode;
diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c
new file mode 100644 (file)
index 0000000..e0f87c5
--- /dev/null
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PTP virtual clock driver
+ *
+ * Copyright 2021 NXP
+ */
+#include <linux/slab.h>
+#include "ptp_private.h"
+
+#define PTP_VCLOCK_CC_SHIFT            31
+#define PTP_VCLOCK_CC_MULT             (1 << PTP_VCLOCK_CC_SHIFT)
+#define PTP_VCLOCK_FADJ_SHIFT          9
+#define PTP_VCLOCK_FADJ_DENOMINATOR    15625ULL
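+/*
+ * Frequency adjustment math: with cc.shift = 31, a 1 ppm change equals
+ * 2^31 / 10^6 mult units.  scaled_ppm carries 2^16 fractional bits, and
+ * 2^31 / (10^6 * 2^16) reduces to 2^9 / 15625, which gives FADJ_SHIFT
+ * and FADJ_DENOMINATOR above.
+ */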
+#define PTP_VCLOCK_REFRESH_INTERVAL    (HZ * 2)
+
+static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+       struct ptp_vclock *vclock = info_to_vclock(ptp);
+       unsigned long flags;
+       s64 adj;
+
+       adj = (s64)scaled_ppm << PTP_VCLOCK_FADJ_SHIFT;
+       adj = div_s64(adj, PTP_VCLOCK_FADJ_DENOMINATOR);
+
+       spin_lock_irqsave(&vclock->lock, flags);
+       timecounter_read(&vclock->tc);
+       vclock->cc.mult = PTP_VCLOCK_CC_MULT + adj;
+       spin_unlock_irqrestore(&vclock->lock, flags);
+
+       return 0;
+}
+
+static int ptp_vclock_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+       struct ptp_vclock *vclock = info_to_vclock(ptp);
+       unsigned long flags;
+
+       spin_lock_irqsave(&vclock->lock, flags);
+       timecounter_adjtime(&vclock->tc, delta);
+       spin_unlock_irqrestore(&vclock->lock, flags);
+
+       return 0;
+}
+
+static int ptp_vclock_gettime(struct ptp_clock_info *ptp,
+                             struct timespec64 *ts)
+{
+       struct ptp_vclock *vclock = info_to_vclock(ptp);
+       unsigned long flags;
+       u64 ns;
+
+       spin_lock_irqsave(&vclock->lock, flags);
+       ns = timecounter_read(&vclock->tc);
+       spin_unlock_irqrestore(&vclock->lock, flags);
+       *ts = ns_to_timespec64(ns);
+
+       return 0;
+}
+
+static int ptp_vclock_settime(struct ptp_clock_info *ptp,
+                             const struct timespec64 *ts)
+{
+       struct ptp_vclock *vclock = info_to_vclock(ptp);
+       u64 ns = timespec64_to_ns(ts);
+       unsigned long flags;
+
+       spin_lock_irqsave(&vclock->lock, flags);
+       timecounter_init(&vclock->tc, &vclock->cc, ns);
+       spin_unlock_irqrestore(&vclock->lock, flags);
+
+       return 0;
+}
+
+static long ptp_vclock_refresh(struct ptp_clock_info *ptp)
+{
+       struct ptp_vclock *vclock = info_to_vclock(ptp);
+       struct timespec64 ts;
+
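+       /* fold elapsed cycles into the timecounter before the 32-bit mask wraps */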
+       ptp_vclock_gettime(&vclock->info, &ts);
+
+       return PTP_VCLOCK_REFRESH_INTERVAL;
+}
+
+static const struct ptp_clock_info ptp_vclock_info = {
+       .owner          = THIS_MODULE,
+       .name           = "ptp virtual clock",
+       /* The maximum ppb value that long scaled_ppm can support */
+       .max_adj        = 32767999,
+       .adjfine        = ptp_vclock_adjfine,
+       .adjtime        = ptp_vclock_adjtime,
+       .gettime64      = ptp_vclock_gettime,
+       .settime64      = ptp_vclock_settime,
+       .do_aux_work    = ptp_vclock_refresh,
+};
+
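+/* cyclecounter read hook: sample the parent physical clock, in nanoseconds */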
+static u64 ptp_vclock_read(const struct cyclecounter *cc)
+{
+       struct ptp_vclock *vclock = cc_to_vclock(cc);
+       struct ptp_clock *ptp = vclock->pclock;
+       struct timespec64 ts = {};
+
+       if (ptp->info->gettimex64)
+               ptp->info->gettimex64(ptp->info, &ts, NULL);
+       else
+               ptp->info->gettime64(ptp->info, &ts);
+
+       return timespec64_to_ns(&ts);
+}
+
+static const struct cyclecounter ptp_vclock_cc = {
+       .read   = ptp_vclock_read,
+       .mask   = CYCLECOUNTER_MASK(32),
+       .mult   = PTP_VCLOCK_CC_MULT,
+       .shift  = PTP_VCLOCK_CC_SHIFT,
+};
+
+struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock)
+{
+       struct ptp_vclock *vclock;
+
+       vclock = kzalloc(sizeof(*vclock), GFP_KERNEL);
+       if (!vclock)
+               return NULL;
+
+       vclock->pclock = pclock;
+       vclock->info = ptp_vclock_info;
+       vclock->cc = ptp_vclock_cc;
+
+       snprintf(vclock->info.name, PTP_CLOCK_NAME_LEN, "ptp%d_virt",
+                pclock->index);
+
+       spin_lock_init(&vclock->lock);
+
+       vclock->clock = ptp_clock_register(&vclock->info, &pclock->dev);
+       if (IS_ERR_OR_NULL(vclock->clock)) {
+               kfree(vclock);
+               return NULL;
+       }
+
+       timecounter_init(&vclock->tc, &vclock->cc, 0);
+       ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL);
+
+       return vclock;
+}
+
+void ptp_vclock_unregister(struct ptp_vclock *vclock)
+{
+       ptp_clock_unregister(vclock->clock);
+       kfree(vclock);
+}
+
+int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
+{
+       char name[PTP_CLOCK_NAME_LEN] = "";
+       struct ptp_clock *ptp;
+       struct device *dev;
+       int num = 0;
+
+       if (pclock_index < 0)
+               return num;
+
+       snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", pclock_index);
+       dev = class_find_device_by_name(ptp_class, name);
+       if (!dev)
+               return num;
+
+       ptp = dev_get_drvdata(dev);
+
+       if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) {
+               put_device(dev);
+               return num;
+       }
+
+       *vclock_index = kzalloc(sizeof(int) * ptp->n_vclocks, GFP_KERNEL);
+       if (!(*vclock_index))
+               goto out;
+
+       memcpy(*vclock_index, ptp->vclock_index, sizeof(int) * ptp->n_vclocks);
+       num = ptp->n_vclocks;
+out:
+       mutex_unlock(&ptp->n_vclocks_mux);
+       put_device(dev);
+       return num;
+}
+EXPORT_SYMBOL(ptp_get_vclocks_index);
+
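+/* Rebase a physical-clock hardware timestamp into the given vclock's domain */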
+void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps,
+                          int vclock_index)
+{
+       char name[PTP_CLOCK_NAME_LEN] = "";
+       struct ptp_vclock *vclock;
+       struct ptp_clock *ptp;
+       unsigned long flags;
+       struct device *dev;
+       u64 ns;
+
+       snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", vclock_index);
+       dev = class_find_device_by_name(ptp_class, name);
+       if (!dev)
+               return;
+
+       ptp = dev_get_drvdata(dev);
+       if (!ptp->is_virtual_clock) {
+               put_device(dev);
+               return;
+       }
+
+       vclock = info_to_vclock(ptp->info);
+
+       ns = ktime_to_ns(hwtstamps->hwtstamp);
+
+       spin_lock_irqsave(&vclock->lock, flags);
+       ns = timecounter_cyc2time(&vclock->tc, ns);
+       spin_unlock_irqrestore(&vclock->lock, flags);
+
+       put_device(dev);
+       hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+EXPORT_SYMBOL(ptp_convert_timestamp);
index 5537b5f..e157273 100644 (file)
@@ -190,12 +190,9 @@ static int berlin_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return 0;
        }
 
-       if (state->period != pwm->state.period ||
-           state->duty_cycle != pwm->state.duty_cycle) {
-               err = berlin_pwm_config(chip, pwm, state->duty_cycle, state->period);
-               if (err)
-                       return err;
-       }
+       err = berlin_pwm_config(chip, pwm, state->duty_cycle, state->period);
+       if (err)
+               return err;
 
        if (!enabled)
                return berlin_pwm_enable(chip, pwm);
index 8a3d781..fc3cb7d 100644 (file)
@@ -64,6 +64,11 @@ static int ep93xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
        int ret;
        struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip);
        bool enabled = state->enabled;
+       void __iomem *base = ep93xx_pwm->base;
+       unsigned long long c;
+       unsigned long period_cycles;
+       unsigned long duty_cycles;
+       unsigned long term;
 
        if (state->polarity != pwm->state.polarity) {
                if (enabled) {
@@ -97,57 +102,47 @@ static int ep93xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return 0;
        }
 
-       if (state->period != pwm->state.period ||
-           state->duty_cycle != pwm->state.duty_cycle) {
-               struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip);
-               void __iomem *base = ep93xx_pwm->base;
-               unsigned long long c;
-               unsigned long period_cycles;
-               unsigned long duty_cycles;
-               unsigned long term;
+       /*
+        * The clock needs to be enabled to access the PWM registers.
+        * Configuration can be changed at any time.
+        */
+       if (!pwm_is_enabled(pwm)) {
+               ret = clk_prepare_enable(ep93xx_pwm->clk);
+               if (ret)
+                       return ret;
+       }
 
-               /*
-                * The clock needs to be enabled to access the PWM registers.
-                * Configuration can be changed at any time.
-                */
-               if (!pwm_is_enabled(pwm)) {
-                       ret = clk_prepare_enable(ep93xx_pwm->clk);
-                       if (ret)
-                               return ret;
-               }
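+       /* convert period and duty from nanoseconds to PWM clock cycles */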
+       c = clk_get_rate(ep93xx_pwm->clk);
+       c *= state->period;
+       do_div(c, 1000000000);
+       period_cycles = c;
+
+       c = period_cycles;
+       c *= state->duty_cycle;
+       do_div(c, state->period);
+       duty_cycles = c;
 
-               c = clk_get_rate(ep93xx_pwm->clk);
-               c *= state->period;
-               do_div(c, 1000000000);
-               period_cycles = c;
-
-               c = period_cycles;
-               c *= state->duty_cycle;
-               do_div(c, state->period);
-               duty_cycles = c;
-
-               if (period_cycles < 0x10000 && duty_cycles < 0x10000) {
-                       term = readw(base + EP93XX_PWMx_TERM_COUNT);
-
-                       /* Order is important if PWM is running */
-                       if (period_cycles > term) {
-                               writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT);
-                               writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE);
-                       } else {
-                               writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE);
-                               writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT);
-                       }
-                       ret = 0;
+       if (period_cycles < 0x10000 && duty_cycles < 0x10000) {
+               term = readw(base + EP93XX_PWMx_TERM_COUNT);
+
+               /* Order is important if PWM is running */
+               if (period_cycles > term) {
+                       writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT);
+                       writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE);
                } else {
-                       ret = -EINVAL;
+                       writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE);
+                       writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT);
                }
+               ret = 0;
+       } else {
+               ret = -EINVAL;
+       }
 
-               if (!pwm_is_enabled(pwm))
-                       clk_disable_unprepare(ep93xx_pwm->clk);
+       if (!pwm_is_enabled(pwm))
+               clk_disable_unprepare(ep93xx_pwm->clk);
 
-               if (ret)
-                       return ret;
-       }
+       if (ret)
+               return ret;
 
        if (!enabled) {
                ret = clk_prepare_enable(ep93xx_pwm->clk);
index 48c31da..54c7990 100644 (file)
@@ -177,12 +177,9 @@ static int spear_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return 0;
        }
 
-       if (state->period != pwm->state.period ||
-           state->duty_cycle != pwm->state.duty_cycle) {
-               err = spear_pwm_config(chip, pwm, state->duty_cycle, state->period);
-               if (err)
-                       return err;
-       }
+       err = spear_pwm_config(chip, pwm, state->duty_cycle, state->period);
+       if (err)
+               return err;
 
        if (!pwm->state.enabled)
                return spear_pwm_enable(chip, pwm);
index f2a85e8..7004f55 100644 (file)
@@ -183,13 +183,10 @@ static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                        }
                }
 
-               if (state->period != cstate->period ||
-                   state->duty_cycle != cstate->duty_cycle) {
-                       ret = sprd_pwm_config(spc, pwm, state->duty_cycle,
-                                             state->period);
-                       if (ret)
-                               return ret;
-               }
+               ret = sprd_pwm_config(spc, pwm, state->duty_cycle,
+                                     state->period);
+               if (ret)
+                       return ret;
 
                sprd_pwm_write(spc, pwm->hwpwm, SPRD_PWM_ENABLE, 1);
        } else if (cstate->enabled) {
index dec3f1f..35eb19a 100644 (file)
@@ -189,16 +189,13 @@ static int ecap_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return 0;
        }
 
-       if (state->period != pwm->state.period ||
-           state->duty_cycle != pwm->state.duty_cycle) {
-               if (state->period > NSEC_PER_SEC)
-                       return -ERANGE;
+       if (state->period > NSEC_PER_SEC)
+               return -ERANGE;
 
-               err = ecap_pwm_config(chip, pwm, state->duty_cycle,
-                                     state->period, enabled);
-               if (err)
-                       return err;
-       }
+       err = ecap_pwm_config(chip, pwm, state->duty_cycle,
+                             state->period, enabled);
+       if (err)
+               return err;
 
        if (!enabled)
                return ecap_pwm_enable(chip, pwm);
index e16c372..aa42da4 100644 (file)
@@ -294,9 +294,9 @@ static bool check_temp_flag_mismatch(struct regulator_dev *rdev, int severity,
                                    struct bd957x_regulator_data *r)
 {
        if ((severity == REGULATOR_SEVERITY_ERR &&
-            r->ovd_notif != REGULATOR_EVENT_OVER_TEMP) ||
+            r->temp_notif != REGULATOR_EVENT_OVER_TEMP) ||
             (severity == REGULATOR_SEVERITY_WARN &&
-            r->ovd_notif != REGULATOR_EVENT_OVER_TEMP_WARN)) {
+            r->temp_notif != REGULATOR_EVENT_OVER_TEMP_WARN)) {
                dev_warn(rdev_get_dev(rdev),
                         "Can't support both thermal WARN and ERR\n");
                if (severity == REGULATOR_SEVERITY_WARN)
index bff8c51..d144a4b 100644 (file)
@@ -366,9 +366,8 @@ static struct hi6421_regulator_info
 
 static int hi6421_regulator_enable(struct regulator_dev *rdev)
 {
-       struct hi6421_regulator_pdata *pdata;
+       struct hi6421_regulator_pdata *pdata = rdev_get_drvdata(rdev);
 
-       pdata = dev_get_drvdata(rdev->dev.parent);
        /* hi6421 spec requires regulator enablement must be serialized:
         *  - Because when BUCK, LDO switching from off to on, it will have
         *    a huge instantaneous current; so you can not turn on two or
@@ -385,9 +384,10 @@ static int hi6421_regulator_enable(struct regulator_dev *rdev)
 
 static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
 {
-       struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+       struct hi6421_regulator_info *info;
        unsigned int reg_val;
 
+       info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
        regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
        if (reg_val & info->mode_mask)
                return REGULATOR_MODE_IDLE;
@@ -397,9 +397,10 @@ static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
 
 static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev)
 {
-       struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+       struct hi6421_regulator_info *info;
        unsigned int reg_val;
 
+       info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
        regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
        if (reg_val & info->mode_mask)
                return REGULATOR_MODE_STANDBY;
@@ -410,9 +411,10 @@ static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev)
 static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev,
                                                unsigned int mode)
 {
-       struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+       struct hi6421_regulator_info *info;
        unsigned int new_mode;
 
+       info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
        switch (mode) {
        case REGULATOR_MODE_NORMAL:
                new_mode = 0;
@@ -434,9 +436,10 @@ static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev,
 static int hi6421_regulator_buck_set_mode(struct regulator_dev *rdev,
                                                unsigned int mode)
 {
-       struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+       struct hi6421_regulator_info *info;
        unsigned int new_mode;
 
+       info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
        switch (mode) {
        case REGULATOR_MODE_NORMAL:
                new_mode = 0;
@@ -459,7 +462,9 @@ static unsigned int
 hi6421_regulator_ldo_get_optimum_mode(struct regulator_dev *rdev,
                        int input_uV, int output_uV, int load_uA)
 {
-       struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
+       struct hi6421_regulator_info *info;
+
+       info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
 
        if (load_uA > info->eco_microamp)
                return REGULATOR_MODE_NORMAL;
@@ -543,14 +548,13 @@ static int hi6421_regulator_probe(struct platform_device *pdev)
        if (!pdata)
                return -ENOMEM;
        mutex_init(&pdata->lock);
-       platform_set_drvdata(pdev, pdata);
 
        for (i = 0; i < ARRAY_SIZE(hi6421_regulator_info); i++) {
                /* assign per-regulator data */
                info = &hi6421_regulator_info[i];
 
                config.dev = pdev->dev.parent;
-               config.driver_data = info;
+               config.driver_data = pdata;
                config.regmap = pmic->regmap;
 
                rdev = devm_regulator_register(&pdev->dev, &info->desc,
index 9b162c0..845bc3b 100644 (file)
@@ -98,10 +98,9 @@ static const unsigned int ldo34_voltages[] = {
 
 static int hi6421_spmi_regulator_enable(struct regulator_dev *rdev)
 {
-       struct hi6421_spmi_reg_priv *priv;
+       struct hi6421_spmi_reg_priv *priv = rdev_get_drvdata(rdev);
        int ret;
 
-       priv = dev_get_drvdata(rdev->dev.parent);
        /* cannot enable more than one regulator at one time */
        mutex_lock(&priv->enable_mutex);
 
@@ -119,9 +118,10 @@ static int hi6421_spmi_regulator_enable(struct regulator_dev *rdev)
 
 static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev)
 {
-       struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
+       struct hi6421_spmi_reg_info *sreg;
        unsigned int reg_val;
 
+       sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc);
        regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
 
        if (reg_val & sreg->eco_mode_mask)
@@ -133,9 +133,10 @@ static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev)
 static int hi6421_spmi_regulator_set_mode(struct regulator_dev *rdev,
                                          unsigned int mode)
 {
-       struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
+       struct hi6421_spmi_reg_info *sreg;
        unsigned int val;
 
+       sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc);
        switch (mode) {
        case REGULATOR_MODE_NORMAL:
                val = 0;
@@ -159,7 +160,9 @@ hi6421_spmi_regulator_get_optimum_mode(struct regulator_dev *rdev,
                                       int input_uV, int output_uV,
                                       int load_uA)
 {
-       struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev);
+       struct hi6421_spmi_reg_info *sreg;
+
+       sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc);
 
        if (!sreg->eco_uA || ((unsigned int)load_uA > sreg->eco_uA))
                return REGULATOR_MODE_NORMAL;
@@ -252,13 +255,12 @@ static int hi6421_spmi_regulator_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        mutex_init(&priv->enable_mutex);
-       platform_set_drvdata(pdev, priv);
 
        for (i = 0; i < ARRAY_SIZE(regulator_info); i++) {
                info = &regulator_info[i];
 
                config.dev = pdev->dev.parent;
-               config.driver_data = info;
+               config.driver_data = priv;
                config.regmap = pmic->regmap;
 
                rdev = devm_regulator_register(dev, &info->desc, &config);
index d3d8761..234af3a 100644 (file)
@@ -179,8 +179,7 @@ static int dvfsrc_vcore_regulator_probe(struct platform_device *pdev)
        for (i = 0; i < regulator_init_data->size; i++) {
                config.dev = dev->parent;
                config.driver_data = (mt_regulators + i);
-               rdev = devm_regulator_register(dev->parent,
-                                              &(mt_regulators + i)->desc,
+               rdev = devm_regulator_register(dev, &(mt_regulators + i)->desc,
                                               &config);
                if (IS_ERR(rdev)) {
                        dev_err(dev, "failed to register %s\n",
index 4bca64d..2ee3341 100644 (file)
@@ -37,7 +37,7 @@
 #define RTMV20_WIDTH2_MASK     GENMASK(7, 0)
 #define RTMV20_LBPLVL_MASK     GENMASK(3, 0)
 #define RTMV20_LBPEN_MASK      BIT(7)
-#define RTMV20_STROBEPOL_MASK  BIT(1)
+#define RTMV20_STROBEPOL_MASK  BIT(0)
 #define RTMV20_VSYNPOL_MASK    BIT(1)
 #define RTMV20_FSINEN_MASK     BIT(7)
 #define RTMV20_ESEN_MASK       BIT(6)
index 8abb429..cc8237a 100644 (file)
@@ -371,8 +371,6 @@ __tapechar_ioctl(struct tape_device *device,
                        case MTSEEK:
                                if (device->required_tapemarks)
                                        tape_std_terminate_write(device);
-                       default:
-                               ;
                }
                rc = tape_mtop(device, op.mt_op, op.mt_count);
 
index b341075..377e368 100644 (file)
@@ -1454,6 +1454,7 @@ again:
                                get_ccwdev_lock(ch->cdev), saveflags);
                if (rc != 0)
                        ctcm_ccw_check_rc(ch, rc, "normal RX");
+               break;
        default:
                break;
        }
index d308ff7..f0d6f20 100644 (file)
@@ -434,6 +434,7 @@ static int qeth_l3_correct_routing_type(struct qeth_card *card,
                        if (qeth_is_ipafunc_supported(card, prot,
                                                      IPA_OSA_MC_ROUTER))
                                return 0;
+                       goto out_inval;
                default:
                        goto out_inval;
                }
index 544efd4..b8cd75a 100644 (file)
@@ -487,6 +487,7 @@ static ssize_t zfcp_sysfs_port_fc_security_show(struct device *dev,
        if (0 == (status & ZFCP_STATUS_COMMON_OPEN) ||
            0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) ||
            0 == (status & ZFCP_STATUS_PORT_PHYS_OPEN) ||
+           0 != (status & ZFCP_STATUS_PORT_LINK_TEST) ||
            0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED) ||
            0 != (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
                i = sprintf(buf, "unknown\n");
index 84fc7a0..4a84599 100644 (file)
@@ -2642,6 +2642,7 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
 //#endif
                clear_bit(SCpnt->device->id * 8 +
                          (u8)(SCpnt->device->lun & 0x7), host->busyluns);
+               fallthrough;
 
        /*
         * We found the command, and cleared it out.  Either
index 30ed3d2..9c4458a 100644 (file)
@@ -1375,6 +1375,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne
                case IS_COMPLETE:
                        break;
                }
+               break;
 
        default:
                break;
@@ -2010,7 +2011,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
                   "request sense complete, result=0x%04x%02x%02x",
                   result, SCpnt->SCp.Message, SCpnt->SCp.Status);
 
-       if (result != DID_OK || SCpnt->SCp.Status != GOOD)
+       if (result != DID_OK || SCpnt->SCp.Status != SAM_STAT_GOOD)
                /*
                 * Something went wrong.  Make sure that we don't
                 * have valid data in the sense buffer that could
index 929a3b0..3f6f14f 100644 (file)
@@ -488,6 +488,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
                shost_printk(KERN_WARNING, shost,
                        "error handler thread failed to spawn, error = %ld\n",
                        PTR_ERR(shost->ehandler));
+               shost->ehandler = NULL;
                goto fail;
        }
 
index 9f5068f..dd20541 100644 (file)
@@ -461,7 +461,7 @@ static void sas_discover_domain(struct work_struct *work)
                break;
 #else
                pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
-               /* Fall through */
+               fallthrough;
 #endif
                /* Fall through - only for the #else condition above. */
        default:
index 9eceafc..2dba2b0 100644 (file)
@@ -2607,14 +2607,13 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
                goto out;
        }
        drv_info->information_length = cpu_to_le32(data_len);
-       strncpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
-       strncpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
-       drv_info->os_name[sizeof(drv_info->os_name) - 1] = 0;
-       strncpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
-       drv_info->os_version[sizeof(drv_info->os_version) - 1] = 0;
-       strncpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
-       strncpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
-       strncpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE, sizeof(drv_info->driver_release_date));
+       strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
+       strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
+       strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
+       strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
+       strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
+       strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
+           sizeof(drv_info->driver_release_date));
        drv_info->driver_capabilities = 0;
        memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
            sizeof(mrioc->driver_info));
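
The mpi3mr hunk above replaces strncpy() with strscpy(), which always NUL-terminates the destination and so makes the manual `buf[sizeof(buf) - 1] = 0` lines unnecessary. A userspace model of the semantics being relied on, with a simplified return convention rather than the kernel's actual implementation:

/* Minimal model of strscpy(): always terminates, reports truncation. */
#include <stdio.h>
#include <string.h>

static long my_strscpy(char *dst, const char *src, size_t size)
{
	size_t len;

	if (size == 0)
		return -1;		/* -E2BIG in the kernel */
	len = strnlen(src, size);
	if (len == size) {		/* source longer than the buffer */
		len = size - 1;
		memcpy(dst, src, len);
		dst[len] = '\0';
		return -1;		/* truncation reported */
	}
	memcpy(dst, src, len + 1);	/* copy includes the NUL */
	return (long)len;
}

int main(void)
{
	char sig[9];

	/* strncpy() with an 8-byte source and no spare room could leave
	 * the buffer unterminated; strscpy() never does. */
	printf("%ld -> '%s'\n", my_strscpy(sig, "Broadcom", sizeof(sig)), sig);
	return 0;
}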
index c399552..19b1c0c 100644 (file)
@@ -2983,13 +2983,13 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 }
 
 /**
- * _base_free_irq - free irq
+ * mpt3sas_base_free_irq - free irq
  * @ioc: per adapter object
  *
  * Freeing respective reply_queue from the list.
  */
-static void
-_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
 {
        struct adapter_reply_queue *reply_q, *next;
 
@@ -3191,12 +3191,12 @@ _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
 }
 
 /**
- * _base_disable_msix - disables msix
+ * mpt3sas_base_disable_msix - disables msix
  * @ioc: per adapter object
  *
  */
-static void
-_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
 {
        if (!ioc->msix_enable)
                return;
@@ -3304,8 +3304,8 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
        for (i = 0; i < ioc->reply_queue_count; i++) {
                r = _base_request_irq(ioc, i);
                if (r) {
-                       _base_free_irq(ioc);
-                       _base_disable_msix(ioc);
+                       mpt3sas_base_free_irq(ioc);
+                       mpt3sas_base_disable_msix(ioc);
                        goto try_ioapic;
                }
        }
@@ -3342,8 +3342,8 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
 
        dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
-       _base_free_irq(ioc);
-       _base_disable_msix(ioc);
+       mpt3sas_base_free_irq(ioc);
+       mpt3sas_base_disable_msix(ioc);
 
        kfree(ioc->replyPostRegisterIndex);
        ioc->replyPostRegisterIndex = NULL;
@@ -7613,14 +7613,14 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 }
 
 /**
- * _base_make_ioc_ready - put controller in READY state
+ * mpt3sas_base_make_ioc_ready - put controller in READY state
  * @ioc: per adapter object
  * @type: FORCE_BIG_HAMMER or SOFT_RESET
  *
  * Return: 0 for success, non-zero for failure.
  */
-static int
-_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
+int
+mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
 {
        u32 ioc_state;
        int rc;
@@ -7897,7 +7897,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
        if (ioc->chip_phys && ioc->chip) {
                mpt3sas_base_mask_interrupts(ioc);
                ioc->shost_recovery = 1;
-               _base_make_ioc_ready(ioc, SOFT_RESET);
+               mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
                ioc->shost_recovery = 0;
        }
 
@@ -8017,7 +8017,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
        ioc->build_sg_mpi = &_base_build_sg;
        ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
 
-       r = _base_make_ioc_ready(ioc, SOFT_RESET);
+       r = mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
        if (r)
                goto out_free_resources;
 
@@ -8471,7 +8471,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
        _base_pre_reset_handler(ioc);
        mpt3sas_wait_for_commands_to_complete(ioc);
        mpt3sas_base_mask_interrupts(ioc);
-       r = _base_make_ioc_ready(ioc, type);
+       r = mpt3sas_base_make_ioc_ready(ioc, type);
        if (r)
                goto out;
        _base_clear_outstanding_commands(ioc);
index d4834c8..0c6c3df 100644 (file)
@@ -1730,6 +1730,10 @@ do {     ioc_err(ioc, "In func: %s\n", __func__); \
        status, mpi_request, sz); } while (0)
 
 int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count);
+int
+mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type);
+void mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc);
 
 /* scsih shared API */
 struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
index 866d118..8e64a6f 100644 (file)
@@ -11295,7 +11295,12 @@ scsih_shutdown(struct pci_dev *pdev)
 
        _scsih_ir_shutdown(ioc);
        _scsih_nvme_shutdown(ioc);
-       mpt3sas_base_detach(ioc);
+       mpt3sas_base_mask_interrupts(ioc);
+       ioc->shost_recovery = 1;
+       mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
+       ioc->shost_recovery = 0;
+       mpt3sas_base_free_irq(ioc);
+       mpt3sas_base_disable_msix(ioc);
 }
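
Taken together, the mpt3sas hunks above export three formerly static helpers so that scsih_shutdown() can quiesce the controller in place instead of running the full mpt3sas_base_detach() teardown. A stub sketch of the resulting shutdown ordering; the bodies are placeholders that only trace the sequence, not real hardware accesses.

/* Stub model of the new shutdown path. */
#include <stdio.h>

struct ioc { int shost_recovery; };

static void base_mask_interrupts(struct ioc *ioc) { puts("mask interrupts"); }
static int base_make_ioc_ready(struct ioc *ioc) { puts("soft reset -> READY"); return 0; }
static void base_free_irq(struct ioc *ioc) { puts("free per-queue irqs"); }
static void base_disable_msix(struct ioc *ioc) { puts("disable msi-x"); }

static void scsih_shutdown(struct ioc *ioc)
{
	base_mask_interrupts(ioc);
	ioc->shost_recovery = 1;	/* block new I/O during the reset */
	base_make_ioc_ready(ioc);
	ioc->shost_recovery = 0;
	base_free_irq(ioc);
	base_disable_msix(ioc);
}

int main(void)
{
	struct ioc ioc = { 0 };

	scsih_shutdown(&ioc);
	return 0;
}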
 
 
index 0b8802b..ec05c42 100644 (file)
@@ -77,7 +77,7 @@ DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
  * @attr: device attribute (unused)
  * @buf: the buffer returned
  *
- * A sysfs 'read only' shost attribute.
+ * A sysfs 'read-only' shost attribute.
  */
 static ssize_t controller_fatal_error_show(struct device *cdev,
                struct device_attribute *attr, char *buf)
@@ -149,7 +149,7 @@ static ssize_t pm8001_ctl_ila_version_show(struct device *cdev,
 static DEVICE_ATTR(ila_version, 0444, pm8001_ctl_ila_version_show, NULL);
 
 /**
- * pm8001_ctl_inactive_fw_version_show - Inacative firmware version number
+ * pm8001_ctl_inactive_fw_version_show - Inactive firmware version number
  * @cdev: pointer to embedded class device
  * @attr: device attribute (unused)
  * @buf: the buffer returned
@@ -396,6 +396,7 @@ static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL);
  * @cdev:pointer to embedded class device
  * @attr: device attribute (unused)
  * @buf: the buffer returned
+ *
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
@@ -430,6 +431,7 @@ static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL);
  * @cdev:pointer to embedded class device
  * @attr: device attribute (unused)
  * @buf: the buffer returned
+ *
  * A sysfs 'read-only' shost attribute.
  */
 
@@ -464,6 +466,7 @@ static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL);
  * @cdev:pointer to embedded class device
  * @attr: device attribute (unused)
  * @buf:the buffer returned
+ *
  * A sysfs 'read-only' shost attribute.
  */
 static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
@@ -555,13 +558,13 @@ static ssize_t pm8001_ctl_iop_log_show(struct device *cdev,
 static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
 
 /**
- ** pm8001_ctl_fatal_log_show - fatal error logging
- ** @cdev:pointer to embedded class device
- ** @attr: device attribute
- ** @buf: the buffer returned
- **
- ** A sysfs 'read-only' shost attribute.
- **/
+ * pm8001_ctl_fatal_log_show - fatal error logging
+ * @cdev:pointer to embedded class device
+ * @attr: device attribute
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
 
 static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
        struct device_attribute *attr, char *buf)
@@ -575,13 +578,13 @@ static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
 static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL);
 
 /**
- ** non_fatal_log_show - non fatal error logging
- ** @cdev:pointer to embedded class device
- ** @attr: device attribute
- ** @buf: the buffer returned
- **
- ** A sysfs 'read-only' shost attribute.
- **/
+ * non_fatal_log_show - non fatal error logging
+ * @cdev:pointer to embedded class device
+ * @attr: device attribute
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
 static ssize_t non_fatal_log_show(struct device *cdev,
        struct device_attribute *attr, char *buf)
 {
@@ -620,12 +623,13 @@ static ssize_t non_fatal_count_store(struct device *cdev,
 static DEVICE_ATTR_RW(non_fatal_count);
 
 /**
- ** pm8001_ctl_gsm_log_show - gsm dump collection
- ** @cdev:pointer to embedded class device
- ** @attr: device attribute (unused)
- ** @buf: the buffer returned
- ** A sysfs 'read-only' shost attribute.
- **/
+ * pm8001_ctl_gsm_log_show - gsm dump collection
+ * @cdev:pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
 static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev,
        struct device_attribute *attr, char *buf)
 {
index 33f8217..17c0f26 100644 (file)
@@ -384,7 +384,7 @@ static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
 
 /**
  * pm8001_bar4_shift - function is called to shift BAR base address
- * @pm8001_ha : our hba card infomation
+ * @pm8001_ha : our hba card information
  * @shiftValue : shifting value in memory bar.
  */
 int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
@@ -1151,7 +1151,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
 }
 
 /**
- * pm8001_chip_iounmap - which maped when initialized.
+ * pm8001_chip_iounmap - which mapped when initialized.
  * @pm8001_ha: our hba card information
  */
 void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
@@ -1187,10 +1187,10 @@ pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
        pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
 }
 
- /**
- * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt
- * @pm8001_ha: our hba card information
- */
+/**
+ * pm8001_chip_intx_interrupt_disable - disable PM8001 chip interrupt
+ * @pm8001_ha: our hba card information
+ */
 static void
 pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
 {
@@ -1876,8 +1876,8 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
  * @piomb: the message contents of this outbound message.
  *
  * When FW has completed a ssp request for example a IO request, after it has
- * filled the SG data with the data, it will trigger this event represent
- * that he has finished the job,please check the coresponding buffer.
+ * filled the SG data with the data, it will trigger this event representing
+ * that he has finished the job; please check the corresponding buffer.
  * So we will tell the caller who maybe waiting the result to tell upper layer
  * that the task has been finished.
  */
@@ -3522,7 +3522,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
  *
  * when sas layer find a device it will notify LLDD, then the driver register
  * the domain device to FW, this event is the return device ID which the FW
- * has assigned, from now,inter-communication with FW is no longer using the
+ * has assigned, from now, inter-communication with FW is no longer using the
  * SAS address, use device ID which FW assigned.
  */
 int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
index 313248c..47db7e0 100644 (file)
@@ -233,7 +233,7 @@ static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
 /**
  * pm8001_interrupt_handler_intx - main INTx interrupt handler.
  * @irq: interrupt number
- * @dev_id: sas_ha structure. The HBA is retrieved from sas_has structure.
+ * @dev_id: sas_ha structure. The HBA is retrieved from sas_ha structure.
  */
 
 static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
@@ -439,9 +439,9 @@ err_out:
 }
 
 /**
- * pm8001_ioremap - remap the pci high physical address to kernal virtual
+ * pm8001_ioremap - remap the pci high physical address to kernel virtual
  * address so that we can access them.
- * @pm8001_ha:our hba structure.
+ * @pm8001_ha: our hba structure.
  */
 static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
 {
@@ -652,7 +652,7 @@ static void  pm8001_post_sas_ha_init(struct Scsi_Host *shost,
  * pm8001_init_sas_add - initialize sas address
  * @pm8001_ha: our ha struct.
  *
- * Currently we just set the fixed SAS address to our HBA,for manufacture,
+ * Currently we just set the fixed SAS address to our HBA, for manufacture,
  * it should read from the EEPROM
  */
 static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
@@ -790,7 +790,7 @@ struct pm8001_mpi3_phy_pg_trx_config {
 };
 
 /**
- * pm8001_get_internal_phy_settings : Retrieves the internal PHY settings
+ * pm8001_get_internal_phy_settings - Retrieves the internal PHY settings
  * @pm8001_ha : our adapter
  * @phycfg : PHY config page to populate
  */
@@ -810,7 +810,7 @@ void pm8001_get_internal_phy_settings(struct pm8001_hba_info *pm8001_ha,
 }
 
 /**
- * pm8001_get_external_phy_settings : Retrieves the external PHY settings
+ * pm8001_get_external_phy_settings - Retrieves the external PHY settings
  * @pm8001_ha : our adapter
  * @phycfg : PHY config page to populate
  */
@@ -830,7 +830,7 @@ void pm8001_get_external_phy_settings(struct pm8001_hba_info *pm8001_ha,
 }
 
 /**
- * pm8001_get_phy_mask : Retrieves the mask that denotes if a PHY is int/ext
+ * pm8001_get_phy_mask - Retrieves the mask that denotes if a PHY is int/ext
  * @pm8001_ha : our adapter
  * @phymask : The PHY mask
  */
@@ -868,7 +868,7 @@ void pm8001_get_phy_mask(struct pm8001_hba_info *pm8001_ha, int *phymask)
 }
 
 /**
- * pm8001_set_phy_settings_ven_117c_12G() : Configure ATTO 12Gb PHY settings
+ * pm8001_set_phy_settings_ven_117c_12G() - Configure ATTO 12Gb PHY settings
  * @pm8001_ha : our adapter
  */
 static
@@ -903,7 +903,7 @@ int pm8001_set_phy_settings_ven_117c_12G(struct pm8001_hba_info *pm8001_ha)
 }
 
 /**
- * pm8001_configure_phy_settings : Configures PHY settings based on vendor ID.
+ * pm8001_configure_phy_settings - Configures PHY settings based on vendor ID.
  * @pm8001_ha : our hba.
  */
 static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha)
@@ -1053,8 +1053,8 @@ intx:
  * @ent: pci device id
  *
  * This function is the main initialization function, when register a new
- * pci driver it is invoked, all struct an hardware initilization should be done
- * here, also, register interrupt
+ * pci driver it is invoked, all struct and hardware initialization should be
+ * done here, also, register interrupt.
  */
 static int pm8001_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *ent)
@@ -1172,10 +1172,11 @@ err_out_enable:
        return rc;
 }
 
-/*
+/**
  * pm8001_init_ccb_tag - allocate memory to CCB and tag.
  * @pm8001_ha: our hba card information.
  * @shost: scsi host which has been allocated outside.
+ * @pdev: pci device.
  */
 static int
 pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
@@ -1270,7 +1271,7 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
  * pm8001_pci_suspend - power management suspend main entry point
  * @dev: Device struct
  *
- * Returns 0 success, anything else error.
+ * Return: 0 on success, anything else on error.
  */
 static int __maybe_unused pm8001_pci_suspend(struct device *dev)
 {
@@ -1315,7 +1316,7 @@ static int __maybe_unused pm8001_pci_suspend(struct device *dev)
  * pm8001_pci_resume - power management resume main entry point
  * @dev: Device struct
  *
- * Returns 0 success, anything else error.
+ * Return: 0 on success, anything else on error.
  */
 static int __maybe_unused pm8001_pci_resume(struct device *dev)
 {
index 6f33d82..48548a9 100644 (file)
@@ -98,14 +98,16 @@ void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
                pm8001_tag_free(pm8001_ha, i);
 }
 
- /**
-  * pm8001_mem_alloc - allocate memory for pm8001.
-  * @pdev: pci device.
-  * @virt_addr: the allocated virtual address
-  * @pphys_addr_hi: the physical address high byte address.
-  * @pphys_addr_lo: the physical address low byte address.
-  * @mem_size: memory size.
-  */
+/**
+ * pm8001_mem_alloc - allocate memory for pm8001.
+ * @pdev: pci device.
+ * @virt_addr: the allocated virtual address
+ * @pphys_addr: DMA address for this device
+ * @pphys_addr_hi: the physical address high byte address.
+ * @pphys_addr_lo: the physical address low byte address.
+ * @mem_size: memory size.
+ * @align: requested byte alignment
+ */
 int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
        dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
        u32 *pphys_addr_lo, u32 mem_size, u32 align)
@@ -339,7 +341,7 @@ static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
 }
 
 /**
-  * pm8001_task_prep_ssp - the dispatcher function,prepare ssp data for ssp task
+  * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for ssp task
   * @pm8001_ha: our hba card information
   * @ccb: the ccb which attached to ssp task
   */
@@ -554,10 +556,10 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
        pm8001_tag_free(pm8001_ha, ccb_idx);
 }
 
- /**
- * pm8001_alloc_dev - find a empty pm8001_device
- * @pm8001_ha: our hba card information
- */
+/**
+ * pm8001_alloc_dev - find an empty pm8001_device
+ * @pm8001_ha: our hba card information
+ */
 static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
 {
        u32 dev;
@@ -705,7 +707,7 @@ static void pm8001_tmf_timedout(struct timer_list *t)
   * @parameter: ssp task parameter.
   *
   * when errors or exception happened, we may want to do something, for example
-  * abort the issued task which result in this execption, it is done by calling
+  * abort the issued task which result in this exception, it is done by calling
   * this function, note it is also with the task execute interface.
   */
 static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
@@ -984,11 +986,12 @@ void pm8001_open_reject_retry(
 }
 
 /**
- * pm8001_I_T_nexus_reset()
-  * Standard mandates link reset for ATA  (type 0) and hard reset for
-  * SSP (type 1) , only for RECOVERY
-  * @dev: the device structure for the device to reset.
-  */
+ * pm8001_I_T_nexus_reset() - reset the initiator/target connection
+ * @dev: the device structure for the device to reset.
+ *
+ * Standard mandates link reset for ATA (type 0) and hard reset for
+ * SSP (type 1), only for RECOVERY
+ */
 int pm8001_I_T_nexus_reset(struct domain_device *dev)
 {
        int rc = TMF_RESP_FUNC_FAILED;
index 45ecd96..6ffe17b 100644 (file)
@@ -140,7 +140,7 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
                pm8001_ha->fatal_bar_loc = 0;
        }
 
-       /* Read until accum_len is retrived */
+       /* Read until accum_len is retrieved */
        accum_len = pm8001_mr32(fatal_table_address,
                                MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
        /* Determine length of data between previously stored transfer length
@@ -1011,7 +1011,7 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
                           value);
                return -EBUSY;
        }
-       /* check the MPI-State for initialization upto 100ms*/
+       /* check the MPI-State for initialization up to 100ms*/
        max_wait_count = 5;/* 100 msec */
        do {
                msleep(FW_READY_INTERVAL);
@@ -1093,7 +1093,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
 
        value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
 
-       /**
+       /*
         * lower 26 bits of SCRATCHPAD0 register describes offset within the
         * PCIe BAR where the MPI configuration table is present
         */
@@ -1101,7 +1101,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
 
        pm8001_dbg(pm8001_ha, DEV, "Scratchpad 0 Offset: 0x%x value 0x%x\n",
                   offset, value);
-       /**
+       /*
         * Upper 6 bits describe the offset within PCI config space where BAR
         * is located.
         */
@@ -1109,7 +1109,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
        pcibar = get_pci_bar_index(pcilogic);
        pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 PCI BAR: %d\n", pcibar);
 
-       /**
+       /*
         * Make sure the offset falls inside the ioremapped PCI BAR
         */
        if (offset > pm8001_ha->io_mem[pcibar].memsize) {
@@ -1121,7 +1121,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
        pm8001_ha->main_cfg_tbl_addr = base_addr =
                pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
 
-       /**
+       /*
         * Validate main configuration table address: first DWord should read
         * "PMCS"
         */
@@ -1385,7 +1385,7 @@ pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
 }
 
 /**
- * pm80xx_encrypt_update - update flash with encryption informtion
+ * pm80xx_encrypt_update - update flash with encryption information
  * @pm8001_ha: our hba card information.
  */
 static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
@@ -1422,7 +1422,7 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
 }
 
 /**
- * pm80xx_chip_init - the main init function that initialize whole PM8001 chip.
+ * pm80xx_chip_init - the main init function that initializes whole PM8001 chip.
  * @pm8001_ha: our hba card information
  */
 static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
@@ -1541,7 +1541,7 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
 }
 
 /**
- * pm80xx_fatal_errors - returns non zero *ONLY* when fatal errors
+ * pm80xx_fatal_errors - returns non-zero *ONLY* when fatal errors
  * @pm8001_ha: our hba card information
  *
  * Fatal errors are recoverable only after a host reboot.
@@ -1576,8 +1576,8 @@ pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha)
 }
 
 /**
- * pm80xx_chip_soft_rst - soft reset the PM8001 chip, so that the clear all
- * the FW register status to the originated status.
+ * pm80xx_chip_soft_rst - soft reset the PM8001 chip, so that all
+ * FW register status are reset to the originated status.
  * @pm8001_ha: our hba card information
  */
 
@@ -1895,13 +1895,13 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
 }
 
 /**
- * mpi_ssp_completion- process the event that FW response to the SSP request.
+ * mpi_ssp_completion - process the event that FW response to the SSP request.
  * @pm8001_ha: our hba card information
  * @piomb: the message contents of this outbound message.
  *
  * When FW has completed a ssp request for example a IO request, after it has
- * filled the SG data with the data, it will trigger this event represent
- * that he has finished the job,please check the coresponding buffer.
+ * filled the SG data with the data, it will trigger this event representing
+ * that he has finished the job; please check the corresponding buffer.
  * So we will tell the caller who maybe waiting the result to tell upper layer
  * that the task has been finished.
  */
@@ -3217,7 +3217,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 }
 
 /**
- * pm80xx_hw_event_ack_req- For PM8001,some events need to acknowage to FW.
+ * pm80xx_hw_event_ack_req- For PM8001, some events need to acknowledge to FW.
  * @pm8001_ha: our hba card information
  * @Qnum: the outbound queue message number.
  * @SEA: source of event to ack
@@ -3275,7 +3275,7 @@ static void hw_event_port_recover(struct pm8001_hba_info *pm8001_ha,
 }
 
 /**
- * hw_event_sas_phy_up -FW tells me a SAS phy up event.
+ * hw_event_sas_phy_up - FW tells me a SAS phy up event.
  * @pm8001_ha: our hba card information
  * @piomb: IO message buffer
  */
@@ -3353,7 +3353,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
 }
 
 /**
- * hw_event_sata_phy_up -FW tells me a SATA phy up event.
+ * hw_event_sata_phy_up - FW tells me a SATA phy up event.
  * @pm8001_ha: our hba card information
  * @piomb: IO message buffer
  */
@@ -3400,7 +3400,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
 }
 
 /**
- * hw_event_phy_down -we should notify the libsas the phy is down.
+ * hw_event_phy_down - we should notify the libsas the phy is down.
  * @pm8001_ha: our hba card information
  * @piomb: IO message buffer
  */
@@ -3500,7 +3500,7 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 }
 
 /**
- * mpi_thermal_hw_event -The hw event has come.
+ * mpi_thermal_hw_event - a thermal hw event has come.
  * @pm8001_ha: our hba card information
  * @piomb: IO message buffer
  */
@@ -3530,7 +3530,7 @@ static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
 }
 
 /**
- * mpi_hw_event -The hw event has come.
+ * mpi_hw_event - The hw event has come.
  * @pm8001_ha: our hba card information
  * @piomb: IO message buffer
  */
@@ -4025,7 +4025,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_SET_DEV_INFO:
                pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEV_INFO\n");
                break;
-       /* spcv specifc commands */
+       /* spcv specific commands */
        case OPC_OUB_PHY_START_RESP:
                pm8001_dbg(pm8001_ha, MSG,
                           "OPC_OUB_PHY_START_RESP opcode:%x\n", opc);
@@ -4186,7 +4186,7 @@ static void build_smp_cmd(u32 deviceID, __le32 hTag,
 }
 
 /**
- * pm80xx_chip_smp_req - send a SMP task to FW
+ * pm80xx_chip_smp_req - send an SMP task to FW
  * @pm8001_ha: our hba card information.
  * @ccb: the ccb information this request used.
  */
@@ -4346,7 +4346,7 @@ static int check_enc_sat_cmd(struct sas_task *task)
 }
 
 /**
- * pm80xx_chip_ssp_io_req - send a SSP task to FW
+ * pm80xx_chip_ssp_io_req - send an SSP task to FW
  * @pm8001_ha: our hba card information.
  * @ccb: the ccb information this request used.
  */
@@ -4750,13 +4750,13 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
        payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
                        LINKMODE_AUTO | pm8001_ha->link_rate | phy_id);
        /* SSC Disable and SAS Analog ST configuration */
-       /**
+       /*
        payload.ase_sh_lm_slr_phyid =
                cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE |
                LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 |
                phy_id);
        Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need
-       **/
+       */
 
        payload.sas_identify.dev_type = SAS_END_DEVICE;
        payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
index 8f9727e..7456a26 100644 (file)
@@ -194,7 +194,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
  * @bufflen:   len of buffer
  * @sense:     optional sense buffer
  * @sshdr:     optional decoded sense header
- * @timeout:   request timeout in seconds
+ * @timeout:   request timeout in HZ
  * @retries:   number of times to retry request
  * @flags:     flags for ->cmd_flags
  * @rq_flags:  flags for ->rq_flags
index b07105a..d8b05d8 100644 (file)
@@ -439,39 +439,10 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
        struct device *dev = container_of(kobj, struct device, kobj);
        struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
        struct iscsi_transport *t = iface->transport;
-       int param;
-       int param_type;
+       int param = -1;
 
        if (attr == &dev_attr_iface_enabled.attr)
                param = ISCSI_NET_PARAM_IFACE_ENABLE;
-       else if (attr == &dev_attr_iface_vlan_id.attr)
-               param = ISCSI_NET_PARAM_VLAN_ID;
-       else if (attr == &dev_attr_iface_vlan_priority.attr)
-               param = ISCSI_NET_PARAM_VLAN_PRIORITY;
-       else if (attr == &dev_attr_iface_vlan_enabled.attr)
-               param = ISCSI_NET_PARAM_VLAN_ENABLED;
-       else if (attr == &dev_attr_iface_mtu.attr)
-               param = ISCSI_NET_PARAM_MTU;
-       else if (attr == &dev_attr_iface_port.attr)
-               param = ISCSI_NET_PARAM_PORT;
-       else if (attr == &dev_attr_iface_ipaddress_state.attr)
-               param = ISCSI_NET_PARAM_IPADDR_STATE;
-       else if (attr == &dev_attr_iface_delayed_ack_en.attr)
-               param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
-       else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
-               param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
-       else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
-               param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
-       else if (attr == &dev_attr_iface_tcp_wsf.attr)
-               param = ISCSI_NET_PARAM_TCP_WSF;
-       else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
-               param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
-       else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
-               param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
-       else if (attr == &dev_attr_iface_cache_id.attr)
-               param = ISCSI_NET_PARAM_CACHE_ID;
-       else if (attr == &dev_attr_iface_redirect_en.attr)
-               param = ISCSI_NET_PARAM_REDIRECT_EN;
        else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
                param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
        else if (attr == &dev_attr_iface_header_digest.attr)
@@ -508,6 +479,38 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
                param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN;
        else if (attr == &dev_attr_iface_initiator_name.attr)
                param = ISCSI_IFACE_PARAM_INITIATOR_NAME;
+
+       if (param != -1)
+               return t->attr_is_visible(ISCSI_IFACE_PARAM, param);
+
+       if (attr == &dev_attr_iface_vlan_id.attr)
+               param = ISCSI_NET_PARAM_VLAN_ID;
+       else if (attr == &dev_attr_iface_vlan_priority.attr)
+               param = ISCSI_NET_PARAM_VLAN_PRIORITY;
+       else if (attr == &dev_attr_iface_vlan_enabled.attr)
+               param = ISCSI_NET_PARAM_VLAN_ENABLED;
+       else if (attr == &dev_attr_iface_mtu.attr)
+               param = ISCSI_NET_PARAM_MTU;
+       else if (attr == &dev_attr_iface_port.attr)
+               param = ISCSI_NET_PARAM_PORT;
+       else if (attr == &dev_attr_iface_ipaddress_state.attr)
+               param = ISCSI_NET_PARAM_IPADDR_STATE;
+       else if (attr == &dev_attr_iface_delayed_ack_en.attr)
+               param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
+       else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
+               param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
+       else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
+               param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
+       else if (attr == &dev_attr_iface_tcp_wsf.attr)
+               param = ISCSI_NET_PARAM_TCP_WSF;
+       else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
+               param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
+       else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
+               param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
+       else if (attr == &dev_attr_iface_cache_id.attr)
+               param = ISCSI_NET_PARAM_CACHE_ID;
+       else if (attr == &dev_attr_iface_redirect_en.attr)
+               param = ISCSI_NET_PARAM_REDIRECT_EN;
        else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
                if (attr == &dev_attr_ipv4_iface_ipaddress.attr)
                        param = ISCSI_NET_PARAM_IPV4_ADDR;
@@ -598,32 +601,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
                return 0;
        }
 
-       switch (param) {
-       case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
-       case ISCSI_IFACE_PARAM_HDRDGST_EN:
-       case ISCSI_IFACE_PARAM_DATADGST_EN:
-       case ISCSI_IFACE_PARAM_IMM_DATA_EN:
-       case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
-       case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
-       case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
-       case ISCSI_IFACE_PARAM_ERL:
-       case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
-       case ISCSI_IFACE_PARAM_FIRST_BURST:
-       case ISCSI_IFACE_PARAM_MAX_R2T:
-       case ISCSI_IFACE_PARAM_MAX_BURST:
-       case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
-       case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
-       case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
-       case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
-       case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
-       case ISCSI_IFACE_PARAM_INITIATOR_NAME:
-               param_type = ISCSI_IFACE_PARAM;
-               break;
-       default:
-               param_type = ISCSI_NET_PARAM;
-       }
-
-       return t->attr_is_visible(param_type, param);
+       return t->attr_is_visible(ISCSI_NET_PARAM, param);
 }
 
 static struct attribute *iscsi_iface_attrs[] = {
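
The iscsi_transport hunks above split attribute classification into two passes: interface-level parameters are matched first and dispatched immediately with ISCSI_IFACE_PARAM, and anything that falls through is treated as an ISCSI_NET_PARAM, which removes the trailing switch that re-derived the parameter type. A compressed sketch of that control flow; the attribute ids and param values are placeholders, not the real enum members.

/* Compressed model of the two-pass visibility check. */
#include <stdio.h>

enum param_type { ISCSI_IFACE_PARAM, ISCSI_NET_PARAM };
enum attr_id { ATTR_HEADER_DIGEST, ATTR_VLAN_ID, ATTR_MTU };

static int attr_is_visible(enum param_type type, int param)
{
	printf("attr_is_visible(type=%d, param=%d)\n", type, param);
	return 0444;
}

static int iface_attr_is_visible(enum attr_id attr)
{
	int param = -1;

	/* Pass 1: interface parameters return as soon as they match. */
	if (attr == ATTR_HEADER_DIGEST)
		param = 10;
	if (param != -1)
		return attr_is_visible(ISCSI_IFACE_PARAM, param);

	/* Pass 2: everything left is a network parameter. */
	if (attr == ATTR_VLAN_ID)
		param = 20;
	else if (attr == ATTR_MTU)
		param = 21;
	else
		return 0;

	return attr_is_visible(ISCSI_NET_PARAM, param);
}

int main(void)
{
	iface_attr_is_visible(ATTR_HEADER_DIGEST);
	iface_attr_is_visible(ATTR_MTU);
	return 0;
}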
index 6d2d636..b8d55af 100644 (file)
@@ -98,11 +98,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
 MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
 MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
 
-#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
 #define SD_MINORS      16
-#else
-#define SD_MINORS      0
-#endif
 
 static void sd_config_discard(struct scsi_disk *, unsigned int);
 static void sd_config_write_same(struct scsi_disk *);
index c98d540..194755c 100644 (file)
@@ -1229,8 +1229,13 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
 static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
                                        bool is_scsi_cmd)
 {
-       if (hba->vops && hba->vops->setup_xfer_req)
-               return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
+       if (hba->vops && hba->vops->setup_xfer_req) {
+               unsigned long flags;
+
+               spin_lock_irqsave(hba->host->host_lock, flags);
+               hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+       }
 }
 
 static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
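
The ufshcd hunk above stops invoking the setup_xfer_req vendor hook bare and instead calls it under the host lock, so it cannot race with the completion path. A userspace sketch of the shape of that change, with a pthread mutex standing in for spin_lock_irqsave(); the struct layout is illustrative, not the real ufs_hba.

/* Sketch: vendor hook now runs with the host lock held. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct hba {
	pthread_mutex_t host_lock;
	void (*setup_xfer_req)(struct hba *hba, int tag, bool is_scsi_cmd);
};

static void vendor_setup_xfer_req(struct hba *hba, int tag, bool is_scsi_cmd)
{
	printf("setup tag %d (scsi=%d) under host lock\n", tag, is_scsi_cmd);
}

static void vops_setup_xfer_req(struct hba *hba, int tag, bool is_scsi_cmd)
{
	if (hba->setup_xfer_req) {
		pthread_mutex_lock(&hba->host_lock);
		hba->setup_xfer_req(hba, tag, is_scsi_cmd);
		pthread_mutex_unlock(&hba->host_lock);
	}
}

int main(void)
{
	struct hba hba = {
		.host_lock = PTHREAD_MUTEX_INITIALIZER,
		.setup_xfer_req = vendor_setup_xfer_req,
	};

	vops_setup_xfer_req(&hba, 7, true);
	return 0;
}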
index 654c717..714fa92 100644 (file)
@@ -71,6 +71,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
                .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
                .sram_pdn_bits = GENMASK(11, 8),
                .sram_pdn_ack_bits = 0,
+               .caps = MTK_SCPD_DOMAIN_SUPPLY,
        },
        [MT8173_POWER_DOMAIN_MFG_2D] = {
                .name = "mfg_2d",
index 579dfc8..9dee485 100644 (file)
 static const struct mtk_mmsys_routes mmsys_mt8183_routing_table[] = {
        {
                DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
-               MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L
+               MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L,
+               MT8183_OVL0_MOUT_EN_OVL0_2L
        }, {
                DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
-               MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0
+               MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0,
+               MT8183_OVL0_2L_MOUT_EN_DISP_PATH0
        }, {
                DDP_COMPONENT_OVL_2L1, DDP_COMPONENT_RDMA1,
-               MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1
+               MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1,
+               MT8183_OVL1_2L_MOUT_EN_RDMA1
        }, {
                DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
-               MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0
+               MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0,
+               MT8183_DITHER0_MOUT_IN_DSI0
        }, {
                DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
-               MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L
+               MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L,
+               MT8183_DISP_PATH0_SEL_IN_OVL0_2L
        }, {
                DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
-               MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1
+               MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1,
+               MT8183_DPI0_SEL_IN_RDMA1
        }, {
                DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
-               MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0
+               MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0,
+               MT8183_RDMA0_SOUT_COLOR0
        }
 };
 
diff --git a/drivers/soc/mediatek/mt8365-mmsys.h b/drivers/soc/mediatek/mt8365-mmsys.h
new file mode 100644 (file)
index 0000000..690e3fe
--- /dev/null
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8365_MMSYS_H
+#define __SOC_MEDIATEK_MT8365_MMSYS_H
+
+#define MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN       0xf3c
+#define MT8365_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL     0xf4c
+#define MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN    0xf50
+#define MT8365_DISP_REG_CONFIG_DISP_RDMA0_SEL_IN       0xf54
+#define MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN  0xf60
+#define MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN      0xf64
+#define MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN                0xf68
+
+#define MT8365_RDMA0_SOUT_COLOR0                       0x1
+#define MT8365_DITHER_MOUT_EN_DSI0                     0x1
+#define MT8365_DSI0_SEL_IN_DITHER                      0x1
+#define MT8365_RDMA0_SEL_IN_OVL0                       0x0
+#define MT8365_RDMA0_RSZ0_SEL_IN_RDMA0                 0x0
+#define MT8365_DISP_COLOR_SEL_IN_COLOR0                        0x0
+#define MT8365_OVL0_MOUT_PATH0_SEL                     BIT(0)
+
+static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = {
+       {
+               DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
+               MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
+               MT8365_OVL0_MOUT_PATH0_SEL, MT8365_OVL0_MOUT_PATH0_SEL
+       },
+       {
+               DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
+               MT8365_DISP_REG_CONFIG_DISP_RDMA0_SEL_IN,
+               MT8365_RDMA0_SEL_IN_OVL0, MT8365_RDMA0_SEL_IN_OVL0
+       },
+       {
+               DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
+               MT8365_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL,
+               MT8365_RDMA0_SOUT_COLOR0, MT8365_RDMA0_SOUT_COLOR0
+       },
+       {
+               DDP_COMPONENT_COLOR0, DDP_COMPONENT_CCORR,
+               MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
+               MT8365_DISP_COLOR_SEL_IN_COLOR0, MT8365_DISP_COLOR_SEL_IN_COLOR0
+       },
+       {
+               DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
+               MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN,
+               MT8365_DITHER_MOUT_EN_DSI0, MT8365_DITHER_MOUT_EN_DSI0
+       },
+       {
+               DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
+               MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN,
+               MT8365_DSI0_SEL_IN_DITHER, MT8365_DSI0_SEL_IN_DITHER
+       },
+       {
+               DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
+               MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN,
+               MT8365_RDMA0_RSZ0_SEL_IN_RDMA0, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0
+       },
+};
+
+#endif /* __SOC_MEDIATEK_MT8365_MMSYS_H */
index 080660e..a78e88f 100644 (file)
@@ -13,6 +13,7 @@
 #include "mtk-mmsys.h"
 #include "mt8167-mmsys.h"
 #include "mt8183-mmsys.h"
+#include "mt8365-mmsys.h"
 
 static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
        .clk_driver = "clk-mt2701-mm",
@@ -52,6 +53,12 @@ static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
        .num_routes = ARRAY_SIZE(mmsys_mt8183_routing_table),
 };
 
+static const struct mtk_mmsys_driver_data mt8365_mmsys_driver_data = {
+       .clk_driver = "clk-mt8365-mm",
+       .routes = mt8365_mmsys_routing_table,
+       .num_routes = ARRAY_SIZE(mt8365_mmsys_routing_table),
+};
+
 struct mtk_mmsys {
        void __iomem *regs;
        const struct mtk_mmsys_driver_data *data;
@@ -68,7 +75,9 @@ void mtk_mmsys_ddp_connect(struct device *dev,
 
        for (i = 0; i < mmsys->data->num_routes; i++)
                if (cur == routes[i].from_comp && next == routes[i].to_comp) {
-                       reg = readl_relaxed(mmsys->regs + routes[i].addr) | routes[i].val;
+                       reg = readl_relaxed(mmsys->regs + routes[i].addr);
+                       reg &= ~routes[i].mask;
+                       reg |= routes[i].val;
                        writel_relaxed(reg, mmsys->regs + routes[i].addr);
                }
 }
@@ -85,7 +94,8 @@ void mtk_mmsys_ddp_disconnect(struct device *dev,
 
        for (i = 0; i < mmsys->data->num_routes; i++)
                if (cur == routes[i].from_comp && next == routes[i].to_comp) {
-                       reg = readl_relaxed(mmsys->regs + routes[i].addr) & ~routes[i].val;
+                       reg = readl_relaxed(mmsys->regs + routes[i].addr);
+                       reg &= ~routes[i].mask;
                        writel_relaxed(reg, mmsys->regs + routes[i].addr);
                }
 }
@@ -157,6 +167,10 @@ static const struct of_device_id of_match_mtk_mmsys[] = {
                .compatible = "mediatek,mt8183-mmsys",
                .data = &mt8183_mmsys_driver_data,
        },
+       {
+               .compatible = "mediatek,mt8365-mmsys",
+               .data = &mt8365_mmsys_driver_data,
+       },
        { }
 };
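
The mtk-mmsys hunks above turn the routing updates into mask-based read-modify-write: connect now clears the whole selector field before OR-ing in the new value, and disconnect clears the field rather than only the bits of one particular value. A self-contained sketch of why the mask matters for multi-bit selector fields; a plain array stands in for the ioremapped registers.

/* Sketch: mask-based RMW vs. the old OR-only update. */
#include <stdint.h>
#include <stdio.h>

struct route { uint32_t addr, mask, val; };

static uint32_t regs[16];

static void ddp_connect(const struct route *r)
{
	uint32_t reg = regs[r->addr];

	reg &= ~r->mask;	/* clear the whole selector field */
	reg |= r->val;		/* then program the new route */
	regs[r->addr] = reg;
}

static void ddp_disconnect(const struct route *r)
{
	regs[r->addr] &= ~r->mask;
}

int main(void)
{
	/* e.g. a 3-bit SOUT selector field, new route value 0x4 */
	const struct route r = { .addr = 1, .mask = 0x7, .val = 0x4 };

	regs[1] = 0x5;		/* stale routing left behind */
	ddp_connect(&r);
	/* prints 0x4; OR-only would have left 0x5, the old bug */
	printf("after connect: 0x%x\n", regs[1]);
	ddp_disconnect(&r);
	printf("after disconnect: 0x%x\n", regs[1]);
	return 0;
}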
 
index a760a34..9e2b81b 100644 (file)
 #define RDMA0_SOUT_DSI1                                0x1
 #define RDMA0_SOUT_DSI2                                0x4
 #define RDMA0_SOUT_DSI3                                0x5
+#define RDMA0_SOUT_MASK                                0x7
 #define RDMA1_SOUT_DPI0                                0x2
 #define RDMA1_SOUT_DPI1                                0x3
 #define RDMA1_SOUT_DSI1                                0x1
 #define RDMA1_SOUT_DSI2                                0x4
 #define RDMA1_SOUT_DSI3                                0x5
+#define RDMA1_SOUT_MASK                                0x7
 #define RDMA2_SOUT_DPI0                                0x2
 #define RDMA2_SOUT_DPI1                                0x3
 #define RDMA2_SOUT_DSI1                                0x1
 #define RDMA2_SOUT_DSI2                                0x4
 #define RDMA2_SOUT_DSI3                                0x5
+#define RDMA2_SOUT_MASK                                0x7
 #define DPI0_SEL_IN_RDMA1                      0x1
 #define DPI0_SEL_IN_RDMA2                      0x3
+#define DPI0_SEL_IN_MASK                       0x3
 #define DPI1_SEL_IN_RDMA1                      (0x1 << 8)
 #define DPI1_SEL_IN_RDMA2                      (0x3 << 8)
+#define DPI1_SEL_IN_MASK                       (0x3 << 8)
 #define DSI0_SEL_IN_RDMA1                      0x1
 #define DSI0_SEL_IN_RDMA2                      0x4
+#define DSI0_SEL_IN_MASK                       0x7
 #define DSI1_SEL_IN_RDMA1                      0x1
 #define DSI1_SEL_IN_RDMA2                      0x4
+#define DSI1_SEL_IN_MASK                       0x7
 #define DSI2_SEL_IN_RDMA1                      (0x1 << 16)
 #define DSI2_SEL_IN_RDMA2                      (0x4 << 16)
+#define DSI2_SEL_IN_MASK                       (0x7 << 16)
 #define DSI3_SEL_IN_RDMA1                      (0x1 << 16)
 #define DSI3_SEL_IN_RDMA2                      (0x4 << 16)
+#define DSI3_SEL_IN_MASK                       (0x7 << 16)
 #define COLOR1_SEL_IN_OVL1                     0x1
 
 #define OVL_MOUT_EN_RDMA                       0x1
 #define BLS_TO_DSI_RDMA1_TO_DPI1               0x8
 #define BLS_TO_DPI_RDMA1_TO_DSI                        0x2
+#define BLS_RDMA1_DSI_DPI_MASK                 0xf
 #define DSI_SEL_IN_BLS                         0x0
 #define DPI_SEL_IN_BLS                         0x0
+#define DPI_SEL_IN_MASK                                0x1
 #define DSI_SEL_IN_RDMA                                0x1
+#define DSI_SEL_IN_MASK                                0x1
 
 struct mtk_mmsys_routes {
        u32 from_comp;
        u32 to_comp;
        u32 addr;
+       u32 mask;
        u32 val;
 };
 
@@ -91,124 +104,168 @@ struct mtk_mmsys_driver_data {
 static const struct mtk_mmsys_routes mmsys_default_routing_table[] = {
        {
                DDP_COMPONENT_BLS, DDP_COMPONENT_DSI0,
-               DISP_REG_CONFIG_OUT_SEL, BLS_TO_DSI_RDMA1_TO_DPI1
+               DISP_REG_CONFIG_OUT_SEL, BLS_RDMA1_DSI_DPI_MASK,
+               BLS_TO_DSI_RDMA1_TO_DPI1
        }, {
                DDP_COMPONENT_BLS, DDP_COMPONENT_DSI0,
-               DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_BLS
+               DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_MASK,
+               DSI_SEL_IN_BLS
        }, {
                DDP_COMPONENT_BLS, DDP_COMPONENT_DPI0,
-               DISP_REG_CONFIG_OUT_SEL, BLS_TO_DPI_RDMA1_TO_DSI
+               DISP_REG_CONFIG_OUT_SEL, BLS_RDMA1_DSI_DPI_MASK,
+               BLS_TO_DPI_RDMA1_TO_DSI
        }, {
                DDP_COMPONENT_BLS, DDP_COMPONENT_DPI0,
-               DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_RDMA
+               DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_MASK,
+               DSI_SEL_IN_RDMA
        }, {
                DDP_COMPONENT_BLS, DDP_COMPONENT_DPI0,
-               DISP_REG_CONFIG_DPI_SEL, DPI_SEL_IN_BLS
+               DISP_REG_CONFIG_DPI_SEL, DPI_SEL_IN_MASK,
+               DPI_SEL_IN_BLS
        }, {
                DDP_COMPONENT_GAMMA, DDP_COMPONENT_RDMA1,
-               DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN, GAMMA_MOUT_EN_RDMA1
+               DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN, GAMMA_MOUT_EN_RDMA1,
+               GAMMA_MOUT_EN_RDMA1
        }, {
                DDP_COMPONENT_OD0, DDP_COMPONENT_RDMA0,
-               DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD_MOUT_EN_RDMA0
+               DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD_MOUT_EN_RDMA0,
+               OD_MOUT_EN_RDMA0
        }, {
                DDP_COMPONENT_OD1, DDP_COMPONENT_RDMA1,
-               DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD1_MOUT_EN_RDMA1
+               DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD1_MOUT_EN_RDMA1,
+               OD1_MOUT_EN_RDMA1
        }, {
                DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
-               DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0
+               DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0,
+               OVL0_MOUT_EN_COLOR0
        }, {
                DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
-               DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0
+               DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0,
+               COLOR0_SEL_IN_OVL0
        }, {
                DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
-               DISP_REG_CONFIG_DISP_OVL_MOUT_EN, OVL_MOUT_EN_RDMA
+               DISP_REG_CONFIG_DISP_OVL_MOUT_EN, OVL_MOUT_EN_RDMA,
+               OVL_MOUT_EN_RDMA
        }, {
                DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
-               DISP_REG_CONFIG_DISP_OVL1_MOUT_EN, OVL1_MOUT_EN_COLOR1
+               DISP_REG_CONFIG_DISP_OVL1_MOUT_EN, OVL1_MOUT_EN_COLOR1,
+               OVL1_MOUT_EN_COLOR1
        }, {
                DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
-               DISP_REG_CONFIG_DISP_COLOR1_SEL_IN, COLOR1_SEL_IN_OVL1
+               DISP_REG_CONFIG_DISP_COLOR1_SEL_IN, COLOR1_SEL_IN_OVL1,
+               COLOR1_SEL_IN_OVL1
        }, {
                DDP_COMPONENT_RDMA0, DDP_COMPONENT_DPI0,
-               DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DPI0
+               DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
+               RDMA0_SOUT_DPI0
        }, {
                DDP_COMPONENT_RDMA0, DDP_COMPONENT_DPI1,
-               DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DPI1
+               DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
+               RDMA0_SOUT_DPI1
        }, {
                DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI1,
-               DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DSI1
+               DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
+               RDMA0_SOUT_DSI1
        }, {
                DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI2,
-               DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DSI2
+               DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
+               RDMA0_SOUT_DSI2
        }, {
                DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI3,
-               DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DSI3
+               DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
+               RDMA0_SOUT_DSI3
        }, {
                DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
-               DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DPI0
+               DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
+               RDMA1_SOUT_DPI0
        }, {
                DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
-               DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_RDMA1
+               DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_MASK,
+               DPI0_SEL_IN_RDMA1
        }, {
                DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI1,
-               DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DPI1
+               DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
+               RDMA1_SOUT_DPI1
        }, {
                DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI1,
-               DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_RDMA1
+               DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_MASK,
+               DPI1_SEL_IN_RDMA1
        }, {
                DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI0,
-               DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_RDMA1
+               DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_MASK,
+               DSI0_SEL_IN_RDMA1
        }, {
                DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI1,
-               DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DSI1
+               DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
+               RDMA1_SOUT_DSI1
        }, {
                DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI1,
-               DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_RDMA1
+               DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_MASK,
+               DSI1_SEL_IN_RDMA1
        }, {
                DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI2,
-               DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DSI2
+               DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
+               RDMA1_SOUT_DSI2
        }, {
                DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI2,
-               DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_RDMA1
+               DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_MASK,
+               DSI2_SEL_IN_RDMA1
        }, {
                DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI3,
-               DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DSI3
+               DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
+               RDMA1_SOUT_DSI3
        }, {
                DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI3,
-               DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_RDMA1
+               DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_MASK,
+               DSI3_SEL_IN_RDMA1
        }, {
                DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI0,
-               DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DPI0
+               DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
+               RDMA2_SOUT_DPI0
        }, {
                DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI0,
-               DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_RDMA2
+               DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_MASK,
+               DPI0_SEL_IN_RDMA2
        }, {
                DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI1,
-               DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DPI1
+               DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
+               RDMA2_SOUT_DPI1
        }, {
                DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI1,
-               DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_RDMA2
+               DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_MASK,
+               DPI1_SEL_IN_RDMA2
        }, {
                DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI0,
-               DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_RDMA2
+               DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_MASK,
+               DSI0_SEL_IN_RDMA2
        }, {
                DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI1,
-               DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DSI1
+               DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
+               RDMA2_SOUT_DSI1
        }, {
                DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI1,
-               DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_RDMA2
+               DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_MASK,
+               DSI1_SEL_IN_RDMA2
        }, {
                DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI2,
-               DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DSI2
+               DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
+               RDMA2_SOUT_DSI2
        }, {
                DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI2,
-               DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_RDMA2
+               DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_MASK,
+               DSI2_SEL_IN_RDMA2
        }, {
                DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI3,
-               DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DSI3
+               DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
+               RDMA2_SOUT_DSI3
        }, {
                DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI3,
-               DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_RDMA2
+               DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_MASK,
+               DSI3_SEL_IN_RDMA2
+       }, {
+               DDP_COMPONENT_UFOE, DDP_COMPONENT_DSI0,
+               DISP_REG_CONFIG_DISP_UFOE_MOUT_EN, UFOE_MOUT_EN_DSI0,
+               UFOE_MOUT_EN_DSI0
        }
 };
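
Each route entry above now carries an explicit mask next to the selection value, so applying a route can touch only the bits that belong to that crossbar field. A minimal sketch of how such an entry would be applied via read-modify-write; the struct layout and helper name here are illustrative, not the driver's actual symbols:

        struct route {
                u32 from, to;   /* DDP_COMPONENT_* endpoints */
                u32 reg;        /* routing register offset */
                u32 mask;       /* bits owned by this route */
                u32 val;        /* selection value inside the mask */
        };

        static void apply_route(void __iomem *base, const struct route *r)
        {
                u32 v = readl(base + r->reg);

                /* clear only this route's field, then set the new selection */
                v = (v & ~r->mask) | (r->val & r->mask);
                writel(v, base + r->reg);
        }
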
 
index 21a4e11..c5ac649 100644 (file)
@@ -60,7 +60,7 @@
 #define BUS_PROT_UPDATE_TOPAXI(_mask)                          \
                BUS_PROT_UPDATE(_mask,                          \
                                INFRA_TOPAXI_PROTECTEN,         \
-                               INFRA_TOPAXI_PROTECTEN_CLR,     \
+                               INFRA_TOPAXI_PROTECTEN,         \
                                INFRA_TOPAXI_PROTECTSTA1)
 
 struct scpsys_bus_prot_data {
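
The fix above points the clear register of the TOPAXI descriptor back at INFRA_TOPAXI_PROTECTEN: for BUS_PROT_UPDATE entries, enable and disable are both read-modify-write updates of one register, not writes to separate set/clear registers. A rough sketch of the two flavours (field names are illustrative):

        if (bp->update) {
                /* one RMW register: set or clear the bits in place */
                regmap_update_bits(regmap, bp->bus_prot_set, bp->bus_prot_mask,
                                   enable ? bp->bus_prot_mask : 0);
        } else {
                /* dedicated write-1-to-set / write-1-to-clear registers */
                regmap_write(regmap, enable ? bp->bus_prot_set : bp->bus_prot_clr,
                             bp->bus_prot_mask);
        }
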
index 71b44c3..07e0ecd 100644 (file)
@@ -208,6 +208,7 @@ config ARCH_R8A77951
        help
          This enables support for the Renesas R-Car H3 SoC (revisions 2.0 and
          later).
+         This includes different gradings like R-Car H3e-2G.
 
 config ARCH_R8A77965
        bool "ARM64 Platform support for R-Car M3-N"
@@ -229,6 +230,7 @@ config ARCH_R8A77961
        select SYSC_R8A77961
        help
          This enables support for the Renesas R-Car M3-W+ SoC.
+         This includes different gradings like R-Car M3e-2G.
 
 config ARCH_R8A77980
        bool "ARM64 Platform support for R-Car V3H"
index d464ffa..7410b9f 100644 (file)
@@ -404,19 +404,21 @@ static int __init r8a779a0_sysc_pd_init(void)
        for (i = 0; i < info->num_areas; i++) {
                const struct r8a779a0_sysc_area *area = &info->areas[i];
                struct r8a779a0_sysc_pd *pd;
+               size_t n;
 
                if (!area->name) {
                        /* Skip NULLified area */
                        continue;
                }
 
-               pd = kzalloc(sizeof(*pd) + strlen(area->name) + 1, GFP_KERNEL);
+               n = strlen(area->name) + 1;
+               pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);
                if (!pd) {
                        error = -ENOMEM;
                        goto out_put;
                }
 
-               strcpy(pd->name, area->name);
+               memcpy(pd->name, area->name, n);
                pd->genpd.name = pd->name;
                pd->pdr = area->pdr;
                pd->flags = area->flags;
index 53387a7..b0a80de 100644 (file)
@@ -396,19 +396,21 @@ static int __init rcar_sysc_pd_init(void)
        for (i = 0; i < info->num_areas; i++) {
                const struct rcar_sysc_area *area = &info->areas[i];
                struct rcar_sysc_pd *pd;
+               size_t n;
 
                if (!area->name) {
                        /* Skip NULLified area */
                        continue;
                }
 
-               pd = kzalloc(sizeof(*pd) + strlen(area->name) + 1, GFP_KERNEL);
+               n = strlen(area->name) + 1;
+               pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);
                if (!pd) {
                        error = -ENOMEM;
                        goto out_put;
                }
 
-               strcpy(pd->name, area->name);
+               memcpy(pd->name, area->name, n);
                pd->genpd.name = pd->name;
                pd->ch.chan_offs = area->chan_offs;
                pd->ch.chan_bit = area->chan_bit;
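
Both R-Car hunks replace strlen() + strcpy() with a single measured length reused for the allocation and the copy, so the copy provably fits the buffer and fortified string checks have nothing to object to. The essence of the pattern (struct name illustrative):

        struct named_pd {
                struct generic_pm_domain genpd;
                char name[];                    /* flexible array member */
        };

        size_t n = strlen(src) + 1;             /* length includes the NUL */
        struct named_pd *pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);

        if (!pd)
                return -ENOMEM;
        memcpy(pd->name, src, n);               /* same n as the allocation */
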
index 8310fce..dab9f5a 100644 (file)
@@ -284,11 +284,15 @@ static const struct of_device_id renesas_socs[] __initconst = {
 #if defined(CONFIG_ARCH_R8A77950) || defined(CONFIG_ARCH_R8A77951)
        { .compatible = "renesas,r8a7795",      .data = &soc_rcar_h3 },
 #endif
+#ifdef CONFIG_ARCH_R8A77951
+       { .compatible = "renesas,r8a779m1",     .data = &soc_rcar_h3 },
+#endif
 #ifdef CONFIG_ARCH_R8A77960
        { .compatible = "renesas,r8a7796",      .data = &soc_rcar_m3_w },
 #endif
 #ifdef CONFIG_ARCH_R8A77961
        { .compatible = "renesas,r8a77961",     .data = &soc_rcar_m3_w },
+       { .compatible = "renesas,r8a779m3",     .data = &soc_rcar_m3_w },
 #endif
 #ifdef CONFIG_ARCH_R8A77965
        { .compatible = "renesas,r8a77965",     .data = &soc_rcar_m3_n },
index 3d9da3d..f215181 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/sys_soc.h>
 
@@ -210,6 +211,8 @@ static int tegra_fuse_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, fuse);
        fuse->dev = &pdev->dev;
 
+       pm_runtime_enable(&pdev->dev);
+
        if (fuse->soc->probe) {
                err = fuse->soc->probe(fuse);
                if (err < 0)
@@ -246,14 +249,71 @@ static int tegra_fuse_probe(struct platform_device *pdev)
        return 0;
 
 restore:
+       fuse->clk = NULL;
        fuse->base = base;
+       pm_runtime_disable(&pdev->dev);
        return err;
 }
 
+static int __maybe_unused tegra_fuse_runtime_resume(struct device *dev)
+{
+       int err;
+
+       err = clk_prepare_enable(fuse->clk);
+       if (err < 0) {
+               dev_err(dev, "failed to enable FUSE clock: %d\n", err);
+               return err;
+       }
+
+       return 0;
+}
+
+static int __maybe_unused tegra_fuse_runtime_suspend(struct device *dev)
+{
+       clk_disable_unprepare(fuse->clk);
+
+       return 0;
+}
+
+static int __maybe_unused tegra_fuse_suspend(struct device *dev)
+{
+       int ret;
+
+       /*
+        * Critical for RAM re-repair operation, which must occur on resume
+        * from LP1 system suspend and as part of CCPLEX cluster switching.
+        */
+       if (fuse->soc->clk_suspend_on)
+               ret = pm_runtime_resume_and_get(dev);
+       else
+               ret = pm_runtime_force_suspend(dev);
+
+       return ret;
+}
+
+static int __maybe_unused tegra_fuse_resume(struct device *dev)
+{
+       int ret = 0;
+
+       if (fuse->soc->clk_suspend_on)
+               pm_runtime_put(dev);
+       else
+               ret = pm_runtime_force_resume(dev);
+
+       return ret;
+}
+
+static const struct dev_pm_ops tegra_fuse_pm = {
+       SET_RUNTIME_PM_OPS(tegra_fuse_runtime_suspend, tegra_fuse_runtime_resume,
+                          NULL)
+       SET_SYSTEM_SLEEP_PM_OPS(tegra_fuse_suspend, tegra_fuse_resume)
+};
+
 static struct platform_driver tegra_fuse_driver = {
        .driver = {
                .name = "tegra-fuse",
                .of_match_table = tegra_fuse_match,
+               .pm = &tegra_fuse_pm,
                .suppress_bind_attrs = true,
        },
        .probe = tegra_fuse_probe,
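
A note on the suspend hook above: pm_runtime_resume_and_get() keeps the device powered so the FUSE clock stays on through suspend (which the comment says RAM re-repair needs), while pm_runtime_force_suspend() invokes the runtime-suspend callback to gate it. A condensed sketch of how a fuse read now picks up the clock via runtime PM, mirroring the shape of the tegra30 read path further below (error handling trimmed):

        err = pm_runtime_resume_and_get(fuse->dev);     /* clock ungated */
        if (err)
                return 0;

        value = readl_relaxed(fuse->base + FUSE_BEGIN + offset);

        pm_runtime_put(fuse->dev);                      /* may gate again */
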
index 16aaa28..8ec9fc5 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/kobject.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/random.h>
 
 #include <soc/tegra/fuse.h>
@@ -46,6 +47,10 @@ static u32 tegra20_fuse_read(struct tegra_fuse *fuse, unsigned int offset)
        u32 value = 0;
        int err;
 
+       err = pm_runtime_resume_and_get(fuse->dev);
+       if (err)
+               return err;
+
        mutex_lock(&fuse->apbdma.lock);
 
        fuse->apbdma.config.src_addr = fuse->phys + FUSE_BEGIN + offset;
@@ -66,8 +71,6 @@ static u32 tegra20_fuse_read(struct tegra_fuse *fuse, unsigned int offset)
 
        reinit_completion(&fuse->apbdma.wait);
 
-       clk_prepare_enable(fuse->clk);
-
        dmaengine_submit(dma_desc);
        dma_async_issue_pending(fuse->apbdma.chan);
        time_left = wait_for_completion_timeout(&fuse->apbdma.wait,
@@ -78,10 +81,9 @@ static u32 tegra20_fuse_read(struct tegra_fuse *fuse, unsigned int offset)
        else
                value = *fuse->apbdma.virt;
 
-       clk_disable_unprepare(fuse->clk);
-
 out:
        mutex_unlock(&fuse->apbdma.lock);
+       pm_runtime_put(fuse->dev);
        return value;
 }
 
@@ -165,4 +167,5 @@ const struct tegra_fuse_soc tegra20_fuse_soc = {
        .probe = tegra20_fuse_probe,
        .info = &tegra20_fuse_info,
        .soc_attr_group = &tegra_soc_attr_group,
+       .clk_suspend_on = false,
 };
index c1aa781..b071d43 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/of_device.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/random.h>
 
 #include <soc/tegra/fuse.h>
@@ -52,15 +53,13 @@ static u32 tegra30_fuse_read(struct tegra_fuse *fuse, unsigned int offset)
        u32 value;
        int err;
 
-       err = clk_prepare_enable(fuse->clk);
-       if (err < 0) {
-               dev_err(fuse->dev, "failed to enable FUSE clock: %d\n", err);
+       err = pm_runtime_resume_and_get(fuse->dev);
+       if (err)
                return 0;
-       }
 
        value = readl_relaxed(fuse->base + FUSE_BEGIN + offset);
 
-       clk_disable_unprepare(fuse->clk);
+       pm_runtime_put(fuse->dev);
 
        return value;
 }
@@ -113,6 +112,7 @@ const struct tegra_fuse_soc tegra30_fuse_soc = {
        .speedo_init = tegra30_init_speedo_data,
        .info = &tegra30_fuse_info,
        .soc_attr_group = &tegra_soc_attr_group,
+       .clk_suspend_on = false,
 };
 #endif
 
@@ -128,6 +128,7 @@ const struct tegra_fuse_soc tegra114_fuse_soc = {
        .speedo_init = tegra114_init_speedo_data,
        .info = &tegra114_fuse_info,
        .soc_attr_group = &tegra_soc_attr_group,
+       .clk_suspend_on = false,
 };
 #endif
 
@@ -209,6 +210,7 @@ const struct tegra_fuse_soc tegra124_fuse_soc = {
        .lookups = tegra124_fuse_lookups,
        .num_lookups = ARRAY_SIZE(tegra124_fuse_lookups),
        .soc_attr_group = &tegra_soc_attr_group,
+       .clk_suspend_on = true,
 };
 #endif
 
@@ -295,6 +297,7 @@ const struct tegra_fuse_soc tegra210_fuse_soc = {
        .lookups = tegra210_fuse_lookups,
        .num_lookups = ARRAY_SIZE(tegra210_fuse_lookups),
        .soc_attr_group = &tegra_soc_attr_group,
+       .clk_suspend_on = false,
 };
 #endif
 
@@ -325,6 +328,7 @@ const struct tegra_fuse_soc tegra186_fuse_soc = {
        .lookups = tegra186_fuse_lookups,
        .num_lookups = ARRAY_SIZE(tegra186_fuse_lookups),
        .soc_attr_group = &tegra_soc_attr_group,
+       .clk_suspend_on = false,
 };
 #endif
 
@@ -355,6 +359,7 @@ const struct tegra_fuse_soc tegra194_fuse_soc = {
        .lookups = tegra194_fuse_lookups,
        .num_lookups = ARRAY_SIZE(tegra194_fuse_lookups),
        .soc_attr_group = &tegra194_soc_attr_group,
+       .clk_suspend_on = false,
 };
 #endif
 
@@ -385,5 +390,6 @@ const struct tegra_fuse_soc tegra234_fuse_soc = {
        .lookups = tegra234_fuse_lookups,
        .num_lookups = ARRAY_SIZE(tegra234_fuse_lookups),
        .soc_attr_group = &tegra194_soc_attr_group,
+       .clk_suspend_on = false,
 };
 #endif
index e057a58..de58feb 100644 (file)
@@ -34,6 +34,8 @@ struct tegra_fuse_soc {
        unsigned int num_lookups;
 
        const struct attribute_group *soc_attr_group;
+
+       bool clk_suspend_on;
 };
 
 struct tegra_fuse {
index ea62f84..50091c4 100644 (file)
@@ -436,7 +436,7 @@ struct tegra_pmc {
 
 static struct tegra_pmc *pmc = &(struct tegra_pmc) {
        .base = NULL,
-       .suspend_mode = TEGRA_SUSPEND_NONE,
+       .suspend_mode = TEGRA_SUSPEND_NOT_READY,
 };
 
 static inline struct tegra_powergate *
@@ -1812,6 +1812,7 @@ static int tegra_pmc_parse_dt(struct tegra_pmc *pmc, struct device_node *np)
        u32 value, values[2];
 
        if (of_property_read_u32(np, "nvidia,suspend-mode", &value)) {
+               pmc->suspend_mode = TEGRA_SUSPEND_NONE;
        } else {
                switch (value) {
                case 0:
@@ -2785,6 +2786,11 @@ static int tegra_pmc_regmap_init(struct tegra_pmc *pmc)
        return 0;
 }
 
+static void tegra_pmc_reset_suspend_mode(void *data)
+{
+       pmc->suspend_mode = TEGRA_SUSPEND_NOT_READY;
+}
+
 static int tegra_pmc_probe(struct platform_device *pdev)
 {
        void __iomem *base;
@@ -2803,6 +2809,11 @@ static int tegra_pmc_probe(struct platform_device *pdev)
        if (err < 0)
                return err;
 
+       err = devm_add_action_or_reset(&pdev->dev, tegra_pmc_reset_suspend_mode,
+                                      NULL);
+       if (err)
+               return err;
+
        /* take over the memory region from the early initialization */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
@@ -2909,6 +2920,7 @@ static int tegra_pmc_probe(struct platform_device *pdev)
 
        tegra_pmc_clock_register(pmc, pdev->dev.of_node);
        platform_set_drvdata(pdev, pmc);
+       tegra_pm_init_suspend();
 
        return 0;
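
The devm_add_action_or_reset() call above gives the suspend-mode rollback the usual devres semantics: if registration fails, the callback runs immediately and probe can simply return the error; otherwise it runs automatically on unbind. The idiom in isolation, matching the handler added above:

        static void tegra_pmc_reset_suspend_mode(void *data)
        {
                /* runs right away if registration fails, else on unbind */
                pmc->suspend_mode = TEGRA_SUSPEND_NOT_READY;
        }

        err = devm_add_action_or_reset(&pdev->dev,
                                       tegra_pmc_reset_suspend_mode, NULL);
        if (err)
                return err;     /* the action has already run */
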
 
index 06c792b..8eaf50d 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/slab.h>
-#include <linux/version.h>
 
 #include <soc/tegra/bpmp.h>
 #include <soc/tegra/bpmp-abi.h>
index f22ac1e..49da387 100644 (file)
@@ -338,6 +338,7 @@ static const struct of_device_id pruss_of_match[] = {
        { .compatible = "ti,k2g-pruss" },
        { .compatible = "ti,am654-icssg", .data = &am65x_j721e_pruss_data, },
        { .compatible = "ti,j721e-icssg", .data = &am65x_j721e_pruss_data, },
+       { .compatible = "ti,am642-icssg", .data = &am65x_j721e_pruss_data, },
        {},
 };
 MODULE_DEVICE_TABLE(of, pruss_of_match);
index 06cbee5..b5b2fa5 100644 (file)
@@ -126,23 +126,13 @@ static irqreturn_t sr_interrupt(int irq, void *data)
 
 static void sr_set_clk_length(struct omap_sr *sr)
 {
-       struct clk *fck;
        u32 fclk_speed;
 
        /* Try interconnect target module fck first if it already exists */
-       fck = clk_get(sr->pdev->dev.parent, "fck");
-       if (IS_ERR(fck)) {
-               fck = clk_get(&sr->pdev->dev, "fck");
-               if (IS_ERR(fck)) {
-                       dev_err(&sr->pdev->dev,
-                               "%s: unable to get fck for device %s\n",
-                               __func__, dev_name(&sr->pdev->dev));
-                       return;
-               }
-       }
+       if (IS_ERR(sr->fck))
+               return;
 
-       fclk_speed = clk_get_rate(fck);
-       clk_put(fck);
+       fclk_speed = clk_get_rate(sr->fck);
 
        switch (fclk_speed) {
        case 12000000:
@@ -587,21 +577,25 @@ int sr_enable(struct omap_sr *sr, unsigned long volt)
        /* errminlimit is opp dependent and hence linked to voltage */
        sr->err_minlimit = nvalue_row->errminlimit;
 
-       pm_runtime_get_sync(&sr->pdev->dev);
+       clk_enable(sr->fck);
 
        /* Check if SR is already enabled. If yes do nothing */
        if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE)
-               return 0;
+               goto out_enabled;
 
        /* Configure SR */
        ret = sr_class->configure(sr);
        if (ret)
-               return ret;
+               goto out_enabled;
 
        sr_write_reg(sr, NVALUERECIPROCAL, nvalue_row->nvalue);
 
        /* SRCONFIG - enable SR */
        sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, SRCONFIG_SRENABLE);
+
+out_enabled:
+       sr->enabled = 1;
+
        return 0;
 }
 
@@ -621,7 +615,7 @@ void sr_disable(struct omap_sr *sr)
        }
 
        /* Check if SR clocks are already disabled. If yes do nothing */
-       if (pm_runtime_suspended(&sr->pdev->dev))
+       if (!sr->enabled)
                return;
 
        /*
@@ -642,7 +636,8 @@ void sr_disable(struct omap_sr *sr)
                }
        }
 
-       pm_runtime_put_sync_suspend(&sr->pdev->dev);
+       clk_disable(sr->fck);
+       sr->enabled = 0;
 }
 
 /**
@@ -851,8 +846,12 @@ static int omap_sr_probe(struct platform_device *pdev)
 
        irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 
+       sr_info->fck = devm_clk_get(pdev->dev.parent, "fck");
+       if (IS_ERR(sr_info->fck))
+               return PTR_ERR(sr_info->fck);
+       clk_prepare(sr_info->fck);
+
        pm_runtime_enable(&pdev->dev);
-       pm_runtime_irq_safe(&pdev->dev);
 
        snprintf(sr_info->name, SMARTREFLEX_NAME_LEN, "%s", pdata->name);
 
@@ -878,12 +877,6 @@ static int omap_sr_probe(struct platform_device *pdev)
 
        list_add(&sr_info->node, &sr_list);
 
-       ret = pm_runtime_get_sync(&pdev->dev);
-       if (ret < 0) {
-               pm_runtime_put_noidle(&pdev->dev);
-               goto err_list_del;
-       }
-
        /*
         * Call into late init to do initializations that require
         * both sr driver and sr class driver to be initialized.
@@ -933,16 +926,13 @@ static int omap_sr_probe(struct platform_device *pdev)
 
        }
 
-       pm_runtime_put_sync(&pdev->dev);
-
        return ret;
 
 err_debugfs:
        debugfs_remove_recursive(sr_info->dbg_dir);
 err_list_del:
        list_del(&sr_info->node);
-
-       pm_runtime_put_sync(&pdev->dev);
+       clk_unprepare(sr_info->fck);
 
        return ret;
 }
@@ -950,6 +940,7 @@ err_list_del:
 static int omap_sr_remove(struct platform_device *pdev)
 {
        struct omap_sr_data *pdata = pdev->dev.platform_data;
+       struct device *dev = &pdev->dev;
        struct omap_sr *sr_info;
 
        if (!pdata) {
@@ -968,7 +959,8 @@ static int omap_sr_remove(struct platform_device *pdev)
                sr_stop_vddautocomp(sr_info);
        debugfs_remove_recursive(sr_info->dbg_dir);
 
-       pm_runtime_disable(&pdev->dev);
+       pm_runtime_disable(dev);
+       clk_unprepare(sr_info->fck);
        list_del(&sr_info->node);
        return 0;
 }
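
The conversion above splits clock handling along the sleeping/atomic boundary: clk_prepare()/clk_unprepare() run once in probe/remove, where sleeping is allowed, while sr_enable()/sr_disable() use clk_enable()/clk_disable(), which are legal in atomic context, and a plain sr->enabled flag replaces the pm_runtime_suspended() check. The split, condensed from the hunks above:

        /* probe (may sleep): get and prepare the clock once */
        sr->fck = devm_clk_get(pdev->dev.parent, "fck");
        if (IS_ERR(sr->fck))
                return PTR_ERR(sr->fck);
        clk_prepare(sr->fck);

        /* sr_enable()/sr_disable() (may run in atomic context) */
        clk_enable(sr->fck);
        sr->enabled = 1;
        /* ... program the SmartReflex registers ... */
        clk_disable(sr->fck);
        sr->enabled = 0;

        /* remove (may sleep) */
        clk_unprepare(sr->fck);
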
index 2ef7488..788dcdf 100644 (file)
@@ -352,8 +352,6 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
                }
 
                mr = spi_readl(as, MR);
-               if (spi->cs_gpiod)
-                       gpiod_set_value(spi->cs_gpiod, 1);
        } else {
                u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
                int i;
@@ -369,8 +367,6 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
 
                mr = spi_readl(as, MR);
                mr = SPI_BFINS(PCS, ~(1 << chip_select), mr);
-               if (spi->cs_gpiod)
-                       gpiod_set_value(spi->cs_gpiod, 1);
                spi_writel(as, MR, mr);
        }
 
@@ -400,8 +396,6 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
 
        if (!spi->cs_gpiod)
                spi_writel(as, CR, SPI_BIT(LASTXFER));
-       else
-               gpiod_set_value(spi->cs_gpiod, 0);
 }
 
 static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
@@ -1483,7 +1477,8 @@ static int atmel_spi_probe(struct platform_device *pdev)
        master->bus_num = pdev->id;
        master->num_chipselect = 4;
        master->setup = atmel_spi_setup;
-       master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
+       master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX |
+                       SPI_MASTER_GPIO_SS);
        master->transfer_one = atmel_spi_one_transfer;
        master->set_cs = atmel_spi_set_cs;
        master->cleanup = atmel_spi_cleanup;
index 5f8771f..775c0bf 100644 (file)
@@ -83,6 +83,7 @@ MODULE_PARM_DESC(polling_limit_us,
  * struct bcm2835_spi - BCM2835 SPI controller
  * @regs: base address of register map
  * @clk: core clock, divided to calculate serial clock
+ * @clk_hz: cached core clock rate in Hz
  * @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full
  * @tfr: SPI transfer currently processed
  * @ctlr: SPI controller reverse lookup
@@ -116,6 +117,7 @@ MODULE_PARM_DESC(polling_limit_us,
 struct bcm2835_spi {
        void __iomem *regs;
        struct clk *clk;
+       unsigned long clk_hz;
        int irq;
        struct spi_transfer *tfr;
        struct spi_controller *ctlr;
@@ -1045,19 +1047,18 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
 {
        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
        struct bcm2835_spidev *slv = spi_get_ctldata(spi);
-       unsigned long spi_hz, clk_hz, cdiv;
+       unsigned long spi_hz, cdiv;
        unsigned long hz_per_byte, byte_limit;
        u32 cs = slv->prepare_cs;
 
        /* set clock */
        spi_hz = tfr->speed_hz;
-       clk_hz = clk_get_rate(bs->clk);
 
-       if (spi_hz >= clk_hz / 2) {
+       if (spi_hz >= bs->clk_hz / 2) {
                cdiv = 2; /* clk_hz/2 is the fastest we can go */
        } else if (spi_hz) {
                /* CDIV must be a multiple of two */
-               cdiv = DIV_ROUND_UP(clk_hz, spi_hz);
+               cdiv = DIV_ROUND_UP(bs->clk_hz, spi_hz);
                cdiv += (cdiv % 2);
 
                if (cdiv >= 65536)
@@ -1065,7 +1066,7 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
        } else {
                cdiv = 0; /* 0 is the slowest we can go */
        }
-       tfr->effective_speed_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);
+       tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536);
        bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
 
        /* handle all the 3-wire mode */
@@ -1354,6 +1355,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
                return bs->irq ? bs->irq : -ENODEV;
 
        clk_prepare_enable(bs->clk);
+       bs->clk_hz = clk_get_rate(bs->clk);
 
        err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
        if (err)
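
Caching the core clock rate at probe time (the rate is fixed while the clock stays prepared) removes a clk_get_rate() call from every transfer. With the cached rate, the divider math is cheap integer arithmetic; a worked example assuming a 250 MHz core clock:

        bs->clk_hz = clk_get_rate(bs->clk);     /* e.g. 250000000 */

        /* request 10 MHz: DIV_ROUND_UP(250000000, 10000000) = 25,
         * rounded up to the even 26, so the effective rate is
         * 250 MHz / 26 ~= 9.6 MHz */
        cdiv = DIV_ROUND_UP(bs->clk_hz, spi_hz);
        cdiv += (cdiv % 2);
        if (cdiv >= 65536)
                cdiv = 0;       /* slowest setting: clk_hz / 65536 */
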
index 7a00346..a2de235 100644 (file)
@@ -309,6 +309,9 @@ static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr)
 {
        unsigned int dummy_clk;
 
+       if (!op->dummy.nbytes)
+               return 0;
+
        dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
        if (dtr)
                dummy_clk /= 2;
@@ -797,19 +800,20 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
        reg = cqspi_calc_rdreg(f_pdata);
        writel(reg, reg_base + CQSPI_REG_RD_INSTR);
 
-       if (f_pdata->dtr) {
-               /*
-                * Some flashes like the cypress Semper flash expect a 4-byte
-                * dummy address with the Read SR command in DTR mode, but this
-                * controller does not support sending address with the Read SR
-                * command. So, disable write completion polling on the
-                * controller's side. spi-nor will take care of polling the
-                * status register.
-                */
-               reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
-               reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
-               writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
-       }
+       /*
+        * SPI NAND flashes require the address of the status register to be
+        * passed in the Read SR command. Also, some SPI NOR flashes like the
+        * Cypress Semper flash expect a 4-byte dummy address in the Read SR
+        * command in DTR mode.
+        *
+        * But this controller does not support address phase in the Read SR
+        * command when doing auto-HW polling. So, disable write completion
+        * polling on the controller's side. spinand and spi-nor will take
+        * care of polling the status register.
+        */
+       reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+       reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
+       writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
 
        reg = readl(reg_base + CQSPI_REG_SIZE);
        reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
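
Two things are worth noting in the hunks above. First, cqspi_calc_dummy() now bails out for ops without a dummy phase, since the buswidth of an absent phase is 0 and the division would be undefined. The cycle count scales with bus width and halves in DTR; worked through with assumed values:

        /* one dummy byte on a quad (x4) bus: 4 bits shift per cycle,
         * so 8 / 4 = 2 cycles per byte -> dummy_clk = 1 * 2 = 2;
         * DTR moves data on both clock edges, halving it to 1 cycle */
        dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
        if (dtr)
                dummy_clk /= 2;

Second, automatic write-completion polling is now disabled unconditionally rather than only for DTR, because SPI NAND needs an address phase in the Read SR command, which the controller cannot send while auto-polling; spinand and spi-nor poll the status register themselves.
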
index a3afd1b..ceb16e7 100644 (file)
@@ -517,6 +517,12 @@ static int cdns_spi_probe(struct platform_device *pdev)
                goto clk_dis_apb;
        }
 
+       pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+       pm_runtime_get_noresume(&pdev->dev);
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+
        ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
        if (ret < 0)
                master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
@@ -531,11 +537,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
        /* SPI controller initializations */
        cdns_spi_init_hw(xspi);
 
-       pm_runtime_set_active(&pdev->dev);
-       pm_runtime_enable(&pdev->dev);
-       pm_runtime_use_autosuspend(&pdev->dev);
-       pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
-
        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
                ret = -ENXIO;
@@ -566,6 +567,9 @@ static int cdns_spi_probe(struct platform_device *pdev)
 
        master->bits_per_word_mask = SPI_BPW_MASK(8);
 
+       pm_runtime_mark_last_busy(&pdev->dev);
+       pm_runtime_put_autosuspend(&pdev->dev);
+
        ret = spi_register_master(master);
        if (ret) {
                dev_err(&pdev->dev, "spi_register_master failed\n");
index 39dc02e..63a8d7b 100644 (file)
@@ -77,6 +77,11 @@ struct spi_imx_devtype_data {
        bool has_slavemode;
        unsigned int fifo_size;
        bool dynamic_burst;
+       /*
+        * ERR009165 fixed or not:
+        * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
+        */
+       bool tx_glitch_fixed;
        enum spi_imx_devtype devtype;
 };
 
@@ -506,7 +511,7 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
 {
        struct spi_device *spi = msg->spi;
        u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
-       u32 testreg;
+       u32 testreg, delay;
        u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
 
        /* set Master or Slave mode */
@@ -567,6 +572,23 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
 
        writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
 
+       /*
+        * Wait until the changes in the configuration register CONFIGREG
+        * propagate into the hardware. It takes exactly one tick of the
+        * SCLK clock, but we will wait two SCLK ticks just to be sure. The
+        * effect of the delay it takes for the hardware to apply changes
+        * is noticeable if the SCLK clock runs very slowly. In such a case, if
+        * the polarity of SCLK should be inverted, the GPIO ChipSelect might
+        * be asserted before the SCLK polarity changes, which would disrupt
+        * the SPI communication as the device on the other end would consider
+        * the change of SCLK polarity as a clock tick already.
+        */
+       delay = (2 * 1000000) / spi_imx->spi_bus_clk;
+       if (likely(delay < 10)) /* SCLK is faster than ~200 kHz */
+               udelay(delay);
+       else                    /* SCLK is _very_ slow */
+               usleep_range(delay, delay + 10);
+
        return 0;
 }
 
@@ -574,7 +596,7 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
                                       struct spi_device *spi)
 {
        u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
-       u32 clk, delay;
+       u32 clk;
 
        /* Clear BL field and set the right value */
        ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
@@ -591,39 +613,32 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
        ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk);
        spi_imx->spi_bus_clk = clk;
 
-       if (spi_imx->usedma)
+       /*
+        * ERR009165: work in XCH mode instead of SMC mode when doing PIO on
+        * chips before i.MX6UL.
+        */
+       if (spi_imx->usedma && spi_imx->devtype_data->tx_glitch_fixed)
                ctrl |= MX51_ECSPI_CTRL_SMC;
+       else
+               ctrl &= ~MX51_ECSPI_CTRL_SMC;
 
        writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
 
-       /*
-        * Wait until the changes in the configuration register CONFIGREG
-        * propagate into the hardware. It takes exactly one tick of the
-        * SCLK clock, but we will wait two SCLK clock just to be sure. The
-        * effect of the delay it takes for the hardware to apply changes
-        * is noticable if the SCLK clock run very slow. In such a case, if
-        * the polarity of SCLK should be inverted, the GPIO ChipSelect might
-        * be asserted before the SCLK polarity changes, which would disrupt
-        * the SPI communication as the device on the other end would consider
-        * the change of SCLK polarity as a clock tick already.
-        */
-       delay = (2 * 1000000) / clk;
-       if (likely(delay < 10)) /* SCLK is faster than 100 kHz */
-               udelay(delay);
-       else                    /* SCLK is _very_ slow */
-               usleep_range(delay, delay + 10);
-
        return 0;
 }
 
 static void mx51_setup_wml(struct spi_imx_data *spi_imx)
 {
+       u32 tx_wml = 0;
+
+       if (spi_imx->devtype_data->tx_glitch_fixed)
+               tx_wml = spi_imx->wml;
        /*
         * Configure the DMA register: setup the watermark
         * and enable DMA request.
         */
        writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
-               MX51_ECSPI_DMA_TX_WML(spi_imx->wml) |
+               MX51_ECSPI_DMA_TX_WML(tx_wml) |
                MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
                MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
                MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
@@ -1014,6 +1029,23 @@ static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
        .devtype = IMX53_ECSPI,
 };
 
+static struct spi_imx_devtype_data imx6ul_ecspi_devtype_data = {
+       .intctrl = mx51_ecspi_intctrl,
+       .prepare_message = mx51_ecspi_prepare_message,
+       .prepare_transfer = mx51_ecspi_prepare_transfer,
+       .trigger = mx51_ecspi_trigger,
+       .rx_available = mx51_ecspi_rx_available,
+       .reset = mx51_ecspi_reset,
+       .setup_wml = mx51_setup_wml,
+       .fifo_size = 64,
+       .has_dmamode = true,
+       .dynamic_burst = true,
+       .has_slavemode = true,
+       .tx_glitch_fixed = true,
+       .disable = mx51_ecspi_disable,
+       .devtype = IMX51_ECSPI,
+};
+
 static const struct of_device_id spi_imx_dt_ids[] = {
        { .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
        { .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
@@ -1022,6 +1054,7 @@ static const struct of_device_id spi_imx_dt_ids[] = {
        { .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
        { .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
        { .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, },
+       { .compatible = "fsl,imx6ul-ecspi", .data = &imx6ul_ecspi_devtype_data, },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
@@ -1239,10 +1272,6 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
 {
        int ret;
 
-       /* use pio mode for i.mx6dl chip TKT238285 */
-       if (of_machine_is_compatible("fsl,imx6dl"))
-               return 0;
-
        spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;
 
        /* Prepare for TX DMA: */
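
Moving the CONFIGREG settle delay from prepare_transfer into prepare_message (above) keeps it next to the register write it guards. The delay is two SCLK periods expressed in microseconds; with example bus clocks:

        /* two SCLK periods in us:
         *   1 MHz  -> delay = 2000000 / 1000000 = 2  -> udelay(2)
         *   50 kHz -> delay = 2000000 / 50000   = 40 -> usleep_range(40, 50) */
        delay = (2 * 1000000) / spi_imx->spi_bus_clk;
        if (likely(delay < 10))
                udelay(delay);
        else
                usleep_range(delay, delay + 10);
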
index 976f73b..68dca8c 100644 (file)
@@ -427,13 +427,23 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
        mtk_spi_setup_packet(master);
 
        cnt = xfer->len / 4;
-       iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
+       if (xfer->tx_buf)
+               iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
+
+       if (xfer->rx_buf)
+               ioread32_rep(mdata->base + SPI_RX_DATA_REG, xfer->rx_buf, cnt);
 
        remainder = xfer->len % 4;
        if (remainder > 0) {
                reg_val = 0;
-               memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
-               writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+               if (xfer->tx_buf) {
+                       memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
+                       writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+               }
+               if (xfer->rx_buf) {
+                       reg_val = readl(mdata->base + SPI_RX_DATA_REG);
+                       memcpy(xfer->rx_buf + (cnt * 4), &reg_val, remainder);
+               }
        }
 
        mtk_spi_enable_transfer(master);
@@ -793,12 +803,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
 
        pm_runtime_enable(&pdev->dev);
 
-       ret = devm_spi_register_master(&pdev->dev, master);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
-               goto err_disable_runtime_pm;
-       }
-
        if (mdata->dev_comp->need_pad_sel) {
                if (mdata->pad_num != master->num_chipselect) {
                        dev_err(&pdev->dev,
@@ -838,6 +842,12 @@ static int mtk_spi_probe(struct platform_device *pdev)
                dev_notice(&pdev->dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
                           addr_bits, ret);
 
+       ret = devm_spi_register_master(&pdev->dev, master);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
+               goto err_disable_runtime_pm;
+       }
+
        return 0;
 
 err_disable_runtime_pm:
index 8ffcffb..05618a6 100644 (file)
@@ -884,15 +884,18 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
        ier = readl_relaxed(spi->base + STM32H7_SPI_IER);
 
        mask = ier;
-       /* EOTIE is triggered on EOT, SUSP and TXC events. */
+       /*
+        * EOTIE enables irq from EOT, SUSP and TXC events. We need to set
+        * SUSP to acknowledge it later. TXC is automatically cleared.
+        */
+
        mask |= STM32H7_SPI_SR_SUSP;
        /*
-        * When TXTF is set, DXPIE and TXPIE are cleared. So in case of
-        * Full-Duplex, need to poll RXP event to know if there are remaining
-        * data, before disabling SPI.
+        * DXPIE is set in Full-Duplex, one IT will be raised if TXP and RXP
+        * are set. So in case of Full-Duplex, need to poll TXP and RXP event.
         */
-       if (spi->rx_buf && !spi->cur_usedma)
-               mask |= STM32H7_SPI_SR_RXP;
+       if ((spi->cur_comm == SPI_FULL_DUPLEX) && !spi->cur_usedma)
+               mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP;
 
        if (!(sr & mask)) {
                dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
@@ -1925,6 +1928,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
                master->can_dma = stm32_spi_can_dma;
 
        pm_runtime_set_active(&pdev->dev);
+       pm_runtime_get_noresume(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
 
        ret = spi_register_master(master);
@@ -1940,6 +1944,8 @@ static int stm32_spi_probe(struct platform_device *pdev)
 
 err_pm_disable:
        pm_runtime_disable(&pdev->dev);
+       pm_runtime_put_noidle(&pdev->dev);
+       pm_runtime_set_suspended(&pdev->dev);
 err_dma_release:
        if (spi->dma_tx)
                dma_release_channel(spi->dma_tx);
@@ -1956,9 +1962,14 @@ static int stm32_spi_remove(struct platform_device *pdev)
        struct spi_master *master = platform_get_drvdata(pdev);
        struct stm32_spi *spi = spi_master_get_devdata(master);
 
+       pm_runtime_get_sync(&pdev->dev);
+
        spi_unregister_master(master);
        spi->cfg->disable(spi);
 
+       pm_runtime_disable(&pdev->dev);
+       pm_runtime_put_noidle(&pdev->dev);
+       pm_runtime_set_suspended(&pdev->dev);
        if (master->dma_tx)
                dma_release_channel(master->dma_tx);
        if (master->dma_rx)
@@ -1966,7 +1977,6 @@ static int stm32_spi_remove(struct platform_device *pdev)
 
        clk_disable_unprepare(spi->clk);
 
-       pm_runtime_disable(&pdev->dev);
 
        pinctrl_pm_select_sleep_state(&pdev->dev);
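
The remove path above follows the standard shape for drivers that enable runtime PM in probe: resume the hardware before unregistering so teardown can touch registers, then disable runtime PM and roll the bookkeeping back to suspended. Condensed (mirroring the hunks, not a drop-in):

        pm_runtime_get_sync(dev);       /* hw accessible during teardown */

        spi_unregister_master(master);
        spi->cfg->disable(spi);

        pm_runtime_disable(dev);
        pm_runtime_put_noidle(dev);     /* drop the reference, no callback */
        pm_runtime_set_suspended(dev);  /* bookkeeping back to suspended */
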
 
index b32f4ee..ca1b231 100644 (file)
@@ -25,7 +25,7 @@
 #include "target_core_alua.h"
 
 static sense_reason_t
-sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
+sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char, u32, bool);
 static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);
 
 static sense_reason_t
@@ -279,14 +279,14 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
 }
 
 static sense_reason_t
-sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
+sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops)
 {
        struct se_device *dev = cmd->se_dev;
        sector_t end_lba = dev->transport->get_blocks(dev) + 1;
        unsigned int sectors = sbc_get_write_same_sectors(cmd);
        sense_reason_t ret;
 
-       if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
+       if ((flags & 0x04) || (flags & 0x02)) {
                pr_err("WRITE_SAME PBDATA and LBDATA"
                        " bits not supported for Block Discard"
                        " Emulation\n");
@@ -308,7 +308,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
        }
 
        /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
-       if (flags[0] & 0x10) {
+       if (flags & 0x10) {
                pr_warn("WRITE SAME with ANCHOR not supported\n");
                return TCM_INVALID_CDB_FIELD;
        }
@@ -316,7 +316,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
         * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
         * translated into block discard requests within backend code.
         */
-       if (flags[0] & 0x08) {
+       if (flags & 0x08) {
                if (!ops->execute_unmap)
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
 
@@ -331,7 +331,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
        if (!ops->execute_write_same)
                return TCM_UNSUPPORTED_SCSI_OPCODE;
 
-       ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
+       ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true);
        if (ret)
                return ret;
 
@@ -717,10 +717,9 @@ sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_
 }
 
 static sense_reason_t
-sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
+sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect,
               u32 sectors, bool is_write)
 {
-       u8 protect = cdb[1] >> 5;
        int sp_ops = cmd->se_sess->sup_prot_ops;
        int pi_prot_type = dev->dev_attrib.pi_prot_type;
        bool fabric_prot = false;
@@ -768,7 +767,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
                fallthrough;
        default:
                pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
-                      "PROTECT: 0x%02x\n", cdb[0], protect);
+                      "PROTECT: 0x%02x\n", cmd->t_task_cdb[0], protect);
                return TCM_INVALID_CDB_FIELD;
        }
 
@@ -843,7 +842,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;
 
-               ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+               ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
                if (ret)
                        return ret;
 
@@ -857,7 +856,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;
 
-               ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+               ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
                if (ret)
                        return ret;
 
@@ -871,7 +870,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;
 
-               ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+               ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
                if (ret)
                        return ret;
 
@@ -892,7 +891,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;
 
-               ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+               ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
                if (ret)
                        return ret;
 
@@ -906,7 +905,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;
 
-               ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+               ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
                if (ret)
                        return ret;
 
@@ -921,7 +920,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                if (sbc_check_dpofua(dev, cmd, cdb))
                        return TCM_INVALID_CDB_FIELD;
 
-               ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+               ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
                if (ret)
                        return ret;
 
@@ -980,7 +979,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                        size = sbc_get_size(cmd, 1);
                        cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
 
-                       ret = sbc_setup_write_same(cmd, &cdb[10], ops);
+                       ret = sbc_setup_write_same(cmd, cdb[10], ops);
                        if (ret)
                                return ret;
                        break;
@@ -1079,7 +1078,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                size = sbc_get_size(cmd, 1);
                cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
 
-               ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+               ret = sbc_setup_write_same(cmd, cdb[1], ops);
                if (ret)
                        return ret;
                break;
@@ -1097,7 +1096,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
                 * of byte 1 bit 3 UNMAP instead of original reserved field
                 */
-               ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+               ret = sbc_setup_write_same(cmd, cdb[1], ops);
                if (ret)
                        return ret;
                break;
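
With the helpers taking the flags/protect byte by value, callers peel the fields out of the CDB themselves; the PROTECT field is simply the top three bits of byte 1. For example (CDB contents assumed for illustration):

        unsigned char b1 = 0xE8;        /* example CDB byte 1 of a WRITE(10) */
        u8 protect = b1 >> 5;           /* top 3 bits: WRPROTECT = 0x7 */
        bool fua = b1 & 0x08;           /* bit 3: FUA is set */
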
index 7e35edd..26ceabe 100644 (file)
@@ -886,7 +886,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
        INIT_WORK(&cmd->work, success ? target_complete_ok_work :
                  target_complete_failure_work);
 
-       if (wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
+       if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
                cpu = cmd->cpuid;
        else
                cpu = wwn->cmd_compl_affinity;
index fdf79bc..35d5908 100644 (file)
@@ -824,7 +824,7 @@ static struct usb_class_driver wdm_class = {
 };
 
 /* --- WWAN framework integration --- */
-#ifdef CONFIG_WWAN
+#ifdef CONFIG_WWAN_CORE
 static int wdm_wwan_port_start(struct wwan_port *port)
 {
        struct wdm_device *desc = wwan_port_get_drvdata(port);
@@ -963,11 +963,11 @@ static void wdm_wwan_rx(struct wdm_device *desc, int length)
        /* inbuf has been copied, it is safe to check for outstanding data */
        schedule_work(&desc->service_outs_intr);
 }
-#else /* CONFIG_WWAN */
+#else /* CONFIG_WWAN_CORE */
 static void wdm_wwan_init(struct wdm_device *desc) {}
 static void wdm_wwan_deinit(struct wdm_device *desc) {}
 static void wdm_wwan_rx(struct wdm_device *desc, int length) {}
-#endif /* CONFIG_WWAN */
+#endif /* CONFIG_WWAN_CORE */
 
 /* --- error handling --- */
 static void wdm_rxwork(struct work_struct *work)
index b974644..9618ba6 100644 (file)
@@ -1133,7 +1133,7 @@ static int do_proc_control(struct usb_dev_state *ps,
                "wIndex=%04x wLength=%04x\n",
                ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
                ctrl->wIndex, ctrl->wLength);
-       if (ctrl->bRequestType & 0x80) {
+       if ((ctrl->bRequestType & USB_DIR_IN) && ctrl->wLength) {
                pipe = usb_rcvctrlpipe(dev, 0);
                snoop_urb(dev, NULL, pipe, ctrl->wLength, tmo, SUBMIT, NULL, 0);
 
index d1efc71..86658a8 100644 (file)
@@ -48,6 +48,7 @@
 
 #define USB_TP_TRANSMISSION_DELAY      40      /* ns */
 #define USB_TP_TRANSMISSION_DELAY_MAX  65535   /* ns */
+#define USB_PING_RESPONSE_TIME         400     /* ns */
 
 /* Protect struct usb_device->state and ->children members
  * Note: Both are also protected by ->dev.sem, except that ->state can
@@ -182,8 +183,9 @@ int usb_device_supports_lpm(struct usb_device *udev)
 }
 
 /*
- * Set the Maximum Exit Latency (MEL) for the host to initiate a transition from
- * either U1 or U2.
+ * Set the Maximum Exit Latency (MEL) for the host to wake up the path from
+ * U1/U2, send a PING to the device and receive a PING_RESPONSE.
+ * See USB 3.1 section C.1.5.2
  */
 static void usb_set_lpm_mel(struct usb_device *udev,
                struct usb3_lpm_parameters *udev_lpm_params,
@@ -193,35 +195,37 @@ static void usb_set_lpm_mel(struct usb_device *udev,
                unsigned int hub_exit_latency)
 {
        unsigned int total_mel;
-       unsigned int device_mel;
-       unsigned int hub_mel;
 
        /*
-        * Calculate the time it takes to transition all links from the roothub
-        * to the parent hub into U0.  The parent hub must then decode the
-        * packet (hub header decode latency) to figure out which port it was
-        * bound for.
-        *
-        * The Hub Header decode latency is expressed in 0.1us intervals (0x1
-        * means 0.1us).  Multiply that by 100 to get nanoseconds.
+        * tMEL1. Time to transition the path from host to device into U0.
+        * MEL for parent already contains the delay up to parent, so only add
+        * the exit latency for the last link (pick the slower exit latency),
+        * and the hub header decode latency. See USB 3.1 section C 2.2.1
+        * Store MEL in nanoseconds
         */
        total_mel = hub_lpm_params->mel +
-               (hub->descriptor->u.ss.bHubHdrDecLat * 100);
+               max(udev_exit_latency, hub_exit_latency) * 1000 +
+               hub->descriptor->u.ss.bHubHdrDecLat * 100;
 
        /*
-        * How long will it take to transition the downstream hub's port into
-        * U0?  The greater of either the hub exit latency or the device exit
-        * latency.
-        *
-        * The BOS U1/U2 exit latencies are expressed in 1us intervals.
-        * Multiply that by 1000 to get nanoseconds.
+        * tMEL2. Time to submit PING packet. Sum of tTPTransmissionDelay for
+        * each link + wHubDelay for each hub. Add only for last link.
+        * tMEL4, the time for PING_RESPONSE to traverse upstream is similar.
+        * Multiply by 2 to include it as well.
         */
-       device_mel = udev_exit_latency * 1000;
-       hub_mel = hub_exit_latency * 1000;
-       if (device_mel > hub_mel)
-               total_mel += device_mel;
-       else
-               total_mel += hub_mel;
+       total_mel += (__le16_to_cpu(hub->descriptor->u.ss.wHubDelay) +
+                     USB_TP_TRANSMISSION_DELAY) * 2;
+
+       /*
+        * tMEL3, tPingResponse. Time taken by device to generate PING_RESPONSE
+        * after receiving PING. Also add 2100ns as stated in USB 3.1 C 1.5.2.4
+        * to cover the delay if the PING_RESPONSE is queued behind a Max Packet
+        * Size DP.
+        * Note these delays should be added only once for the entire path, so
+        * add them to the MEL of the device connected to the roothub.
+        */
+       if (!hub->hdev->parent)
+               total_mel += USB_PING_RESPONSE_TIME + 2100;
 
        udev_lpm_params->mel = total_mel;
 }
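
Putting the pieces of the new calculation together: parent-path MEL, the slower exit latency of the last link, hub header decode, both traversals of the per-hop transmission delays, and (once, at the roothub) the PING response budget. A worked example with assumed values:

        /* assumed: parent mel = 1000 ns, device U1 EL = 3 us, hub U1 EL = 2 us,
         * bHubHdrDecLat = 5 (0.5 us), wHubDelay = 400 ns, device on roothub */
        total_mel = 1000                /* path up to the parent hub */
                  + max(3, 2) * 1000    /* slower exit latency: 3000 ns */
                  + 5 * 100             /* hub header decode: 500 ns */
                  + (400 + 40) * 2      /* wHubDelay + tTP, PING + response: 880 ns */
                  + 400 + 2100;         /* PING_RESPONSE time + queuing allowance */
        /* total_mel = 7880 ns */
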
@@ -4113,6 +4117,47 @@ static int usb_set_lpm_timeout(struct usb_device *udev,
 }
 
 /*
+ * Don't allow device-initiated U1/U2 if the system exit latency + one bus
+ * interval is greater than the minimum service interval of any active
+ * periodic endpoint. See USB 3.2 section 9.4.9
+ */
+static bool usb_device_may_initiate_lpm(struct usb_device *udev,
+                                       enum usb3_link_state state)
+{
+       unsigned int sel;               /* us */
+       int i, j;
+
+       if (state == USB3_LPM_U1)
+               sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
+       else if (state == USB3_LPM_U2)
+               sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
+       else
+               return false;
+
+       for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
+               struct usb_interface *intf;
+               struct usb_endpoint_descriptor *desc;
+               unsigned int interval;
+
+               intf = udev->actconfig->interface[i];
+               if (!intf)
+                       continue;
+
+               for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++) {
+                       desc = &intf->cur_altsetting->endpoint[j].desc;
+
+                       if (usb_endpoint_xfer_int(desc) ||
+                           usb_endpoint_xfer_isoc(desc)) {
+                               interval = (1 << (desc->bInterval - 1)) * 125;
+                               if (sel + 125 > interval)
+                                       return false;
+                       }
+               }
+       }
+       return true;
+}
+
+/*
  * Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated
  * U1/U2 entry.
  *
@@ -4184,20 +4229,23 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
         * U1/U2_ENABLE
         */
        if (udev->actconfig &&
-           usb_set_device_initiated_lpm(udev, state, true) == 0) {
-               if (state == USB3_LPM_U1)
-                       udev->usb3_lpm_u1_enabled = 1;
-               else if (state == USB3_LPM_U2)
-                       udev->usb3_lpm_u2_enabled = 1;
-       } else {
-               /* Don't request U1/U2 entry if the device
-                * cannot transition to U1/U2.
-                */
-               usb_set_lpm_timeout(udev, state, 0);
-               hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
+           usb_device_may_initiate_lpm(udev, state)) {
+               if (usb_set_device_initiated_lpm(udev, state, true)) {
+                       /*
+                        * Request to enable device initiated U1/U2 failed,
+                        * better to turn off lpm in this case.
+                        */
+                       usb_set_lpm_timeout(udev, state, 0);
+                       hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
+                       return;
+               }
        }
-}
 
+       if (state == USB3_LPM_U1)
+               udev->usb3_lpm_u1_enabled = 1;
+       else if (state == USB3_LPM_U2)
+               udev->usb3_lpm_u2_enabled = 1;
+}
 /*
  * Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated
  * U1/U2 entry.
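
For the periodic-endpoint check added above, bInterval is an exponent for SuperSpeed interrupt and isochronous endpoints, so the service interval in microseconds is 2^(bInterval-1) * 125. A worked example with assumed values:

        /* bInterval = 1 (fastest): interval = (1 << 0) * 125 = 125 us.
         * With a U1 SEL of 10 us: 10 + 125 = 135 > 125, so device-initiated
         * U1 is refused; with bInterval = 4 (1000 us) it would be allowed. */
        interval = (1 << (desc->bInterval - 1)) * 125;
        if (sel + 125 > interval)
                return false;
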
index 6114cf8..8239fe7 100644 (file)
@@ -501,10 +501,6 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* DJI CineSSD */
        { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
 
-       /* Fibocom L850-GL LTE Modem */
-       { USB_DEVICE(0x2cb7, 0x0007), .driver_info =
-                       USB_QUIRK_IGNORE_REMOTE_WAKEUP },
-
        /* INTEL VALUE SSD */
        { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
 
index ab6b815..483de2b 100644 (file)
@@ -383,6 +383,9 @@ enum dwc2_ep0_state {
  *                     0 - No (default)
  *                     1 - Partial power down
  *                     2 - Hibernation
+ * @no_clock_gating:   Specifies whether to avoid the clock gating feature.
+ *                     0 - No (use clock gating)
+ *                     1 - Yes (avoid it)
  * @lpm:               Enable LPM support.
  *                     0 - No
  *                     1 - Yes
@@ -480,6 +483,7 @@ struct dwc2_core_params {
 #define DWC2_POWER_DOWN_PARAM_NONE             0
 #define DWC2_POWER_DOWN_PARAM_PARTIAL          1
 #define DWC2_POWER_DOWN_PARAM_HIBERNATION      2
+       bool no_clock_gating;
 
        bool lpm;
        bool lpm_clock_gating;
index a5ab038..a5c52b2 100644 (file)
@@ -556,7 +556,8 @@ static void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg)
                                 * If neither hibernation nor partial power down are supported,
                                 * clock gating is used to save power.
                                 */
-                               dwc2_gadget_enter_clock_gating(hsotg);
+                               if (!hsotg->params.no_clock_gating)
+                                       dwc2_gadget_enter_clock_gating(hsotg);
                        }
 
                        /*
index c581ee4..3146df6 100644 (file)
@@ -2749,12 +2749,14 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
                return;
        }
 
-       /* Zlp for all endpoints, for ep0 only in DATA IN stage */
+       /* ZLP for all endpoints in non-DDMA mode, for ep0 only in DATA IN stage */
        if (hs_ep->send_zlp) {
-               dwc2_hsotg_program_zlp(hsotg, hs_ep);
                hs_ep->send_zlp = 0;
-               /* transfer will be completed on next complete interrupt */
-               return;
+               if (!using_desc_dma(hsotg)) {
+                       dwc2_hsotg_program_zlp(hsotg, hs_ep);
+                       /* transfer will be completed on next complete interrupt */
+                       return;
+               }
        }
 
        if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
@@ -3900,9 +3902,27 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
                                         __func__);
                }
        } else {
+               /* Mask GINTSTS_GOUTNAKEFF interrupt */
+               dwc2_hsotg_disable_gsint(hsotg, GINTSTS_GOUTNAKEFF);
+
                if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF))
                        dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
 
+               if (!using_dma(hsotg)) {
+                       /* Wait for GINTSTS_RXFLVL interrupt */
+                       if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
+                                                   GINTSTS_RXFLVL, 100)) {
+                               dev_warn(hsotg->dev, "%s: timeout GINTSTS.RXFLVL\n",
+                                        __func__);
+                       } else {
+                               /*
+                                * Pop GLOBAL OUT NAK status packet from RxFIFO
+                                * to assert GOUTNAKEFF interrupt
+                                */
+                               dwc2_readl(hsotg, GRXSTSP);
+                       }
+               }
+
                /* Wait for global nak to take effect */
                if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
                                            GINTSTS_GOUTNAKEFF, 100))
@@ -4348,6 +4368,9 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
                epctl = dwc2_readl(hs, epreg);
 
                if (value) {
+                       /* Unmask GOUTNAKEFF interrupt */
+                       dwc2_hsotg_en_gsint(hs, GINTSTS_GOUTNAKEFF);
+
                        if (!(dwc2_readl(hs, GINTSTS) & GINTSTS_GOUTNAKEFF))
                                dwc2_set_bit(hs, DCTL, DCTL_SGOUTNAK);
                        // STALL bit will be set in GOUTNAKEFF interrupt handler
index 035d491..2a78289 100644 (file)
@@ -3338,7 +3338,8 @@ int dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
                 * If neither hibernation nor partial power down is supported,
                 * clock gating is used to save power.
                 */
-               dwc2_host_enter_clock_gating(hsotg);
+               if (!hsotg->params.no_clock_gating)
+                       dwc2_host_enter_clock_gating(hsotg);
                break;
        }
 
@@ -4402,7 +4403,8 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
                 * If neither hibernation nor partial power down is supported,
                 * clock gating is used to save power.
                 */
-               dwc2_host_enter_clock_gating(hsotg);
+               if (!hsotg->params.no_clock_gating)
+                       dwc2_host_enter_clock_gating(hsotg);
 
                /* After entering suspend, hardware is not accessible */
                clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
index 67c5eb1..59e1193 100644 (file)
@@ -76,6 +76,7 @@ static void dwc2_set_s3c6400_params(struct dwc2_hsotg *hsotg)
        struct dwc2_core_params *p = &hsotg->params;
 
        p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+       p->no_clock_gating = true;
        p->phy_utmi_width = 8;
 }
 
index dccdf13..5991766 100644 (file)
@@ -1279,6 +1279,7 @@ struct dwc3 {
        unsigned                dis_metastability_quirk:1;
 
        unsigned                dis_split_quirk:1;
+       unsigned                async_callbacks:1;
 
        u16                     imod_interval;
 };
index 3cd2942..2f9e45e 100644 (file)
@@ -597,11 +597,13 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
 
 static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
 {
-       int ret;
+       int ret = -EINVAL;
 
-       spin_unlock(&dwc->lock);
-       ret = dwc->gadget_driver->setup(dwc->gadget, ctrl);
-       spin_lock(&dwc->lock);
+       if (dwc->async_callbacks) {
+               spin_unlock(&dwc->lock);
+               ret = dwc->gadget_driver->setup(dwc->gadget, ctrl);
+               spin_lock(&dwc->lock);
+       }
        return ret;
 }
 
index af6d7f1..45f2bc0 100644 (file)
@@ -2585,6 +2585,16 @@ static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned int mA)
        return ret;
 }
 
+static void dwc3_gadget_async_callbacks(struct usb_gadget *g, bool enable)
+{
+       struct dwc3             *dwc = gadget_to_dwc(g);
+       unsigned long           flags;
+
+       spin_lock_irqsave(&dwc->lock, flags);
+       dwc->async_callbacks = enable;
+       spin_unlock_irqrestore(&dwc->lock, flags);
+}
+
 static const struct usb_gadget_ops dwc3_gadget_ops = {
        .get_frame              = dwc3_gadget_get_frame,
        .wakeup                 = dwc3_gadget_wakeup,
@@ -2596,6 +2606,7 @@ static const struct usb_gadget_ops dwc3_gadget_ops = {
        .udc_set_ssp_rate       = dwc3_gadget_set_ssp_rate,
        .get_config_params      = dwc3_gadget_config_params,
        .vbus_draw              = dwc3_gadget_vbus_draw,
+       .udc_async_callbacks    = dwc3_gadget_async_callbacks,
 };
 
 /* -------------------------------------------------------------------------- */
@@ -3231,7 +3242,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
 
 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
 {
-       if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
+       if (dwc->async_callbacks && dwc->gadget_driver->disconnect) {
                spin_unlock(&dwc->lock);
                dwc->gadget_driver->disconnect(dwc->gadget);
                spin_lock(&dwc->lock);
@@ -3240,7 +3251,7 @@ static void dwc3_disconnect_gadget(struct dwc3 *dwc)
 
 static void dwc3_suspend_gadget(struct dwc3 *dwc)
 {
-       if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
+       if (dwc->async_callbacks && dwc->gadget_driver->suspend) {
                spin_unlock(&dwc->lock);
                dwc->gadget_driver->suspend(dwc->gadget);
                spin_lock(&dwc->lock);
@@ -3249,7 +3260,7 @@ static void dwc3_suspend_gadget(struct dwc3 *dwc)
 
 static void dwc3_resume_gadget(struct dwc3 *dwc)
 {
-       if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+       if (dwc->async_callbacks && dwc->gadget_driver->resume) {
                spin_unlock(&dwc->lock);
                dwc->gadget_driver->resume(dwc->gadget);
                spin_lock(&dwc->lock);
@@ -3261,7 +3272,7 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
        if (!dwc->gadget_driver)
                return;
 
-       if (dwc->gadget->speed != USB_SPEED_UNKNOWN) {
+       if (dwc->async_callbacks && dwc->gadget->speed != USB_SPEED_UNKNOWN) {
                spin_unlock(&dwc->lock);
                usb_gadget_udc_reset(dwc->gadget, dwc->gadget_driver);
                spin_lock(&dwc->lock);
@@ -3585,7 +3596,7 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
         * implemented.
         */
 
-       if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+       if (dwc->async_callbacks && dwc->gadget_driver->resume) {
                spin_unlock(&dwc->lock);
                dwc->gadget_driver->resume(dwc->gadget);
                spin_lock(&dwc->lock);
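The dwc3 hunks above gate every gadget-driver callback on the new async_callbacks flag, flipped under the controller lock through the udc_async_callbacks gadget op, so events arriving before the UDC core is ready are dropped rather than delivered into half-initialized driver state. A hedged sketch of the gating pattern, with locking and types simplified for illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct dev {
    pthread_mutex_t lock;
    bool async_callbacks;
    void (*disconnect)(void);
};

static void on_disconnect(void) { printf("driver disconnect callback\n"); }

/* Event handler: only call out to the driver when callbacks are enabled,
 * dropping the lock around the call as the dwc3 code does. */
static void handle_disconnect(struct dev *d)
{
    pthread_mutex_lock(&d->lock);
    if (d->async_callbacks && d->disconnect) {
        pthread_mutex_unlock(&d->lock);
        d->disconnect();
        pthread_mutex_lock(&d->lock);
    }
    pthread_mutex_unlock(&d->lock);
}

int main(void)
{
    struct dev d = { PTHREAD_MUTEX_INITIALIZER, false, on_disconnect };
    handle_disconnect(&d);      /* suppressed: callbacks not enabled yet */
    d.async_callbacks = true;   /* what udc_async_callbacks(g, true) flips */
    handle_disconnect(&d);      /* now delivered */
    return 0;
}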
index bffef8e..281ca76 100644 (file)
@@ -1198,7 +1198,7 @@ void gserial_free_line(unsigned char port_num)
        struct gs_port  *port;
 
        mutex_lock(&ports[port_num].lock);
-       if (WARN_ON(!ports[port_num].port)) {
+       if (!ports[port_num].port) {
                mutex_unlock(&ports[port_num].lock);
                return;
        }
index 8e85889..15db7a3 100644 (file)
@@ -586,6 +586,7 @@ static int qe_ep_init(struct qe_udc *udc,
                        case USB_SPEED_FULL:
                                if (max <= 1023)
                                        break;
+                               fallthrough;
                        default:
                                goto en_done;
                        }
index a54d1ce..c0ca714 100644 (file)
@@ -3853,6 +3853,7 @@ static int tegra_xudc_probe(struct platform_device *pdev)
        return 0;
 
 free_eps:
+       pm_runtime_disable(&pdev->dev);
        tegra_xudc_free_eps(xudc);
 free_event_ring:
        tegra_xudc_free_event_ring(xudc);
index 36f5bf6..10b0365 100644 (file)
@@ -703,24 +703,28 @@ EXPORT_SYMBOL_GPL(ehci_setup);
 static irqreturn_t ehci_irq (struct usb_hcd *hcd)
 {
        struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
-       u32                     status, masked_status, pcd_status = 0, cmd;
+       u32                     status, current_status, masked_status, pcd_status = 0;
+       u32                     cmd;
        int                     bh;
 
        spin_lock(&ehci->lock);
 
-       status = ehci_readl(ehci, &ehci->regs->status);
+       status = 0;
+       current_status = ehci_readl(ehci, &ehci->regs->status);
+restart:
 
        /* e.g. cardbus physical eject */
-       if (status == ~(u32) 0) {
+       if (current_status == ~(u32) 0) {
                ehci_dbg (ehci, "device removed\n");
                goto dead;
        }
+       status |= current_status;
 
        /*
         * We don't use STS_FLR, but some controllers don't like it to
         * remain on, so mask it out along with the other status bits.
         */
-       masked_status = status & (INTR_MASK | STS_FLR);
+       masked_status = current_status & (INTR_MASK | STS_FLR);
 
        /* Shared IRQ? */
        if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
@@ -730,6 +734,12 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
 
        /* clear (just) interrupts */
        ehci_writel(ehci, masked_status, &ehci->regs->status);
+
+       /* For edge interrupts, don't race with an interrupt bit being raised */
+       current_status = ehci_readl(ehci, &ehci->regs->status);
+       if (current_status & INTR_MASK)
+               goto restart;
+
        cmd = ehci_readl(ehci, &ehci->regs->command);
        bh = 0;
 
index e7a8e06..59cc1bc 100644 (file)
@@ -153,8 +153,6 @@ struct max3421_hcd {
         */
        struct urb *curr_urb;
        enum scheduling_pass sched_pass;
-       struct usb_device *loaded_dev;  /* dev that's loaded into the chip */
-       int loaded_epnum;               /* epnum whose toggles are loaded */
        int urb_done;                   /* > 0 -> no errors, < 0: errno */
        size_t curr_len;
        u8 hien;
@@ -492,39 +490,17 @@ max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev)
  * Caller must NOT hold HCD spinlock.
  */
 static void
-max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
-                   int force_toggles)
+max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum)
 {
-       struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
-       int old_epnum, same_ep, rcvtog, sndtog;
-       struct usb_device *old_dev;
+       int rcvtog, sndtog;
        u8 hctl;
 
-       old_dev = max3421_hcd->loaded_dev;
-       old_epnum = max3421_hcd->loaded_epnum;
-
-       same_ep = (dev == old_dev && epnum == old_epnum);
-       if (same_ep && !force_toggles)
-               return;
-
-       if (old_dev && !same_ep) {
-               /* save the old end-points toggles: */
-               u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
-
-               rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
-               sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
-
-               /* no locking: HCD (i.e., we) own toggles, don't we? */
-               usb_settoggle(old_dev, old_epnum, 0, rcvtog);
-               usb_settoggle(old_dev, old_epnum, 1, sndtog);
-       }
        /* setup new endpoint's toggle bits: */
        rcvtog = usb_gettoggle(dev, epnum, 0);
        sndtog = usb_gettoggle(dev, epnum, 1);
        hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) |
                BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
 
-       max3421_hcd->loaded_epnum = epnum;
        spi_wr8(hcd, MAX3421_REG_HCTL, hctl);
 
        /*
@@ -532,7 +508,6 @@ max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
         * address-assignment so it's best to just always load the
         * address whenever the end-point changed/was forced.
         */
-       max3421_hcd->loaded_dev = dev;
        spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum);
 }
 
@@ -667,7 +642,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd)
        struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
        struct urb *urb, *curr_urb = NULL;
        struct max3421_ep *max3421_ep;
-       int epnum, force_toggles = 0;
+       int epnum;
        struct usb_host_endpoint *ep;
        struct list_head *pos;
        unsigned long flags;
@@ -777,7 +752,6 @@ done:
                        usb_settoggle(urb->dev, epnum, 0, 1);
                        usb_settoggle(urb->dev, epnum, 1, 1);
                        max3421_ep->pkt_state = PKT_STATE_SETUP;
-                       force_toggles = 1;
                } else
                        max3421_ep->pkt_state = PKT_STATE_TRANSFER;
        }
@@ -785,7 +759,7 @@ done:
        spin_unlock_irqrestore(&max3421_hcd->lock, flags);
 
        max3421_ep->last_active = max3421_hcd->frame_number;
-       max3421_set_address(hcd, urb->dev, epnum, force_toggles);
+       max3421_set_address(hcd, urb->dev, epnum);
        max3421_set_speed(hcd, urb->dev);
        max3421_next_transfer(hcd, 0);
        return 1;
@@ -1379,6 +1353,16 @@ max3421_urb_done(struct usb_hcd *hcd)
                status = 0;
        urb = max3421_hcd->curr_urb;
        if (urb) {
+               /* save the old end-points toggles: */
+               u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
+               int rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
+               int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
+               int epnum = usb_endpoint_num(&urb->ep->desc);
+
+               /* no locking: HCD (i.e., we) own toggles, don't we? */
+               usb_settoggle(urb->dev, epnum, 0, rcvtog);
+               usb_settoggle(urb->dev, epnum, 1, sndtog);
+
                max3421_hcd->curr_urb = NULL;
                spin_lock_irqsave(&max3421_hcd->lock, flags);
                usb_hcd_unlink_urb_from_ep(hcd, urb);
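The max3421 hunks drop the cached loaded_dev/loaded_epnum/force_toggles machinery and instead read HRSL and write the endpoint's data toggles back at URB completion, so the saved toggles always reflect the transfer that actually finished. A hedged sketch of extracting the two toggle bits from a result register; the bit positions are illustrative, not the datasheet's:

#include <stdio.h>
#include <stdint.h>

#define RCVTOGRD_BIT 2   /* illustrative bit positions */
#define SNDTOGRD_BIT 3

struct ep_state { int toggle[2]; };   /* [0] = OUT/rcv, [1] = IN/snd */

/* On completion, snapshot the controller's current toggle values back
 * into the endpoint state, mirroring usb_settoggle() in the driver. */
static void save_toggles(struct ep_state *ep, uint8_t hrsl)
{
    ep->toggle[0] = (hrsl >> RCVTOGRD_BIT) & 1;
    ep->toggle[1] = (hrsl >> SNDTOGRD_BIT) & 1;
}

int main(void)
{
    struct ep_state ep = { { 0, 0 } };
    save_toggles(&ep, 0x08);    /* pretend HRSL says sndtog=1, rcvtog=0 */
    printf("rcvtog=%d sndtog=%d\n", ep.toggle[0], ep.toggle[1]);
    return 0;
}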
index e9b18fc..151e93c 100644 (file)
@@ -1638,11 +1638,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
         * Inform the usbcore about resume-in-progress by returning
         * a non-zero value even if there are no status changes.
         */
+       spin_lock_irqsave(&xhci->lock, flags);
+
        status = bus_state->resuming_ports;
 
        mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
 
-       spin_lock_irqsave(&xhci->lock, flags);
        /* For each port, did anything change?  If so, set that bit in buf. */
        for (i = 0; i < max_ports; i++) {
                temp = readl(ports[i]->addr);
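The xhci hunk moves spin_lock_irqsave above the read of bus_state->resuming_ports so that field is sampled under xhci->lock rather than racily before it. A minimal pthread-based sketch of the rule that shared state is read only inside the critical section; names are illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hc_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int resuming_ports;   /* written by another context */

/* Sample shared state only inside the critical section; reading it
 * before taking the lock is the race the hunk above closes. */
static unsigned int hub_status(void)
{
    unsigned int status;

    pthread_mutex_lock(&hc_lock);
    status = resuming_ports;
    pthread_mutex_unlock(&hc_lock);
    return status;
}

int main(void)
{
    resuming_ports = 0x2;
    printf("status=0x%x\n", hub_status());
    return 0;
}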
index 1da6479..5923844 100644 (file)
@@ -207,8 +207,7 @@ static int renesas_check_rom_state(struct pci_dev *pdev)
                        return 0;
 
                case RENESAS_ROM_STATUS_NO_RESULT: /* No result yet */
-                       dev_dbg(&pdev->dev, "Unknown ROM status ...\n");
-                       break;
+                       return 0;
 
                case RENESAS_ROM_STATUS_ERROR: /* Error State */
                default: /* All other states are marked as "Reserved states" */
@@ -225,12 +224,13 @@ static int renesas_fw_check_running(struct pci_dev *pdev)
        u8 fw_state;
        int err;
 
-       /*
-        * Only if device has ROM and loaded FW we can skip loading and
-        * return success. Otherwise (even unknown state), attempt to load FW.
-        */
-       if (renesas_check_rom(pdev) && !renesas_check_rom_state(pdev))
-               return 0;
+       /* Check if the device has ROM and the firmware is loaded; if so, skip everything */
+       err = renesas_check_rom(pdev);
+       if (err) { /* we have rom */
+               err = renesas_check_rom_state(pdev);
+               if (!err)
+                       return err;
+       }
 
        /*
         * Test if the device actually needs the firmware. As most
index 18c2bbd..1c9a795 100644 (file)
@@ -636,7 +636,14 @@ static const struct pci_device_id pci_ids[] = {
        { /* end: all zeroes */ }
 };
 MODULE_DEVICE_TABLE(pci, pci_ids);
+
+/*
+ * Without CONFIG_USB_XHCI_PCI_RENESAS renesas_xhci_check_request_fw() won't
+ * load firmware, so don't encumber the xhci-pci driver with it.
+ */
+#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS)
 MODULE_FIRMWARE("renesas_usb_fw.mem");
+#endif
 
 /* pci driver glue; this is a "new style" PCI driver module */
 static struct pci_driver xhci_pci_driver = {
index 83ed508..1b24492 100644 (file)
@@ -86,10 +86,10 @@ static struct usb_phy *__device_to_usb_phy(struct device *dev)
 
        list_for_each_entry(usb_phy, &phy_list, head) {
                if (usb_phy->dev == dev)
-                       break;
+                       return usb_phy;
        }
 
-       return usb_phy;
+       return NULL;
 }
 
 static void usb_phy_set_default_current(struct usb_phy *usb_phy)
@@ -150,8 +150,14 @@ static int usb_phy_uevent(struct device *dev, struct kobj_uevent_env *env)
        struct usb_phy *usb_phy;
        char uchger_state[50] = { 0 };
        char uchger_type[50] = { 0 };
+       unsigned long flags;
 
+       spin_lock_irqsave(&phy_lock, flags);
        usb_phy = __device_to_usb_phy(dev);
+       spin_unlock_irqrestore(&phy_lock, flags);
+
+       if (!usb_phy)
+               return -ENODEV;
 
        snprintf(uchger_state, ARRAY_SIZE(uchger_state),
                 "USB_CHARGER_STATE=%s", usb_chger_state[usb_phy->chg_state]);
index b5e7991..a3c2b01 100644 (file)
@@ -101,6 +101,8 @@ static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
 #define usbhsf_dma_map(p)      __usbhsf_dma_map_ctrl(p, 1)
 #define usbhsf_dma_unmap(p)    __usbhsf_dma_map_ctrl(p, 0)
 static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
+static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable);
+static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable);
 struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
 {
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
@@ -123,6 +125,11 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
                if (chan) {
                        dmaengine_terminate_all(chan);
                        usbhsf_dma_unmap(pkt);
+               } else {
+                       if (usbhs_pipe_is_dir_in(pipe))
+                               usbhsf_rx_irq_ctrl(pipe, 0);
+                       else
+                               usbhsf_tx_irq_ctrl(pipe, 0);
                }
 
                usbhs_pipe_clear_without_sequence(pipe, 0, 0);
index 09b845d..3c80bfb 100644 (file)
@@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
        { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
        { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
+       { USB_DEVICE(0x10C4, 0x8A5B) }, /* CEL EM3588 ZigBee USB Stick */
        { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
        { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
@@ -202,8 +203,8 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
        { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
        { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
-       { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 Display serial interface */
-       { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 M.2 Key E serial interface */
+       { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 M.2 Key E serial interface */
+       { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 Display serial interface */
        { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
        { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
        { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
index 7608584..0fbe253 100644 (file)
@@ -238,6 +238,7 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_UC15                   0x9090
 /* These u-blox products use Qualcomm's vendor ID */
 #define UBLOX_PRODUCT_R410M                    0x90b2
+#define UBLOX_PRODUCT_R6XX                     0x90fa
 /* These Yuga products use Qualcomm's vendor ID */
 #define YUGA_PRODUCT_CLM920_NC5                        0x9625
 
@@ -1101,6 +1102,8 @@ static const struct usb_device_id option_ids[] = {
        /* u-blox products using Qualcomm vendor ID */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
          .driver_info = RSVD(1) | RSVD(3) },
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX),
+         .driver_info = RSVD(3) },
        /* Quectel products using Quectel vendor ID */
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff),
          .driver_info = NUMEP2 },
index f9677a5..c35a6db 100644 (file)
@@ -45,6 +45,13 @@ UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
 
+/* Reported-by: Julian Sikorski <belegdol@gmail.com> */
+UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
+               "LaCie",
+               "Rugged USB3-FW",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_IGNORE_UAS),
+
 /*
  * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
  * commands in UAS mode.  Observed with the 1.28 firmware; are there others?
index 6eaeba9..e7745d1 100644 (file)
@@ -686,6 +686,15 @@ static int stusb160x_probe(struct i2c_client *client)
                return -ENODEV;
 
        /*
+        * This fwnode has a "compatible" property, but is never populated as a
+        * struct device. Instead we simply parse it to read the properties.
+        * This breaks fw_devlink=on. To maintain backward compatibility
+        * with existing DT files, we work around this by deleting any
+        * fwnode_links to/from this fwnode.
+        */
+       fw_devlink_purge_absent_suppliers(fwnode);
+
+       /*
         * When both VDD and VSYS power supplies are present, the low power
         * supply VSYS is selected when VSYS voltage is above 3.1 V.
         * Otherwise VDD is selected.
@@ -739,10 +748,6 @@ static int stusb160x_probe(struct i2c_client *client)
        typec_set_pwr_opmode(chip->port, chip->pwr_opmode);
 
        if (client->irq) {
-               ret = stusb160x_irq_init(chip, client->irq);
-               if (ret)
-                       goto port_unregister;
-
                chip->role_sw = fwnode_usb_role_switch_get(fwnode);
                if (IS_ERR(chip->role_sw)) {
                        ret = PTR_ERR(chip->role_sw);
@@ -752,6 +757,10 @@ static int stusb160x_probe(struct i2c_client *client)
                                        ret);
                        goto port_unregister;
                }
+
+               ret = stusb160x_irq_init(chip, client->irq);
+               if (ret)
+                       goto role_sw_put;
        } else {
                /*
                 * If Source or Dual power role, need to enable VDD supply
@@ -775,6 +784,9 @@ static int stusb160x_probe(struct i2c_client *client)
 
        return 0;
 
+role_sw_put:
+       if (chip->role_sw)
+               usb_role_switch_put(chip->role_sw);
 port_unregister:
        typec_unregister_port(chip->port);
 all_reg_disable:
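The stusb160x hunks reorder probe so the IRQ is requested only after the role switch has been acquired, and add a role_sw_put unwind label so a late failure releases everything taken so far in reverse order. A sketch of the goto-unwind convention; the resource names are placeholders:

#include <stdio.h>

static void register_port(void)   { puts("register port"); }
static void unregister_port(void) { puts("unregister port"); }
static int  get_role_switch(void) { puts("get role switch"); return 0; }
static void put_role_switch(void) { puts("put role switch"); }
static int  init_irq(void)        { return -1; }   /* force the error path */

/* Acquire resources in order; on failure, fall through the labels so
 * everything acquired before the failing step is released in reverse. */
static int probe(void)
{
    int ret;

    register_port();

    ret = get_role_switch();
    if (ret)
        goto port_unregister;

    /* Request the IRQ last: its handler may use the role switch. */
    ret = init_irq();
    if (ret)
        goto role_sw_put;

    return 0;

role_sw_put:
    put_role_switch();
port_unregister:
    unregister_port();
    return ret;
}

int main(void)
{
    printf("probe() = %d\n", probe());
    return 0;
}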
index 938219b..21b3ae2 100644 (file)
@@ -629,6 +629,15 @@ static int tps6598x_probe(struct i2c_client *client)
        if (!fwnode)
                return -ENODEV;
 
+       /*
+        * This fwnode has a "compatible" property, but is never populated as a
+        * struct device. Instead we simply parse it to read the properties.
+        * This breaks fw_devlink=on. To maintain backward compatibility
+        * with existing DT files, we work around this by deleting any
+        * fwnode_links to/from this fwnode.
+        */
+       fw_devlink_purge_absent_suppliers(fwnode);
+
        tps->role_sw = fwnode_usb_role_switch_get(fwnode);
        if (IS_ERR(tps->role_sw)) {
                ret = PTR_ERR(tps->role_sw);
index 98f1930..1c85514 100644 (file)
@@ -970,13 +970,11 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
                fb_var_to_videomode(&mode2, &info->var);
                /* make sure we don't delete the videomode of current var */
                ret = fb_mode_is_equal(&mode1, &mode2);
-
-               if (!ret)
-                       fbcon_mode_deleted(info, &mode1);
-
-               if (!ret)
-                       fb_delete_videomode(&mode1, &info->modelist);
-
+               if (!ret) {
+                       ret = fbcon_mode_deleted(info, &mode1);
+                       if (!ret)
+                               fb_delete_videomode(&mode1, &info->modelist);
+               }
 
                return ret ? -EINVAL : 0;
        }
index ffbf900..438e2c7 100644 (file)
@@ -241,6 +241,8 @@ xilinx_fb_blank(int blank_mode, struct fb_info *fbi)
        case FB_BLANK_POWERDOWN:
                /* turn off panel */
                xilinx_fb_out32(drvdata, REG_CTRL, 0);
+               break;
+
        default:
                break;
        }
index 546dfc1..0bc7046 100644 (file)
@@ -487,6 +487,7 @@ config FTWDT010_WATCHDOG
 config IXP4XX_WATCHDOG
        tristate "IXP4xx Watchdog"
        depends on ARCH_IXP4XX
+       select WATCHDOG_CORE
        help
          Say Y here to include support for the watchdog timer
          in the Intel IXP4xx network processors. This driver can
index aae29dc..2693ffb 100644 (file)
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * drivers/char/watchdog/ixp4xx_wdt.c
  *
  * Watchdog driver for Intel IXP4xx network processors
  *
  * Author: Deepak Saxena <dsaxena@plexity.net>
+ * Author: Linus Walleij <linus.walleij@linaro.org>
  *
  * Copyright 2004 (c) MontaVista, Software, Inc.
  * Based on sa1100 driver, Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
- *
- * This file is licensed under  the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/module.h>
-#include <linux/moduleparam.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/of.h>
 #include <linux/watchdog.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/uaccess.h>
-#include <mach/hardware.h>
+#include <linux/bits.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/soc/ixp4xx/cpu.h>
+
+struct ixp4xx_wdt {
+       struct watchdog_device wdd;
+       void __iomem *base;
+       unsigned long rate;
+};
 
-static bool nowayout = WATCHDOG_NOWAYOUT;
-static int heartbeat = 60;     /* (secs) Default is 1 minute */
-static unsigned long wdt_status;
-static unsigned long boot_status;
-static DEFINE_SPINLOCK(wdt_lock);
+/* Fallback if we do not have a clock for this */
+#define IXP4XX_TIMER_FREQ      66666000
 
-#define WDT_TICK_RATE (IXP4XX_PERIPHERAL_BUS_CLOCK * 1000000UL)
+/* Registers after the timer registers */
+#define IXP4XX_OSWT_OFFSET     0x14  /* Watchdog Timer */
+#define IXP4XX_OSWE_OFFSET     0x18  /* Watchdog Enable */
+#define IXP4XX_OSWK_OFFSET     0x1C  /* Watchdog Key */
+#define IXP4XX_OSST_OFFSET     0x20  /* Timer Status */
 
-#define        WDT_IN_USE              0
-#define        WDT_OK_TO_CLOSE         1
+#define IXP4XX_OSST_TIMER_WDOG_PEND    0x00000008
+#define IXP4XX_OSST_TIMER_WARM_RESET   0x00000010
+#define IXP4XX_WDT_KEY                 0x0000482E
+#define IXP4XX_WDT_RESET_ENABLE                0x00000001
+#define IXP4XX_WDT_IRQ_ENABLE          0x00000002
+#define IXP4XX_WDT_COUNT_ENABLE                0x00000004
 
-static void wdt_enable(void)
+static inline
+struct ixp4xx_wdt *to_ixp4xx_wdt(struct watchdog_device *wdd)
 {
-       spin_lock(&wdt_lock);
-       *IXP4XX_OSWK = IXP4XX_WDT_KEY;
-       *IXP4XX_OSWE = 0;
-       *IXP4XX_OSWT = WDT_TICK_RATE * heartbeat;
-       *IXP4XX_OSWE = IXP4XX_WDT_COUNT_ENABLE | IXP4XX_WDT_RESET_ENABLE;
-       *IXP4XX_OSWK = 0;
-       spin_unlock(&wdt_lock);
+       return container_of(wdd, struct ixp4xx_wdt, wdd);
 }
 
-static void wdt_disable(void)
+static int ixp4xx_wdt_start(struct watchdog_device *wdd)
 {
-       spin_lock(&wdt_lock);
-       *IXP4XX_OSWK = IXP4XX_WDT_KEY;
-       *IXP4XX_OSWE = 0;
-       *IXP4XX_OSWK = 0;
-       spin_unlock(&wdt_lock);
-}
+       struct ixp4xx_wdt *iwdt = to_ixp4xx_wdt(wdd);
 
-static int ixp4xx_wdt_open(struct inode *inode, struct file *file)
-{
-       if (test_and_set_bit(WDT_IN_USE, &wdt_status))
-               return -EBUSY;
+       __raw_writel(IXP4XX_WDT_KEY, iwdt->base + IXP4XX_OSWK_OFFSET);
+       __raw_writel(0, iwdt->base + IXP4XX_OSWE_OFFSET);
+       __raw_writel(wdd->timeout * iwdt->rate,
+                    iwdt->base + IXP4XX_OSWT_OFFSET);
+       __raw_writel(IXP4XX_WDT_COUNT_ENABLE | IXP4XX_WDT_RESET_ENABLE,
+                    iwdt->base + IXP4XX_OSWE_OFFSET);
+       __raw_writel(0, iwdt->base + IXP4XX_OSWK_OFFSET);
 
-       clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-       wdt_enable();
-       return stream_open(inode, file);
+       return 0;
 }
 
-static ssize_t
-ixp4xx_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos)
+static int ixp4xx_wdt_stop(struct watchdog_device *wdd)
 {
-       if (len) {
-               if (!nowayout) {
-                       size_t i;
-
-                       clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-
-                       for (i = 0; i != len; i++) {
-                               char c;
-
-                               if (get_user(c, data + i))
-                                       return -EFAULT;
-                               if (c == 'V')
-                                       set_bit(WDT_OK_TO_CLOSE, &wdt_status);
-                       }
-               }
-               wdt_enable();
-       }
-       return len;
-}
+       struct ixp4xx_wdt *iwdt = to_ixp4xx_wdt(wdd);
 
-static const struct watchdog_info ident = {
-       .options        = WDIOF_CARDRESET | WDIOF_MAGICCLOSE |
-                         WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
-       .identity       = "IXP4xx Watchdog",
-};
+       __raw_writel(IXP4XX_WDT_KEY, iwdt->base + IXP4XX_OSWK_OFFSET);
+       __raw_writel(0, iwdt->base + IXP4XX_OSWE_OFFSET);
+       __raw_writel(0, iwdt->base + IXP4XX_OSWK_OFFSET);
 
-
-static long ixp4xx_wdt_ioctl(struct file *file, unsigned int cmd,
-                                                       unsigned long arg)
-{
-       int ret = -ENOTTY;
-       int time;
-
-       switch (cmd) {
-       case WDIOC_GETSUPPORT:
-               ret = copy_to_user((struct watchdog_info *)arg, &ident,
-                                  sizeof(ident)) ? -EFAULT : 0;
-               break;
-
-       case WDIOC_GETSTATUS:
-               ret = put_user(0, (int *)arg);
-               break;
-
-       case WDIOC_GETBOOTSTATUS:
-               ret = put_user(boot_status, (int *)arg);
-               break;
-
-       case WDIOC_KEEPALIVE:
-               wdt_enable();
-               ret = 0;
-               break;
-
-       case WDIOC_SETTIMEOUT:
-               ret = get_user(time, (int *)arg);
-               if (ret)
-                       break;
-
-               if (time <= 0 || time > 60) {
-                       ret = -EINVAL;
-                       break;
-               }
-
-               heartbeat = time;
-               wdt_enable();
-               fallthrough;
-
-       case WDIOC_GETTIMEOUT:
-               ret = put_user(heartbeat, (int *)arg);
-               break;
-       }
-       return ret;
+       return 0;
 }
 
-static int ixp4xx_wdt_release(struct inode *inode, struct file *file)
+static int ixp4xx_wdt_set_timeout(struct watchdog_device *wdd,
+                                 unsigned int timeout)
 {
-       if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
-               wdt_disable();
-       else
-               pr_crit("Device closed unexpectedly - timer will not stop\n");
-       clear_bit(WDT_IN_USE, &wdt_status);
-       clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+       wdd->timeout = timeout;
+       if (watchdog_active(wdd))
+               ixp4xx_wdt_start(wdd);
 
        return 0;
 }
 
-
-static const struct file_operations ixp4xx_wdt_fops = {
-       .owner          = THIS_MODULE,
-       .llseek         = no_llseek,
-       .write          = ixp4xx_wdt_write,
-       .unlocked_ioctl = ixp4xx_wdt_ioctl,
-       .compat_ioctl   = compat_ptr_ioctl,
-       .open           = ixp4xx_wdt_open,
-       .release        = ixp4xx_wdt_release,
+static const struct watchdog_ops ixp4xx_wdt_ops = {
+       .start = ixp4xx_wdt_start,
+       .stop = ixp4xx_wdt_stop,
+       .set_timeout = ixp4xx_wdt_set_timeout,
+       .owner = THIS_MODULE,
 };
 
-static struct miscdevice ixp4xx_wdt_miscdev = {
-       .minor          = WATCHDOG_MINOR,
-       .name           = "watchdog",
-       .fops           = &ixp4xx_wdt_fops,
+static const struct watchdog_info ixp4xx_wdt_info = {
+       .options = WDIOF_KEEPALIVEPING
+               | WDIOF_MAGICCLOSE
+               | WDIOF_SETTIMEOUT,
+       .identity = KBUILD_MODNAME,
 };
 
-static int __init ixp4xx_wdt_init(void)
+/* Devres-handled clock disablement */
+static void ixp4xx_clock_action(void *d)
+{
+       clk_disable_unprepare(d);
+}
+
+static int ixp4xx_wdt_probe(struct platform_device *pdev)
 {
+       struct device *dev = &pdev->dev;
+       struct ixp4xx_wdt *iwdt;
+       struct clk *clk;
        int ret;
 
-       /*
-        * FIXME: we bail out on device tree boot but this really needs
-        * to be fixed in a nicer way: this registers the MDIO bus before
-        * even matching the driver infrastructure, we should only probe
-        * detected hardware.
-        */
-       if (of_have_populated_dt())
-               return -ENODEV;
        if (!(read_cpuid_id() & 0xf) && !cpu_is_ixp46x()) {
-               pr_err("Rev. A0 IXP42x CPU detected - watchdog disabled\n");
-
+               dev_err(dev, "Rev. A0 IXP42x CPU detected - watchdog disabled\n");
                return -ENODEV;
        }
-       boot_status = (*IXP4XX_OSST & IXP4XX_OSST_TIMER_WARM_RESET) ?
-                       WDIOF_CARDRESET : 0;
-       ret = misc_register(&ixp4xx_wdt_miscdev);
-       if (ret == 0)
-               pr_info("timer heartbeat %d sec\n", heartbeat);
-       return ret;
-}
 
-static void __exit ixp4xx_wdt_exit(void)
-{
-       misc_deregister(&ixp4xx_wdt_miscdev);
-}
+       iwdt = devm_kzalloc(dev, sizeof(*iwdt), GFP_KERNEL);
+       if (!iwdt)
+               return -ENOMEM;
+       iwdt->base = dev->platform_data;
 
+       /*
+        * Retrieve rate from a fixed clock from the device tree if
+        * the parent has that, else use the default clock rate.
+        */
+       clk = devm_clk_get(dev->parent, NULL);
+       if (!IS_ERR(clk)) {
+               ret = clk_prepare_enable(clk);
+               if (ret)
+                       return ret;
+               ret = devm_add_action_or_reset(dev, ixp4xx_clock_action, clk);
+               if (ret)
+                       return ret;
+               iwdt->rate = clk_get_rate(clk);
+       }
+       if (!iwdt->rate)
+               iwdt->rate = IXP4XX_TIMER_FREQ;
 
-module_init(ixp4xx_wdt_init);
-module_exit(ixp4xx_wdt_exit);
+       iwdt->wdd.info = &ixp4xx_wdt_info;
+       iwdt->wdd.ops = &ixp4xx_wdt_ops;
+       iwdt->wdd.min_timeout = 1;
+       iwdt->wdd.max_timeout = U32_MAX / iwdt->rate;
+       iwdt->wdd.parent = dev;
+       /* Default to 60 seconds */
+       iwdt->wdd.timeout = 60U;
+       watchdog_init_timeout(&iwdt->wdd, 0, dev);
 
-MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
-MODULE_DESCRIPTION("IXP4xx Network Processor Watchdog");
+       if (__raw_readl(iwdt->base + IXP4XX_OSST_OFFSET) &
+           IXP4XX_OSST_TIMER_WARM_RESET)
+               iwdt->wdd.bootstatus = WDIOF_CARDRESET;
+
+       ret = devm_watchdog_register_device(dev, &iwdt->wdd);
+       if (ret)
+               return ret;
+
+       dev_info(dev, "IXP4xx watchdog available\n");
 
-module_param(heartbeat, int, 0);
-MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 60s)");
+       return 0;
+}
 
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
+static struct platform_driver ixp4xx_wdt_driver = {
+       .probe = ixp4xx_wdt_probe,
+       .driver = {
+               .name   = "ixp4xx-watchdog",
+       },
+};
+module_platform_driver(ixp4xx_wdt_driver);
 
+MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
+MODULE_DESCRIPTION("IXP4xx Network Processor Watchdog");
 MODULE_LICENSE("GPL");
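The rewrite above converts ixp4xx_wdt from a hand-rolled miscdevice with its own ioctl handling to the watchdog core: the driver now only fills in a watchdog_device, implements start/stop/set_timeout ops, and lets the framework provide the /dev/watchdog plumbing, magic close, and timeout bookkeeping. A compilable userspace sketch of the ops-table shape; the framework is mocked here and this is not the kernel API:

#include <stdio.h>
#include <stdbool.h>

struct wd_device;

struct wd_ops {
    int (*start)(struct wd_device *);
    int (*stop)(struct wd_device *);
    int (*set_timeout)(struct wd_device *, unsigned int);
};

struct wd_device {
    const struct wd_ops *ops;
    unsigned int timeout;
    bool active;
};

static int my_start(struct wd_device *w) { w->active = true;  return 0; }
static int my_stop(struct wd_device *w)  { w->active = false; return 0; }

/* Mirrors ixp4xx_wdt_set_timeout: store the new value and, if the
 * dog is already running, re-arm it with the new period. */
static int my_set_timeout(struct wd_device *w, unsigned int t)
{
    w->timeout = t;
    if (w->active)
        return w->ops->start(w);
    return 0;
}

static const struct wd_ops my_ops = {
    .start = my_start, .stop = my_stop, .set_timeout = my_set_timeout,
};

int main(void)
{
    struct wd_device w = { .ops = &my_ops, .timeout = 60 };
    w.ops->start(&w);
    w.ops->set_timeout(&w, 30);
    printf("active=%d timeout=%u\n", w.active, w.timeout);
    return 0;
}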
index 06fb7a9..4d5ae61 100644 (file)
@@ -168,21 +168,6 @@ config OSF4_COMPAT
          with v4 shared libraries freely available from Compaq. If you're
          going to use shared libraries from Tru64 version 5.0 or later, say N.
 
-config BINFMT_EM86
-       tristate "Kernel support for Linux/Intel ELF binaries"
-       depends on ALPHA
-       help
-         Say Y here if you want to be able to execute Linux/Intel ELF
-         binaries just like native Alpha binaries on your Alpha machine. For
-         this to work, you need to have the emulator /usr/bin/em86 in place.
-
-         You can get the same functionality by saying N here and saying Y to
-         "Kernel support for MISC binaries".
-
-         You may answer M to compile the emulation support as a module and
-         later load the module when you want to use a Linux/Intel binary. The
-         module will be called binfmt_em86. If unsure, say Y.
-
 config BINFMT_MISC
        tristate "Kernel support for MISC binaries"
        help
index 9c708e1..f98f3e6 100644 (file)
@@ -39,7 +39,6 @@ obj-$(CONFIG_FS_ENCRYPTION)   += crypto/
 obj-$(CONFIG_FS_VERITY)                += verity/
 obj-$(CONFIG_FILE_LOCKING)      += locks.o
 obj-$(CONFIG_BINFMT_AOUT)      += binfmt_aout.o
-obj-$(CONFIG_BINFMT_EM86)      += binfmt_em86.o
 obj-$(CONFIG_BINFMT_MISC)      += binfmt_misc.o
 obj-$(CONFIG_BINFMT_SCRIPT)    += binfmt_script.o
 obj-$(CONFIG_BINFMT_ELF)       += binfmt_elf.o
index d3c6bb2..a3f5de2 100644 (file)
@@ -29,16 +29,11 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *);
 
 static int afs_deliver_yfs_cb_callback(struct afs_call *);
 
-#define CM_NAME(name) \
-       char afs_SRXCB##name##_name[] __tracepoint_string =     \
-               "CB." #name
-
 /*
  * CB.CallBack operation type
  */
-static CM_NAME(CallBack);
 static const struct afs_call_type afs_SRXCBCallBack = {
-       .name           = afs_SRXCBCallBack_name,
+       .name           = "CB.CallBack",
        .deliver        = afs_deliver_cb_callback,
        .destructor     = afs_cm_destructor,
        .work           = SRXAFSCB_CallBack,
@@ -47,9 +42,8 @@ static const struct afs_call_type afs_SRXCBCallBack = {
 /*
  * CB.InitCallBackState operation type
  */
-static CM_NAME(InitCallBackState);
 static const struct afs_call_type afs_SRXCBInitCallBackState = {
-       .name           = afs_SRXCBInitCallBackState_name,
+       .name           = "CB.InitCallBackState",
        .deliver        = afs_deliver_cb_init_call_back_state,
        .destructor     = afs_cm_destructor,
        .work           = SRXAFSCB_InitCallBackState,
@@ -58,9 +52,8 @@ static const struct afs_call_type afs_SRXCBInitCallBackState = {
 /*
  * CB.InitCallBackState3 operation type
  */
-static CM_NAME(InitCallBackState3);
 static const struct afs_call_type afs_SRXCBInitCallBackState3 = {
-       .name           = afs_SRXCBInitCallBackState3_name,
+       .name           = "CB.InitCallBackState3",
        .deliver        = afs_deliver_cb_init_call_back_state3,
        .destructor     = afs_cm_destructor,
        .work           = SRXAFSCB_InitCallBackState,
@@ -69,9 +62,8 @@ static const struct afs_call_type afs_SRXCBInitCallBackState3 = {
 /*
  * CB.Probe operation type
  */
-static CM_NAME(Probe);
 static const struct afs_call_type afs_SRXCBProbe = {
-       .name           = afs_SRXCBProbe_name,
+       .name           = "CB.Probe",
        .deliver        = afs_deliver_cb_probe,
        .destructor     = afs_cm_destructor,
        .work           = SRXAFSCB_Probe,
@@ -80,9 +72,8 @@ static const struct afs_call_type afs_SRXCBProbe = {
 /*
  * CB.ProbeUuid operation type
  */
-static CM_NAME(ProbeUuid);
 static const struct afs_call_type afs_SRXCBProbeUuid = {
-       .name           = afs_SRXCBProbeUuid_name,
+       .name           = "CB.ProbeUuid",
        .deliver        = afs_deliver_cb_probe_uuid,
        .destructor     = afs_cm_destructor,
        .work           = SRXAFSCB_ProbeUuid,
@@ -91,9 +82,8 @@ static const struct afs_call_type afs_SRXCBProbeUuid = {
 /*
  * CB.TellMeAboutYourself operation type
  */
-static CM_NAME(TellMeAboutYourself);
 static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
-       .name           = afs_SRXCBTellMeAboutYourself_name,
+       .name           = "CB.TellMeAboutYourself",
        .deliver        = afs_deliver_cb_tell_me_about_yourself,
        .destructor     = afs_cm_destructor,
        .work           = SRXAFSCB_TellMeAboutYourself,
@@ -102,9 +92,8 @@ static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
 /*
  * YFS CB.CallBack operation type
  */
-static CM_NAME(YFS_CallBack);
 static const struct afs_call_type afs_SRXYFSCB_CallBack = {
-       .name           = afs_SRXCBYFS_CallBack_name,
+       .name           = "YFSCB.CallBack",
        .deliver        = afs_deliver_yfs_cb_callback,
        .destructor     = afs_cm_destructor,
        .work           = SRXAFSCB_CallBack,
index 78719f2..ac829e6 100644 (file)
@@ -656,7 +656,6 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
                return ret;
        }
 
-       ret = -ENOENT;
        if (!cookie.found) {
                _leave(" = -ENOENT [not found]");
                return -ENOENT;
@@ -2020,17 +2019,20 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
 
                if (d_count(new_dentry) > 2) {
                        /* copy the target dentry's name */
-                       ret = -ENOMEM;
                        op->rename.tmp = d_alloc(new_dentry->d_parent,
                                                 &new_dentry->d_name);
-                       if (!op->rename.tmp)
+                       if (!op->rename.tmp) {
+                               op->error = -ENOMEM;
                                goto error;
+                       }
 
                        ret = afs_sillyrename(new_dvnode,
                                              AFS_FS_I(d_inode(new_dentry)),
                                              new_dentry, op->key);
-                       if (ret)
+                       if (ret) {
+                               op->error = ret;
                                goto error;
+                       }
 
                        op->dentry_2 = op->rename.tmp;
                        op->rename.rehash = NULL;
index 3104b62..c053469 100644 (file)
@@ -771,14 +771,20 @@ int afs_writepages(struct address_space *mapping,
        if (wbc->range_cyclic) {
                start = mapping->writeback_index * PAGE_SIZE;
                ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
-               if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
-                       ret = afs_writepages_region(mapping, wbc, 0, start,
-                                                   &next);
-               mapping->writeback_index = next / PAGE_SIZE;
+               if (ret == 0) {
+                       mapping->writeback_index = next / PAGE_SIZE;
+                       if (start > 0 && wbc->nr_to_write > 0) {
+                               ret = afs_writepages_region(mapping, wbc, 0,
+                                                           start, &next);
+                               if (ret == 0)
+                                       mapping->writeback_index =
+                                               next / PAGE_SIZE;
+                       }
+               }
        } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
                ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
-               if (wbc->nr_to_write > 0)
-                       mapping->writeback_index = next;
+               if (wbc->nr_to_write > 0 && ret == 0)
+                       mapping->writeback_index = next / PAGE_SIZE;
        } else {
                ret = afs_writepages_region(mapping, wbc,
                                            wbc->range_start, wbc->range_end, &next);
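The afs_writepages fix only saves a resume point when the region pass succeeded, and stores it in page units (next / PAGE_SIZE) in every branch, where the old code mixed byte and page units and recorded a bogus resume point on error. A sketch of cyclic writeback bookkeeping under those rules; names and the error value are illustrative:

#include <stdio.h>

#define PAGE_SIZE 4096

static unsigned long writeback_index;   /* in pages, like the mapping's */

/* Pretend to write back [start, end) bytes; sets *next to where we
 * stopped and returns 0 on success, negative on error. */
static int write_region(unsigned long start, unsigned long end,
                        unsigned long *next, int fail)
{
    *next = end;
    return fail ? -5 : 0;   /* -EIO stand-in */
}

static void writepages_cyclic(int fail)
{
    unsigned long start = writeback_index * PAGE_SIZE;
    unsigned long next = 0;

    /* Only advance the resume point when the pass actually succeeded,
     * and always convert bytes back to pages. */
    if (write_region(start, start + 8 * PAGE_SIZE, &next, fail) == 0)
        writeback_index = next / PAGE_SIZE;
    printf("writeback_index=%lu\n", writeback_index);
}

int main(void)
{
    writepages_cyclic(0);   /* advances to 8 */
    writepages_cyclic(1);   /* error: resume point unchanged */
    return 0;
}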
diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
deleted file mode 100644 (file)
index 06b9b9f..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *  linux/fs/binfmt_em86.c
- *
- *  Based on linux/fs/binfmt_script.c
- *  Copyright (C) 1996  Martin von Löwis
- *  original #!-checking implemented by tytso.
- *
- *  em86 changes Copyright (C) 1997  Jim Paradis
- */
-
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/binfmts.h>
-#include <linux/elf.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/errno.h>
-
-
-#define EM86_INTERP    "/usr/bin/em86"
-#define EM86_I_NAME    "em86"
-
-static int load_em86(struct linux_binprm *bprm)
-{
-       const char *i_name, *i_arg;
-       char *interp;
-       struct file * file;
-       int retval;
-       struct elfhdr   elf_ex;
-
-       /* Make sure this is a Linux/Intel ELF executable... */
-       elf_ex = *((struct elfhdr *)bprm->buf);
-
-       if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
-               return  -ENOEXEC;
-
-       /* First of all, some simple consistency checks */
-       if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
-               (!((elf_ex.e_machine == EM_386) || (elf_ex.e_machine == EM_486))) ||
-               !bprm->file->f_op->mmap) {
-                       return -ENOEXEC;
-       }
-
-       /* Need to be able to load the file after exec */
-       if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
-               return -ENOENT;
-
-       /* Unlike in the script case, we don't have to do any hairy
-        * parsing to find our interpreter... it's hardcoded!
-        */
-       interp = EM86_INTERP;
-       i_name = EM86_I_NAME;
-       i_arg = NULL;           /* We reserve the right to add an arg later */
-
-       /*
-        * Splice in (1) the interpreter's name for argv[0]
-        *           (2) (optional) argument to interpreter
-        *           (3) filename of emulated file (replace argv[0])
-        *
-        * This is done in reverse order, because of how the
-        * user environment and arguments are stored.
-        */
-       remove_arg_zero(bprm);
-       retval = copy_string_kernel(bprm->filename, bprm);
-       if (retval < 0) return retval; 
-       bprm->argc++;
-       if (i_arg) {
-               retval = copy_string_kernel(i_arg, bprm);
-               if (retval < 0) return retval; 
-               bprm->argc++;
-       }
-       retval = copy_string_kernel(i_name, bprm);
-       if (retval < 0) return retval;
-       bprm->argc++;
-
-       /*
-        * OK, now restart the process with the interpreter's inode.
-        * Note that we use open_exec() as the name is now in kernel
-        * space, and we don't need to copy it.
-        */
-       file = open_exec(interp);
-       if (IS_ERR(file))
-               return PTR_ERR(file);
-
-       bprm->interpreter = file;
-       return 0;
-}
-
-static struct linux_binfmt em86_format = {
-       .module         = THIS_MODULE,
-       .load_binary    = load_em86,
-};
-
-static int __init init_em86_binfmt(void)
-{
-       register_binfmt(&em86_format);
-       return 0;
-}
-
-static void __exit exit_em86_binfmt(void)
-{
-       unregister_binfmt(&em86_format);
-}
-
-core_initcall(init_em86_binfmt);
-module_exit(exit_em86_binfmt);
-MODULE_LICENSE("GPL");
index 0c424a0..9ef4f1f 100644 (file)
@@ -812,6 +812,8 @@ static void bdev_free_inode(struct inode *inode)
        free_percpu(bdev->bd_stats);
        kfree(bdev->bd_meta_info);
 
+       if (!bdev_is_partition(bdev))
+               kfree(bdev->bd_disk);
        kmem_cache_free(bdev_cachep, BDEV_I(inode));
 }
 
index 7a8a2fc..78b202d 100644 (file)
@@ -1488,15 +1488,15 @@ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
                         struct btrfs_fs_info *fs_info, u64 bytenr,
                         u64 time_seq, struct ulist **roots,
-                        bool ignore_offset)
+                        bool ignore_offset, bool skip_commit_root_sem)
 {
        int ret;
 
-       if (!trans)
+       if (!trans && !skip_commit_root_sem)
                down_read(&fs_info->commit_root_sem);
        ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
                                        time_seq, roots, ignore_offset);
-       if (!trans)
+       if (!trans && !skip_commit_root_sem)
                up_read(&fs_info->commit_root_sem);
        return ret;
 }
index 17abde7..ff5f07f 100644 (file)
@@ -47,7 +47,8 @@ int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
                         const u64 *extent_item_pos, bool ignore_offset);
 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
                         struct btrfs_fs_info *fs_info, u64 bytenr,
-                        u64 time_seq, struct ulist **roots, bool ignore_offset);
+                        u64 time_seq, struct ulist **roots, bool ignore_offset,
+                        bool skip_commit_root_sem);
 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
                        u32 name_len, unsigned long name_off,
                        struct extent_buffer *eb_in, u64 parent,
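btrfs_find_all_roots grows a skip_commit_root_sem parameter so callers that already hold (or must not take) commit_root_sem can avoid the recursive down_read. A minimal sketch of the caller-holds-lock flag pattern, using a pthread rwlock as an illustrative stand-in:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t commit_root_sem = PTHREAD_RWLOCK_INITIALIZER;

/* If the caller already holds the semaphore it passes caller_holds_lock,
 * and we must neither take it again nor release it on the way out. */
static int find_all_roots(int caller_holds_lock)
{
    if (!caller_holds_lock)
        pthread_rwlock_rdlock(&commit_root_sem);

    puts("walking backrefs under commit_root_sem");

    if (!caller_holds_lock)
        pthread_rwlock_unlock(&commit_root_sem);
    return 0;
}

int main(void)
{
    find_all_roots(0);                        /* helper takes the lock */
    pthread_rwlock_rdlock(&commit_root_sem);  /* caller takes it itself */
    find_all_roots(1);
    pthread_rwlock_unlock(&commit_root_sem);
    return 0;
}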
index 38b127b..9e7d9d0 100644 (file)
@@ -1498,9 +1498,18 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
        if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
                return;
 
-       mutex_lock(&fs_info->reclaim_bgs_lock);
+       /*
+        * Long running balances can keep us blocked here for eternity, so
+        * simply skip reclaim if we're unable to get the mutex.
+        */
+       if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) {
+               btrfs_exclop_finish(fs_info);
+               return;
+       }
+
        spin_lock(&fs_info->unused_bgs_lock);
        while (!list_empty(&fs_info->reclaim_bgs)) {
+               u64 zone_unusable;
                int ret = 0;
 
                bg = list_first_entry(&fs_info->reclaim_bgs,
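The reclaim worker above switches from mutex_lock to mutex_trylock so a long-running balance holding reclaim_bgs_lock cannot park the worker forever; when the lock is contended the worker releases its exclusive-op slot and bails, to be retried later. A tiny pthread-based sketch of the trylock-and-back-off pattern, illustrative only:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reclaim_lock = PTHREAD_MUTEX_INITIALIZER;

static void reclaim_work(void)
{
    /* Don't block behind a potentially eternal holder: skip this run. */
    if (pthread_mutex_trylock(&reclaim_lock) != 0) {
        puts("lock busy, skipping reclaim this round");
        return;
    }
    puts("reclaiming...");
    pthread_mutex_unlock(&reclaim_lock);
}

int main(void)
{
    reclaim_work();                      /* uncontended: runs */
    pthread_mutex_lock(&reclaim_lock);   /* simulate a long balance */
    reclaim_work();                      /* contended: skipped */
    pthread_mutex_unlock(&reclaim_lock);
    return 0;
}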
@@ -1534,13 +1543,22 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
                        goto next;
                }
 
+               /*
+                * Cache the zone_unusable value before turning the block group
+                * to read only. As soon as the blog group is read only it's
+                * zone_unusable value gets moved to the block group's read-only
+                * bytes and isn't available for calculations anymore.
+                */
+               zone_unusable = bg->zone_unusable;
                ret = inc_block_group_ro(bg, 0);
                up_write(&space_info->groups_sem);
                if (ret < 0)
                        goto next;
 
-               btrfs_info(fs_info, "reclaiming chunk %llu with %llu%% used",
-                               bg->start, div_u64(bg->used * 100, bg->length));
+               btrfs_info(fs_info,
+                       "reclaiming chunk %llu with %llu%% used %llu%% unusable",
+                               bg->start, div_u64(bg->used * 100, bg->length),
+                               div64_u64(zone_unusable * 100, bg->length));
                trace_btrfs_reclaim_block_group(bg);
                ret = btrfs_relocate_chunk(fs_info, bg->start);
                if (ret)
@@ -2197,6 +2215,13 @@ error:
        return ret;
 }
 
+/*
+ * This function, insert_block_group_item(), belongs to the phase 2 of chunk
+ * allocation.
+ *
+ * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
+ * phases.
+ */
 static int insert_block_group_item(struct btrfs_trans_handle *trans,
                                   struct btrfs_block_group *block_group)
 {
@@ -2219,15 +2244,19 @@ static int insert_block_group_item(struct btrfs_trans_handle *trans,
        return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
 }
 
+/*
+ * This function, btrfs_create_pending_block_groups(), belongs to the phase 2 of
+ * chunk allocation.
+ *
+ * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
+ * phases.
+ */
 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_block_group *block_group;
        int ret = 0;
 
-       if (!trans->can_flush_pending_bgs)
-               return;
-
        while (!list_empty(&trans->new_bgs)) {
                int index;
 
@@ -2242,6 +2271,13 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
                ret = insert_block_group_item(trans, block_group);
                if (ret)
                        btrfs_abort_transaction(trans, ret);
+               if (!block_group->chunk_item_inserted) {
+                       mutex_lock(&fs_info->chunk_mutex);
+                       ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group);
+                       mutex_unlock(&fs_info->chunk_mutex);
+                       if (ret)
+                               btrfs_abort_transaction(trans, ret);
+               }
                ret = btrfs_finish_chunk_alloc(trans, block_group->start,
                                        block_group->length);
                if (ret)
@@ -2265,8 +2301,9 @@ next:
        btrfs_trans_release_chunk_metadata(trans);
 }
 
-int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
-                          u64 type, u64 chunk_offset, u64 size)
+struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
+                                                u64 bytes_used, u64 type,
+                                                u64 chunk_offset, u64 size)
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_block_group *cache;
@@ -2276,7 +2313,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
 
        cache = btrfs_create_block_group_cache(fs_info, chunk_offset);
        if (!cache)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        cache->length = size;
        set_free_space_tree_thresholds(cache);
@@ -2290,7 +2327,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
        ret = btrfs_load_block_group_zone_info(cache, true);
        if (ret) {
                btrfs_put_block_group(cache);
-               return ret;
+               return ERR_PTR(ret);
        }
 
        ret = exclude_super_stripes(cache);
@@ -2298,7 +2335,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
                /* We may have excluded something, so call this just in case */
                btrfs_free_excluded_extents(cache);
                btrfs_put_block_group(cache);
-               return ret;
+               return ERR_PTR(ret);
        }
 
        add_new_free_space(cache, chunk_offset, chunk_offset + size);
@@ -2325,7 +2362,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
        if (ret) {
                btrfs_remove_free_space_cache(cache);
                btrfs_put_block_group(cache);
-               return ret;
+               return ERR_PTR(ret);
        }
 
        /*
@@ -2344,7 +2381,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
        btrfs_update_delayed_refs_rsv(trans);
 
        set_avail_alloc_bits(fs_info, type);
-       return 0;
+       return cache;
 }
 
 /*
@@ -3222,11 +3259,203 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
        return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
 }
 
+static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
+{
+       struct btrfs_block_group *bg;
+       int ret;
+
+       /*
+        * Check if we have enough space in the system space info because we
+        * will need to update device items in the chunk btree and insert a new
+        * chunk item in the chunk btree as well. This will allocate a new
+        * system block group if needed.
+        */
+       check_system_chunk(trans, flags);
+
+       bg = btrfs_alloc_chunk(trans, flags);
+       if (IS_ERR(bg)) {
+               ret = PTR_ERR(bg);
+               goto out;
+       }
+
+       /*
+        * If this is a system chunk allocation then stop right here and do not
+        * add the chunk item to the chunk btree. This is to prevent a deadlock
+        * because this system chunk allocation can be triggered while COWing
+        * some extent buffer of the chunk btree and while holding a lock on a
+        * parent extent buffer, in which case attempting to insert the chunk
+        * item (or update the device item) would result in a deadlock on that
+        * parent extent buffer. In this case defer the chunk btree updates to
+        * the second phase of chunk allocation and keep our reservation until
+        * the second phase completes.
+        *
+        * This is a rare case and can only be triggered by the very few cases
+        * we have where we need to touch the chunk btree outside chunk allocation
+        * and chunk removal. These cases are basically adding a device, removing
+        * a device or resizing a device.
+        */
+       if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+               return 0;
+
+       ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
+       /*
+        * Normally we are not expected to fail with -ENOSPC here, since we have
+        * previously reserved space in the system space_info and allocated one
+        * new system chunk if necessary. However there are two exceptions:
+        *
+        * 1) We may have enough free space in the system space_info but all the
+        *    existing system block groups have a profile which can not be used
+        *    for extent allocation.
+        *
+        *    This happens when mounting in degraded mode. For example we have a
+        *    RAID1 filesystem with 2 devices, lose one device and mount the fs
+        *    using the other device in degraded mode. If we then allocate a chunk,
+        *    we may have enough free space in the existing system space_info, but
+        *    none of the block groups can be used for extent allocation since they
+        *    have a RAID1 profile, and because we are in degraded mode with a
+        *    single device, we are forced to allocate a new system chunk with a
+        *    SINGLE profile. Making check_system_chunk() iterate over all system
+        *    block groups and check if they have a usable profile and enough space
+        *    can be slow on very large filesystems, so we tolerate the -ENOSPC and
+        *    try again after forcing allocation of a new system chunk. This way
+        *    we avoid paying the cost of that search in normal circumstances, when
+        *    we are not mounted in degraded mode;
+        *
+        * 2) We had enough free space in the system space_info, and one suitable
+        *    block group to allocate from when we called check_system_chunk()
+        *    above. However right after we called it, the only system block group
+        *    with enough free space got turned into RO mode by a running scrub,
+        *    and in this case we have to allocate a new one and retry. We only
+        *    need to do this allocation and retry once, since we have a transaction
+        *    handle and scrub uses the commit root to search for block groups.
+        */
+       if (ret == -ENOSPC) {
+               const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info);
+               struct btrfs_block_group *sys_bg;
+
+               sys_bg = btrfs_alloc_chunk(trans, sys_flags);
+               if (IS_ERR(sys_bg)) {
+                       ret = PTR_ERR(sys_bg);
+                       btrfs_abort_transaction(trans, ret);
+                       goto out;
+               }
+
+               ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
+               if (ret) {
+                       btrfs_abort_transaction(trans, ret);
+                       goto out;
+               }
+
+               ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
+               if (ret) {
+                       btrfs_abort_transaction(trans, ret);
+                       goto out;
+               }
+       } else if (ret) {
+               btrfs_abort_transaction(trans, ret);
+               goto out;
+       }
+out:
+       btrfs_trans_release_chunk_metadata(trans);
+
+       return ret;
+}
+
 /*
- * If force is CHUNK_ALLOC_FORCE:
+ * Chunk allocation is done in 2 phases:
+ *
+ * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for
+ *    the chunk, the chunk mapping, create its block group and add the items
+ *    that belong in the chunk btree to it - more specifically, we need to
+ *    update device items in the chunk btree and add a new chunk item to it.
+ *
+ * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block
+ *    group item to the extent btree and the device extent items to the devices
+ *    btree.
+ *
+ * This is done to prevent deadlocks. For example when COWing a node from the
+ * extent btree we are holding a write lock on the node's parent and if we
+ * trigger chunk allocation and attempt to insert the new block group item
+ * in the extent btree right away, we could deadlock because the path for the
+ * insertion can include that parent node. At first glance it seems impossible
+ * to trigger chunk allocation after starting a transaction since tasks should
+ * reserve enough transaction units (metadata space), however while that is true
+ * most of the time, chunk allocation may still be triggered for several reasons:
+ *
+ * 1) When reserving metadata, we check if there is enough free space in the
+ *    metadata space_info and therefore don't trigger allocation of a new chunk.
+ *    However later when the task actually tries to COW an extent buffer from
+ *    the extent btree or from the device btree for example, it is forced to
+ *    allocate a new block group (chunk) because the only one that had enough
+ *    free space was just turned to RO mode by a running scrub for example (or
+ *    device replace, block group reclaim thread, etc), so we can not use it
+ *    for allocating an extent and end up being forced to allocate a new one;
+ *
+ * 2) Because we only check that the metadata space_info has enough free bytes,
+ *    we end up not allocating a new metadata chunk in that case. However if
+ *    the filesystem was mounted in degraded mode, none of the existing block
+ *    groups might be suitable for extent allocation due to their incompatible
+ *    profile (e.g. mounting a filesystem with 2 devices, where all block groups
+ *    use a RAID1 profile, in degraded mode using a single device). In this case
+ *    when the task attempts to COW some extent buffer of the extent btree for
+ *    example, it will trigger allocation of a new metadata block group with a
+ *    suitable profile (SINGLE profile in the example of the degraded mount of
+ *    the RAID1 filesystem);
+ *
+ * 3) The task has reserved enough transaction units / metadata space, but when
+ *    it attempts to COW an extent buffer from the extent or device btree for
+ *    example, it does not find any free extent in any metadata block group,
+ *    and is therefore forced to try to allocate a new metadata block group.
+ *    This is because some other task allocated all available extents in the
+ *    meantime - this typically happens with tasks that don't reserve space
+ *    properly, either intentionally or as a bug. One example where this is
+ *    done intentionally is fsync, as it does not reserve any transaction units
+ *    and ends up allocating a variable number of metadata extents for log
+ *    tree extent buffers.
+ *
+ * We also need this 2-phase setup when adding a device to a filesystem with
+ * a seed device - we must create new metadata and system chunks without adding
+ * any of the block group items to the chunk, extent and device btrees. If we
+ * did not do it this way, we would get ENOSPC when attempting to update those
+ * btrees, since all the chunks from the seed device are read-only.
+ *
+ * Phase 1 does the updates and insertions to the chunk btree because if we had
+ * it done in phase 2 and have a thundering herd of tasks allocating chunks in
+ * parallel, we risk having too many system chunks allocated by many tasks if
+ * many tasks reach phase 1 without the previous ones completing phase 2. In the
+ * extreme case this leads to exhaustion of the system chunk array in the
+ * superblock. This is easier to trigger if using a btree node/leaf size of 64K
+ * and with RAID filesystems (so we have more device items in the chunk btree).
+ * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of
+ * the system chunk array due to concurrent allocations") provides more details.
+ *
+ * For allocation of system chunks, we defer the updates and insertions into the
+ * chunk btree to phase 2. This is to prevent deadlocks on extent buffers because
+ * if the chunk allocation is triggered while COWing an extent buffer of the
+ * chunk btree, we are holding a lock on the parent of that extent buffer and
+ * doing the chunk btree updates and insertions can require locking that parent.
+ * This is for the very few and rare cases where we update the chunk btree that
+ * are not chunk allocation or chunk removal: adding a device, removing a device
+ * or resizing a device.
+ *
+ * The reservation of system space, done through check_system_chunk(), as well
+ * as all the updates and insertions into the chunk btree must be done while
+ * holding fs_info->chunk_mutex. This is important to guarantee that while COWing
+ * an extent buffer from the chunk btree we never trigger allocation of a new
+ * system chunk, which would result in a deadlock (trying to lock an extent
+ * buffer of the chunk btree twice, the first time before triggering the chunk
+ * allocation and the second time during chunk allocation while attempting to
+ * update the chunk btree). The system chunk array is also updated while holding
+ * that mutex. The same logic applies to removing chunks - we must reserve system
+ * space, update the chunk btree and the system chunk array in the superblock
+ * while holding fs_info->chunk_mutex.
+ *
+ * This function, btrfs_chunk_alloc(), belongs to phase 1.
+ *
+ * If @force is CHUNK_ALLOC_FORCE:
  *    - return 1 if it successfully allocates a chunk,
  *    - return errors including -ENOSPC otherwise.
- * If force is NOT CHUNK_ALLOC_FORCE:
+ * If @force is NOT CHUNK_ALLOC_FORCE:
  *    - return 0 if it doesn't need to allocate a new chunk,
  *    - return 1 if it successfully allocates a chunk,
  *    - return errors including -ENOSPC otherwise.
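
For reference, a hypothetical caller honoring the contract documented above
might look like this (a sketch only, not part of the patch; CHUNK_ALLOC_NO_FORCE
is the existing non-forcing allocation mode):

/* Hypothetical caller sketch of btrfs_chunk_alloc()'s return contract. */
static int example_ensure_data_chunk(struct btrfs_trans_handle *trans)
{
        int ret;

        ret = btrfs_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA,
                                CHUNK_ALLOC_NO_FORCE);
        if (ret < 0)            /* errors, including -ENOSPC */
                return ret;
        /* ret == 1: a new chunk was allocated; ret == 0: none was needed. */
        return 0;
}
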
@@ -3243,6 +3472,13 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
        /* Don't re-enter if we're already allocating a chunk */
        if (trans->allocating_chunk)
                return -ENOSPC;
+       /*
+        * If we are removing a chunk, don't re-enter or we would deadlock.
+        * System space reservation and system chunk allocation is done by the
+        * chunk remove operation (btrfs_remove_chunk()).
+        */
+       if (trans->removing_chunk)
+               return -ENOSPC;
 
        space_info = btrfs_find_space_info(fs_info, flags);
        ASSERT(space_info);
@@ -3306,13 +3542,7 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
                        force_metadata_allocation(fs_info);
        }
 
-       /*
-        * Check if we have enough space in SYSTEM chunk because we may need
-        * to update devices.
-        */
-       check_system_chunk(trans, flags);
-
-       ret = btrfs_alloc_chunk(trans, flags);
+       ret = do_chunk_alloc(trans, flags);
        trans->allocating_chunk = false;
 
        spin_lock(&space_info->lock);
@@ -3331,22 +3561,6 @@ out:
        space_info->chunk_alloc = 0;
        spin_unlock(&space_info->lock);
        mutex_unlock(&fs_info->chunk_mutex);
-       /*
-        * When we allocate a new chunk we reserve space in the chunk block
-        * reserve to make sure we can COW nodes/leafs in the chunk tree or
-        * add new nodes/leafs to it if we end up needing to do it when
-        * inserting the chunk item and updating device items as part of the
-        * second phase of chunk allocation, performed by
-        * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
-        * large number of new block groups to create in our transaction
-        * handle's new_bgs list to avoid exhausting the chunk block reserve
-        * in extreme cases - like having a single transaction create many new
-        * block groups when starting to write out the free space caches of all
-        * the block groups that were made dirty during the lifetime of the
-        * transaction.
-        */
-       if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
-               btrfs_create_pending_block_groups(trans);
 
        return ret;
 }
@@ -3367,7 +3581,6 @@ static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
  */
 void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
 {
-       struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_space_info *info;
        u64 left;
@@ -3382,7 +3595,6 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
        lockdep_assert_held(&fs_info->chunk_mutex);
 
        info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
-again:
        spin_lock(&info->lock);
        left = info->total_bytes - btrfs_space_info_used(info, true);
        spin_unlock(&info->lock);
@@ -3401,76 +3613,39 @@ again:
 
        if (left < thresh) {
                u64 flags = btrfs_system_alloc_profile(fs_info);
-               u64 reserved = atomic64_read(&cur_trans->chunk_bytes_reserved);
-
-               /*
-                * If there's not available space for the chunk tree (system
-                * space) and there are other tasks that reserved space for
-                * creating a new system block group, wait for them to complete
-                * the creation of their system block group and release excess
-                * reserved space. We do this because:
-                *
-                * *) We can end up allocating more system chunks than necessary
-                *    when there are multiple tasks that are concurrently
-                *    allocating block groups, which can lead to exhaustion of
-                *    the system array in the superblock;
-                *
-                * *) If we allocate extra and unnecessary system block groups,
-                *    despite being empty for a long time, and possibly forever,
-                *    they end not being added to the list of unused block groups
-                *    because that typically happens only when deallocating the
-                *    last extent from a block group - which never happens since
-                *    we never allocate from them in the first place. The few
-                *    exceptions are when mounting a filesystem or running scrub,
-                *    which add unused block groups to the list of unused block
-                *    groups, to be deleted by the cleaner kthread.
-                *    And even when they are added to the list of unused block
-                *    groups, it can take a long time until they get deleted,
-                *    since the cleaner kthread might be sleeping or busy with
-                *    other work (deleting subvolumes, running delayed iputs,
-                *    defrag scheduling, etc);
-                *
-                * This is rare in practice, but can happen when too many tasks
-                * are allocating blocks groups in parallel (via fallocate())
-                * and before the one that reserved space for a new system block
-                * group finishes the block group creation and releases the space
-                * reserved in excess (at btrfs_create_pending_block_groups()),
-                * other tasks end up here and see free system space temporarily
-                * not enough for updating the chunk tree.
-                *
-                * We unlock the chunk mutex before waiting for such tasks and
-                * lock it again after the wait, otherwise we would deadlock.
-                * It is safe to do so because allocating a system chunk is the
-                * first thing done while allocating a new block group.
-                */
-               if (reserved > trans->chunk_bytes_reserved) {
-                       const u64 min_needed = reserved - thresh;
-
-                       mutex_unlock(&fs_info->chunk_mutex);
-                       wait_event(cur_trans->chunk_reserve_wait,
-                          atomic64_read(&cur_trans->chunk_bytes_reserved) <=
-                          min_needed);
-                       mutex_lock(&fs_info->chunk_mutex);
-                       goto again;
-               }
+               struct btrfs_block_group *bg;
 
                /*
                 * Ignore failure to create system chunk. We might end up not
                 * needing it, as we might not need to COW all nodes/leafs from
                 * the paths we visit in the chunk tree (they were already COWed
                 * or created in the current transaction for example).
+                *
+                * Also, if our caller is allocating a system chunk, do not
+                * attempt to insert the chunk item in the chunk btree, as we
+                * could deadlock on an extent buffer since our caller may be
+                * COWing an extent buffer from the chunk btree.
                 */
-               ret = btrfs_alloc_chunk(trans, flags);
+               bg = btrfs_alloc_chunk(trans, flags);
+               if (IS_ERR(bg)) {
+                       ret = PTR_ERR(bg);
+               } else if (!(type & BTRFS_BLOCK_GROUP_SYSTEM)) {
+                       /*
+                        * If we fail to add the chunk item here, we end up
+                        * trying again at phase 2 of chunk allocation, at
+                        * btrfs_create_pending_block_groups(). So ignore
+                        * any error here.
+                        */
+                       btrfs_chunk_alloc_add_chunk_item(trans, bg);
+               }
        }
 
        if (!ret) {
                ret = btrfs_block_rsv_add(fs_info->chunk_root,
                                          &fs_info->chunk_block_rsv,
                                          thresh, BTRFS_RESERVE_NO_FLUSH);
-               if (!ret) {
-                       atomic64_add(thresh, &cur_trans->chunk_bytes_reserved);
+               if (!ret)
                        trans->chunk_bytes_reserved += thresh;
-               }
        }
 }
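
In short, check_system_chunk() computes how many bytes are left in the SYSTEM
space_info, compares them against a worst-case threshold for the pending chunk
btree updates, allocates a new system chunk when short, and pins thresh bytes
in the chunk block reserve (released later through
btrfs_trans_release_chunk_metadata()). A standalone sketch of that decision,
with illustrative numbers and a made-up helper name:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the space check in check_system_chunk(). */
static int system_chunk_needed(uint64_t total, uint64_t used, uint64_t thresh)
{
        uint64_t left = total - used;

        return left < thresh;   /* short on system space: allocate a new chunk */
}

int main(void)
{
        /* e.g. 8 MiB of system space, 7.5 MiB used, 1 MiB of worst-case updates */
        if (system_chunk_needed(8ULL << 20, 7680ULL << 10, 1ULL << 20))
                printf("allocate a SYSTEM chunk before the chunk btree updates\n");
        return 0;
}
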
 
index 7b92742..c72a71e 100644 (file)
@@ -97,6 +97,7 @@ struct btrfs_block_group {
        unsigned int removed:1;
        unsigned int to_copy:1;
        unsigned int relocating_repair:1;
+       unsigned int chunk_item_inserted:1;
 
        int disk_cache_state;
 
@@ -268,8 +269,9 @@ void btrfs_reclaim_bgs_work(struct work_struct *work);
 void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info);
 void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg);
 int btrfs_read_block_groups(struct btrfs_fs_info *info);
-int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
-                          u64 type, u64 chunk_offset, u64 size);
+struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
+                                                u64 bytes_used, u64 type,
+                                                u64 chunk_offset, u64 size);
 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
                             bool do_chunk_alloc);
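
The new prototype returns the block group pointer directly and encodes errors
in the pointer itself, following the kernel's ERR_PTR convention from
<linux/err.h>. A minimal userspace re-implementation of that convention, for
illustration only (the real macros live in include/linux/err.h):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Simplified versions of the <linux/err.h> helpers. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct block_group { int dummy; };

/* Returns either a valid object or an errno encoded as a pointer. */
static struct block_group *make_block_group(int fail)
{
        static struct block_group bg;

        return fail ? ERR_PTR(-ENOSPC) : &bg;
}

int main(void)
{
        struct block_group *bg = make_block_group(1);

        if (IS_ERR(bg))
                printf("error: %ld\n", PTR_ERR(bg));    /* -28 == -ENOSPC */
        return 0;
}
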
index 9a023ae..30d82cd 100644 (file)
@@ -352,7 +352,7 @@ static void end_compressed_bio_write(struct bio *bio)
        btrfs_record_physical_zoned(inode, cb->start, bio);
        btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
                        cb->start, cb->start + cb->len - 1,
-                       bio->bi_status == BLK_STS_OK);
+                       !cb->errors);
 
        end_compressed_writeback(inode, cb);
        /* note, our inode could be gone now */
index 4bc3ca2..c5c08c8 100644 (file)
@@ -364,49 +364,6 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
        return 0;
 }
 
-static struct extent_buffer *alloc_tree_block_no_bg_flush(
-                                         struct btrfs_trans_handle *trans,
-                                         struct btrfs_root *root,
-                                         u64 parent_start,
-                                         const struct btrfs_disk_key *disk_key,
-                                         int level,
-                                         u64 hint,
-                                         u64 empty_size,
-                                         enum btrfs_lock_nesting nest)
-{
-       struct btrfs_fs_info *fs_info = root->fs_info;
-       struct extent_buffer *ret;
-
-       /*
-        * If we are COWing a node/leaf from the extent, chunk, device or free
-        * space trees, make sure that we do not finish block group creation of
-        * pending block groups. We do this to avoid a deadlock.
-        * COWing can result in allocation of a new chunk, and flushing pending
-        * block groups (btrfs_create_pending_block_groups()) can be triggered
-        * when finishing allocation of a new chunk. Creation of a pending block
-        * group modifies the extent, chunk, device and free space trees,
-        * therefore we could deadlock with ourselves since we are holding a
-        * lock on an extent buffer that btrfs_create_pending_block_groups() may
-        * try to COW later.
-        * For similar reasons, we also need to delay flushing pending block
-        * groups when splitting a leaf or node, from one of those trees, since
-        * we are holding a write lock on it and its parent or when inserting a
-        * new root node for one of those trees.
-        */
-       if (root == fs_info->extent_root ||
-           root == fs_info->chunk_root ||
-           root == fs_info->dev_root ||
-           root == fs_info->free_space_root)
-               trans->can_flush_pending_bgs = false;
-
-       ret = btrfs_alloc_tree_block(trans, root, parent_start,
-                                    root->root_key.objectid, disk_key, level,
-                                    hint, empty_size, nest);
-       trans->can_flush_pending_bgs = true;
-
-       return ret;
-}
-
 /*
  * does the dirty work in cow of a single block.  The parent block (if
  * supplied) is updated to point to the new cow copy.  The new buffer is marked
@@ -455,8 +412,9 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
        if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
                parent_start = parent->start;
 
-       cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
-                                          level, search_start, empty_size, nest);
+       cow = btrfs_alloc_tree_block(trans, root, parent_start,
+                                    root->root_key.objectid, &disk_key, level,
+                                    search_start, empty_size, nest);
        if (IS_ERR(cow))
                return PTR_ERR(cow);
 
@@ -2458,9 +2416,9 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
        else
                btrfs_node_key(lower, &lower_key, 0);
 
-       c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
-                                        root->node->start, 0,
-                                        BTRFS_NESTING_NEW_ROOT);
+       c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
+                                  &lower_key, level, root->node->start, 0,
+                                  BTRFS_NESTING_NEW_ROOT);
        if (IS_ERR(c))
                return PTR_ERR(c);
 
@@ -2589,8 +2547,9 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
        mid = (c_nritems + 1) / 2;
        btrfs_node_key(c, &disk_key, mid);
 
-       split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
-                                            c->start, 0, BTRFS_NESTING_SPLIT);
+       split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
+                                      &disk_key, level, c->start, 0,
+                                      BTRFS_NESTING_SPLIT);
        if (IS_ERR(split))
                return PTR_ERR(split);
 
@@ -3381,10 +3340,10 @@ again:
         * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
         * use BTRFS_NESTING_NEW_ROOT.
         */
-       right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
-                                            l->start, 0, num_doubles ?
-                                            BTRFS_NESTING_NEW_ROOT :
-                                            BTRFS_NESTING_SPLIT);
+       right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
+                                      &disk_key, 0, l->start, 0,
+                                      num_doubles ? BTRFS_NESTING_NEW_ROOT :
+                                      BTRFS_NESTING_SPLIT);
        if (IS_ERR(right))
                return PTR_ERR(right);
 
index 06bc842..ca848b1 100644 (file)
@@ -974,7 +974,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
 
        if (qrecord_inserted)
-               btrfs_qgroup_trace_extent_post(fs_info, record);
+               btrfs_qgroup_trace_extent_post(trans, record);
 
        return 0;
 }
@@ -1069,7 +1069,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
 
 
        if (qrecord_inserted)
-               return btrfs_qgroup_trace_extent_post(fs_info, record);
+               return btrfs_qgroup_trace_extent_post(trans, record);
        return 0;
 }
 
index b117dd3..a59ab7b 100644 (file)
@@ -209,7 +209,7 @@ void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
 static void csum_tree_block(struct extent_buffer *buf, u8 *result)
 {
        struct btrfs_fs_info *fs_info = buf->fs_info;
-       const int num_pages = fs_info->nodesize >> PAGE_SHIFT;
+       const int num_pages = num_extent_pages(buf);
        const int first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
        SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
        char *kaddr;
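
This change matters on subpage setups where the node size is smaller than the
page size: fs_info->nodesize >> PAGE_SHIFT truncates to zero there, while
num_extent_pages() derives the page count from the extent buffer and never
yields zero. A standalone arithmetic sketch (illustrative values; the round-up
mirrors what a page-count helper must do):

#include <stdio.h>

int main(void)
{
        const unsigned long page_size = 64 * 1024;      /* e.g. 64K pages */
        const unsigned int page_shift = 16;             /* log2(64K) */
        const unsigned long nodesize = 16 * 1024;       /* 16K btrfs nodes */

        /* Old computation: truncates to 0 when nodesize < page_size. */
        unsigned long old = nodesize >> page_shift;

        /* Counting pages by rounding up covers the subpage case. */
        unsigned long fixed = (nodesize + page_size - 1) / page_size;

        printf("old=%lu fixed=%lu\n", old, fixed);      /* old=0 fixed=1 */
        return 0;
}
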
index d296483..268ce58 100644 (file)
@@ -6019,6 +6019,9 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
        mutex_lock(&fs_info->fs_devices->device_list_mutex);
        devices = &fs_info->fs_devices->devices;
        list_for_each_entry(device, devices, dev_list) {
+               if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
+                       continue;
+
                ret = btrfs_trim_free_extents(device, &group_trimmed);
                if (ret) {
                        dev_failed++;
index e6eb209..0117d86 100644 (file)
@@ -2271,13 +2271,127 @@ static blk_status_t btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
        return btrfs_csum_one_bio(BTRFS_I(inode), bio, 0, 0);
 }
 
+/*
+ * Split an extent_map at [start, start + len]
+ *
+ * This function is intended to be used only for extract_ordered_extent().
+ */
+static int split_zoned_em(struct btrfs_inode *inode, u64 start, u64 len,
+                         u64 pre, u64 post)
+{
+       struct extent_map_tree *em_tree = &inode->extent_tree;
+       struct extent_map *em;
+       struct extent_map *split_pre = NULL;
+       struct extent_map *split_mid = NULL;
+       struct extent_map *split_post = NULL;
+       int ret = 0;
+       int modified;
+       unsigned long flags;
+
+       /* Sanity check */
+       if (pre == 0 && post == 0)
+               return 0;
+
+       split_pre = alloc_extent_map();
+       if (pre)
+               split_mid = alloc_extent_map();
+       if (post)
+               split_post = alloc_extent_map();
+       if (!split_pre || (pre && !split_mid) || (post && !split_post)) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       ASSERT(pre + post < len);
+
+       lock_extent(&inode->io_tree, start, start + len - 1);
+       write_lock(&em_tree->lock);
+       em = lookup_extent_mapping(em_tree, start, len);
+       if (!em) {
+               ret = -EIO;
+               goto out_unlock;
+       }
+
+       ASSERT(em->len == len);
+       ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
+       ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE);
+
+       flags = em->flags;
+       clear_bit(EXTENT_FLAG_PINNED, &em->flags);
+       clear_bit(EXTENT_FLAG_LOGGING, &flags);
+       modified = !list_empty(&em->list);
+
+       /* First, replace the em with a new extent_map starting from em->start */
+       split_pre->start = em->start;
+       split_pre->len = (pre ? pre : em->len - post);
+       split_pre->orig_start = split_pre->start;
+       split_pre->block_start = em->block_start;
+       split_pre->block_len = split_pre->len;
+       split_pre->orig_block_len = split_pre->block_len;
+       split_pre->ram_bytes = split_pre->len;
+       split_pre->flags = flags;
+       split_pre->compress_type = em->compress_type;
+       split_pre->generation = em->generation;
+
+       replace_extent_mapping(em_tree, em, split_pre, modified);
+
+       /*
+        * Now we only have an extent_map at:
+        *     [em->start, em->start + pre] if pre != 0
+        *     [em->start, em->start + em->len - post] if pre == 0
+        */
+
+       if (pre) {
+               /* Insert the middle extent_map */
+               split_mid->start = em->start + pre;
+               split_mid->len = em->len - pre - post;
+               split_mid->orig_start = split_mid->start;
+               split_mid->block_start = em->block_start + pre;
+               split_mid->block_len = split_mid->len;
+               split_mid->orig_block_len = split_mid->block_len;
+               split_mid->ram_bytes = split_mid->len;
+               split_mid->flags = flags;
+               split_mid->compress_type = em->compress_type;
+               split_mid->generation = em->generation;
+               add_extent_mapping(em_tree, split_mid, modified);
+       }
+
+       if (post) {
+               split_post->start = em->start + em->len - post;
+               split_post->len = post;
+               split_post->orig_start = split_post->start;
+               split_post->block_start = em->block_start + em->len - post;
+               split_post->block_len = split_post->len;
+               split_post->orig_block_len = split_post->block_len;
+               split_post->ram_bytes = split_post->len;
+               split_post->flags = flags;
+               split_post->compress_type = em->compress_type;
+               split_post->generation = em->generation;
+               add_extent_mapping(em_tree, split_post, modified);
+       }
+
+       /* Once for us */
+       free_extent_map(em);
+       /* Once for the tree */
+       free_extent_map(em);
+
+out_unlock:
+       write_unlock(&em_tree->lock);
+       unlock_extent(&inode->io_tree, start, start + len - 1);
+out:
+       free_extent_map(split_pre);
+       free_extent_map(split_mid);
+       free_extent_map(split_post);
+
+       return ret;
+}
+
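
The splitting above carves the original mapping [start, start + len) into up to
three pieces of length pre, len - pre - post, and post. A standalone sketch of
just that range arithmetic (userspace, illustrative):

#include <stdint.h>
#include <stdio.h>

/* Print the up-to-three ranges split_zoned_em() carves out of [start, start+len). */
static void split_ranges(uint64_t start, uint64_t len, uint64_t pre, uint64_t post)
{
        /* First piece: the "pre" part if any, otherwise everything but "post". */
        uint64_t first_len = pre ? pre : len - post;

        printf("first: [%llu, %llu)\n", (unsigned long long)start,
               (unsigned long long)(start + first_len));

        if (pre)        /* middle piece exists only when there is a "pre" part */
                printf("mid:   [%llu, %llu)\n", (unsigned long long)(start + pre),
                       (unsigned long long)(start + len - post));

        if (post)       /* trailing piece */
                printf("post:  [%llu, %llu)\n",
                       (unsigned long long)(start + len - post),
                       (unsigned long long)(start + len));
}

int main(void)
{
        split_ranges(0, 128, 16, 32);   /* -> [0,16) [16,96) [96,128) */
        return 0;
}
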
 static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
                                           struct bio *bio, loff_t file_offset)
 {
        struct btrfs_ordered_extent *ordered;
-       struct extent_map *em = NULL, *em_new = NULL;
-       struct extent_map_tree *em_tree = &inode->extent_tree;
        u64 start = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
+       u64 file_len;
        u64 len = bio->bi_iter.bi_size;
        u64 end = start + len;
        u64 ordered_end;
@@ -2317,41 +2431,16 @@ static blk_status_t extract_ordered_extent(struct btrfs_inode *inode,
                goto out;
        }
 
+       file_len = ordered->num_bytes;
        pre = start - ordered->disk_bytenr;
        post = ordered_end - end;
 
        ret = btrfs_split_ordered_extent(ordered, pre, post);
        if (ret)
                goto out;
-
-       read_lock(&em_tree->lock);
-       em = lookup_extent_mapping(em_tree, ordered->file_offset, len);
-       if (!em) {
-               read_unlock(&em_tree->lock);
-               ret = -EIO;
-               goto out;
-       }
-       read_unlock(&em_tree->lock);
-
-       ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
-       /*
-        * We cannot reuse em_new here but have to create a new one, as
-        * unpin_extent_cache() expects the start of the extent map to be the
-        * logical offset of the file, which does not hold true anymore after
-        * splitting.
-        */
-       em_new = create_io_em(inode, em->start + pre, len,
-                             em->start + pre, em->block_start + pre, len,
-                             len, len, BTRFS_COMPRESS_NONE,
-                             BTRFS_ORDERED_REGULAR);
-       if (IS_ERR(em_new)) {
-               ret = PTR_ERR(em_new);
-               goto out;
-       }
-       free_extent_map(em_new);
+       ret = split_zoned_em(inode, file_offset, file_len, pre, post);
 
 out:
-       free_extent_map(em);
        btrfs_put_ordered_extent(ordered);
 
        return errno_to_blk_status(ret);
@@ -2903,7 +2992,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
                goto out;
        }
 
-       if (ordered_extent->disk)
+       if (ordered_extent->bdev)
                btrfs_rewrite_logical_zoned(ordered_extent);
 
        btrfs_free_io_failure_record(inode, start, end);
index 6eb41b7..5c0f848 100644 (file)
@@ -190,8 +190,6 @@ static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset
        entry->truncated_len = (u64)-1;
        entry->qgroup_rsv = ret;
        entry->physical = (u64)-1;
-       entry->disk = NULL;
-       entry->partno = (u8)-1;
 
        ASSERT(type == BTRFS_ORDERED_REGULAR ||
               type == BTRFS_ORDERED_NOCOW ||
index 5664720..b2d88ab 100644 (file)
@@ -145,8 +145,7 @@ struct btrfs_ordered_extent {
         * command in a workqueue context
         */
        u64 physical;
-       struct gendisk *disk;
-       u8 partno;
+       struct block_device *bdev;
 };
 
 /*
index 07ec06d..0fa1211 100644 (file)
@@ -1704,17 +1704,39 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
        return 0;
 }
 
-int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
+int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
                                   struct btrfs_qgroup_extent_record *qrecord)
 {
        struct ulist *old_root;
        u64 bytenr = qrecord->bytenr;
        int ret;
 
-       ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
+       /*
+        * We are always called in a context where we are already holding a
+        * transaction handle. Often we are called when adding a data delayed
+        * reference from btrfs_truncate_inode_items() (truncating or unlinking),
+        * in which case we will be holding a write lock on extent buffer from a
+        * in which case we will be holding a write lock on an extent buffer from a
+        * acquire fs_info->commit_root_sem, because that is a higher level lock
+        * that must be acquired before locking any extent buffers.
+        *
+        * So we want btrfs_find_all_roots() to not acquire the commit_root_sem
+        * but we can't pass it a non-NULL transaction handle, because otherwise
+        * it would not use commit roots and would lock extent buffers, causing
+        * a deadlock if it ends up trying to read lock the same extent buffer
+        * that was previously write locked at btrfs_truncate_inode_items().
+        *
+        * So pass a NULL transaction handle to btrfs_find_all_roots() and
+        * explicitly tell it to not acquire the commit_root_sem - if we are
+        * holding a transaction handle we don't need its protection.
+        */
+       ASSERT(trans != NULL);
+
+       ret = btrfs_find_all_roots(NULL, trans->fs_info, bytenr, 0, &old_root,
+                                  false, true);
        if (ret < 0) {
-               fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
-               btrfs_warn(fs_info,
+               trans->fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+               btrfs_warn(trans->fs_info,
 "error accounting new delayed refs extent (err code: %d), quota inconsistent",
                        ret);
                return 0;
@@ -1758,7 +1780,7 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
                kfree(record);
                return 0;
        }
-       return btrfs_qgroup_trace_extent_post(fs_info, record);
+       return btrfs_qgroup_trace_extent_post(trans, record);
 }
 
 int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
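
Every btrfs_find_all_roots() call in this patch gains a trailing boolean;
judging by the comment above, it tells the function whether to skip taking
fs_info->commit_root_sem, which is safe when the caller already holds a
transaction handle. A stubbed, compilable sketch of the two call shapes; the
parameter name is an assumption, not taken from this diff:

#include <stdbool.h>
#include <stddef.h>

/* Stub mirroring only the call shape; the real function lives in
 * fs/btrfs/backref.c. "skip_commit_root_sem" is an assumed name for the
 * new trailing parameter, based on the comment in this patch. */
static int find_all_roots(void *trans, void *fs_info,
                          unsigned long long bytenr,
                          unsigned long long time_seq, void **roots,
                          bool ignore_offset, bool skip_commit_root_sem)
{
        (void)trans; (void)fs_info; (void)bytenr; (void)time_seq;
        (void)roots; (void)ignore_offset; (void)skip_commit_root_sem;
        return 0;
}

int main(void)
{
        void *roots = NULL;

        /* Caller holds a trans handle: pass NULL trans, skip the semaphore. */
        find_all_roots(NULL, NULL, 4096ULL, 0, &roots, false, true);

        /* No such context (e.g. qgroup rescan): take the semaphore as before. */
        find_all_roots(NULL, NULL, 4096ULL, 0, &roots, false, false);
        return 0;
}
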
@@ -2629,7 +2651,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
                                /* Search commit root to find old_roots */
                                ret = btrfs_find_all_roots(NULL, fs_info,
                                                record->bytenr, 0,
-                                               &record->old_roots, false);
+                                               &record->old_roots, false, false);
                                if (ret < 0)
                                        goto cleanup;
                        }
@@ -2645,7 +2667,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
                         * current root. It's safe inside commit_transaction().
                         */
                        ret = btrfs_find_all_roots(trans, fs_info,
-                               record->bytenr, BTRFS_SEQ_LAST, &new_roots, false);
+                          record->bytenr, BTRFS_SEQ_LAST, &new_roots, false, false);
                        if (ret < 0)
                                goto cleanup;
                        if (qgroup_to_skip) {
@@ -3179,7 +3201,7 @@ static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
                        num_bytes = found.offset;
 
                ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
-                                          &roots, false);
+                                          &roots, false, false);
                if (ret < 0)
                        goto out;
                /* For rescan, just pass old_roots as NULL */
index 7283e4f..880e9df 100644 (file)
@@ -298,7 +298,7 @@ int btrfs_qgroup_trace_extent_nolock(
  * using current root, then we can move all expensive backref walk out of
  * transaction committing, but not now as qgroup accounting will be wrong again.
  */
-int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
+int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
                                   struct btrfs_qgroup_extent_record *qrecord);
 
 /*
index f313728..98b5aab 100644 (file)
@@ -224,7 +224,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
         * quota.
         */
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                test_err("couldn't find old roots: %d", ret);
@@ -237,7 +237,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
                return ret;
 
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -261,7 +261,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
        new_roots = NULL;
 
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                test_err("couldn't find old roots: %d", ret);
@@ -273,7 +273,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
                return -EINVAL;
 
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -325,7 +325,7 @@ static int test_multiple_refs(struct btrfs_root *root,
        }
 
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                test_err("couldn't find old roots: %d", ret);
@@ -338,7 +338,7 @@ static int test_multiple_refs(struct btrfs_root *root,
                return ret;
 
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -360,7 +360,7 @@ static int test_multiple_refs(struct btrfs_root *root,
        }
 
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                test_err("couldn't find old roots: %d", ret);
@@ -373,7 +373,7 @@ static int test_multiple_refs(struct btrfs_root *root,
                return ret;
 
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -401,7 +401,7 @@ static int test_multiple_refs(struct btrfs_root *root,
        }
 
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                test_err("couldn't find old roots: %d", ret);
@@ -414,7 +414,7 @@ static int test_multiple_refs(struct btrfs_root *root,
                return ret;
 
        ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
-                       false);
+                       false, false);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
index 5031823..14b9fdc 100644 (file)
@@ -254,23 +254,21 @@ static inline int extwriter_counter_read(struct btrfs_transaction *trans)
 }
 
 /*
- * To be called after all the new block groups attached to the transaction
- * handle have been created (btrfs_create_pending_block_groups()).
+ * To be called after the chunk btree updates that follow allocating a new
+ * chunk (after btrfs_chunk_alloc_add_chunk_item() is called), after all chunk
+ * btree updates when removing a chunk, and after finishing the second phase of
+ * chunk allocation (btrfs_create_pending_block_groups()) in case some block
+ * group had its chunk item insertion delayed to the second phase.
  */
 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
-       struct btrfs_transaction *cur_trans = trans->transaction;
 
        if (!trans->chunk_bytes_reserved)
                return;
 
-       WARN_ON_ONCE(!list_empty(&trans->new_bgs));
-
        btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
                                trans->chunk_bytes_reserved, NULL);
-       atomic64_sub(trans->chunk_bytes_reserved, &cur_trans->chunk_bytes_reserved);
-       cond_wake_up(&cur_trans->chunk_reserve_wait);
        trans->chunk_bytes_reserved = 0;
 }
 
@@ -386,8 +384,6 @@ loop:
        spin_lock_init(&cur_trans->dropped_roots_lock);
        INIT_LIST_HEAD(&cur_trans->releasing_ebs);
        spin_lock_init(&cur_trans->releasing_ebs_lock);
-       atomic64_set(&cur_trans->chunk_bytes_reserved, 0);
-       init_waitqueue_head(&cur_trans->chunk_reserve_wait);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
                        IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
@@ -701,7 +697,6 @@ again:
        h->fs_info = root->fs_info;
 
        h->type = type;
-       h->can_flush_pending_bgs = true;
        INIT_LIST_HEAD(&h->new_bgs);
 
        smp_mb();
index 07d7602..ba45065 100644 (file)
@@ -96,13 +96,6 @@ struct btrfs_transaction {
 
        spinlock_t releasing_ebs_lock;
        struct list_head releasing_ebs;
-
-       /*
-        * The number of bytes currently reserved, by all transaction handles
-        * attached to this transaction, for metadata extents of the chunk tree.
-        */
-       atomic64_t chunk_bytes_reserved;
-       wait_queue_head_t chunk_reserve_wait;
 };
 
 #define __TRANS_FREEZABLE      (1U << 0)
@@ -139,7 +132,7 @@ struct btrfs_trans_handle {
        short aborted;
        bool adding_csums;
        bool allocating_chunk;
-       bool can_flush_pending_bgs;
+       bool removing_chunk;
        bool reloc_reserved;
        bool in_fsync;
        struct btrfs_root *root;
index cab451d..e6430ac 100644 (file)
@@ -3173,7 +3173,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
                if (!log_root_tree->node) {
                        ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
                        if (ret) {
-                               mutex_unlock(&fs_info->tree_log_mutex);
+                               mutex_unlock(&fs_info->tree_root->log_mutex);
                                goto out;
                        }
                }
@@ -5526,16 +5526,29 @@ log_extents:
                spin_lock(&inode->lock);
                inode->logged_trans = trans->transid;
                /*
-                * Don't update last_log_commit if we logged that an inode exists
-                * after it was loaded to memory (full_sync bit set).
-                * This is to prevent data loss when we do a write to the inode,
-                * then the inode gets evicted after all delalloc was flushed,
-                * then we log it exists (due to a rename for example) and then
-                * fsync it. This last fsync would do nothing (not logging the
-                * extents previously written).
+                * Don't update last_log_commit if we logged that an inode exists.
+                * We do this for two reasons:
+                *
+                * 1) We might have had buffered writes to this inode that were
+                *    flushed and had their ordered extents completed in this
+                *    transaction, but we did not previously log the inode with
+                *    LOG_INODE_ALL. Later the inode was evicted and after that
+                *    it was loaded again and this LOG_INODE_EXISTS log operation
+                *    happened. We must make sure that if an explicit fsync against
+                *    the inode is performed later, it logs the new extents, an
+                *    updated inode item, etc., and syncs the log. The same logic
+                *    applies to direct IO writes instead of buffered writes.
+                *
+                * 2) When we log the inode with LOG_INODE_EXISTS, its inode item
+                *    is logged with an i_size of 0 or whatever value was logged
+                *    before. If later the i_size of the inode is increased by a
+                *    truncate operation, the log is synced through an fsync of
+                *    some other inode and then finally an explicit fsync against
+                *    this inode is made, we must make sure this fsync logs the
+                *    inode with the new i_size, the hole between old i_size and
+                *    the new i_size, and syncs the log.
                 */
-               if (inode_only != LOG_INODE_EXISTS ||
-                   !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
+               if (inode_only != LOG_INODE_EXISTS)
                        inode->last_log_commit = inode->last_sub_trans;
                spin_unlock(&inode->lock);
        }
@@ -6490,8 +6503,8 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
         * if this inode hasn't been logged and directory we're renaming it
         * from hasn't been logged, we don't need to log it
         */
-       if (inode->logged_trans < trans->transid &&
-           (!old_dir || old_dir->logged_trans < trans->transid))
+       if (!inode_logged(trans, inode) &&
+           (!old_dir || !inode_logged(trans, old_dir)))
                return;
 
        /*
index 807502c..70f94b7 100644 (file)
@@ -1078,6 +1078,7 @@ static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
                if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
                        list_del_init(&device->dev_alloc_list);
                        clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
+                       fs_devices->rw_devices--;
                }
                list_del_init(&device->dev_list);
                fs_devices->num_devices--;
@@ -1745,19 +1746,14 @@ again:
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
        } else {
-               btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
                goto out;
        }
 
        *dev_extent_len = btrfs_dev_extent_length(leaf, extent);
 
        ret = btrfs_del_item(trans, root, path);
-       if (ret) {
-               btrfs_handle_fs_error(fs_info, ret,
-                                     "Failed to remove dev extent item");
-       } else {
+       if (ret == 0)
                set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
-       }
 out:
        btrfs_free_path(path);
        return ret;
@@ -2942,7 +2938,7 @@ static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
        u32 cur;
        struct btrfs_key key;
 
-       mutex_lock(&fs_info->chunk_mutex);
+       lockdep_assert_held(&fs_info->chunk_mutex);
        array_size = btrfs_super_sys_array_size(super_copy);
 
        ptr = super_copy->sys_chunk_array;
@@ -2972,7 +2968,6 @@ static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
                        cur += len;
                }
        }
-       mutex_unlock(&fs_info->chunk_mutex);
        return ret;
 }
 
@@ -3012,6 +3007,29 @@ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
        return em;
 }
 
+static int remove_chunk_item(struct btrfs_trans_handle *trans,
+                            struct map_lookup *map, u64 chunk_offset)
+{
+       int i;
+
+       /*
+        * Removing chunk items and updating the device items in the chunk btree
+        * requires holding the chunk_mutex.
+        * See the comment at btrfs_chunk_alloc() for the details.
+        */
+       lockdep_assert_held(&trans->fs_info->chunk_mutex);
+
+       for (i = 0; i < map->num_stripes; i++) {
+               int ret;
+
+               ret = btrfs_update_device(trans, map->stripes[i].dev);
+               if (ret)
+                       return ret;
+       }
+
+       return btrfs_free_chunk(trans, chunk_offset);
+}
+
 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -3032,14 +3050,16 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
                return PTR_ERR(em);
        }
        map = em->map_lookup;
-       mutex_lock(&fs_info->chunk_mutex);
-       check_system_chunk(trans, map->type);
-       mutex_unlock(&fs_info->chunk_mutex);
 
        /*
-        * Take the device list mutex to prevent races with the final phase of
-        * a device replace operation that replaces the device object associated
-        * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
+        * First delete the device extent items from the devices btree.
+        * We take the device_list_mutex to avoid racing with the finishing phase
+        * of a device replace operation. See the comment below before acquiring
+        * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
+        * because that can result in a deadlock when deleting the device extent
+        * items from the devices btree - COWing an extent buffer from the btree
+        * may result in allocating a new metadata chunk, which would attempt to
+        * lock fs_info->chunk_mutex again.
         */
        mutex_lock(&fs_devices->device_list_mutex);
        for (i = 0; i < map->num_stripes; i++) {
@@ -3061,18 +3081,73 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
                        btrfs_clear_space_info_full(fs_info);
                        mutex_unlock(&fs_info->chunk_mutex);
                }
+       }
+       mutex_unlock(&fs_devices->device_list_mutex);
 
-               ret = btrfs_update_device(trans, device);
+       /*
+        * We acquire fs_info->chunk_mutex for 2 reasons:
+        *
+        * 1) Just like with the first phase of the chunk allocation, we must
+        *    reserve system space, do all chunk btree updates and deletions, and
+        *    update the system chunk array in the superblock while holding this
+        *    mutex. This is for similar reasons as explained on the comment at
+        *    the top of btrfs_chunk_alloc();
+        *
+        * 2) Prevent races with the final phase of a device replace operation
+        *    that replaces the device object associated with the map's stripes,
+        *    because the device object's id can change at any time during that
+        *    final phase of the device replace operation
+        *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
+        *    replaced device and then see it with an ID of
+        *    BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
+        *    the device item, which does not exist in the chunk btree.
+        *    The finishing phase of device replace acquires both the
+        *    device_list_mutex and the chunk_mutex, in that order, so we are
+        *    safe by just acquiring the chunk_mutex.
+        */
+       trans->removing_chunk = true;
+       mutex_lock(&fs_info->chunk_mutex);
+
+       check_system_chunk(trans, map->type);
+
+       ret = remove_chunk_item(trans, map, chunk_offset);
+       /*
+        * Normally we should not get -ENOSPC since we reserved space before
+        * through the call to check_system_chunk().
+        *
+        * Despite our system space_info having enough free space, we may not
+        * be able to allocate extents from its block groups, because all have
+        * an incompatible profile, which will force us to allocate a new system
+        * block group with the right profile, or right after we called
+        * check_system_chunk() above, a scrub turned the only system block group
+        * with enough free space into RO mode.
+        * This is explained with more detail at do_chunk_alloc().
+        *
+        * So if we get -ENOSPC, allocate a new system chunk and retry once.
+        */
+       if (ret == -ENOSPC) {
+               const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
+               struct btrfs_block_group *sys_bg;
+
+               sys_bg = btrfs_alloc_chunk(trans, sys_flags);
+               if (IS_ERR(sys_bg)) {
+                       ret = PTR_ERR(sys_bg);
+                       btrfs_abort_transaction(trans, ret);
+                       goto out;
+               }
+
+               ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
                if (ret) {
-                       mutex_unlock(&fs_devices->device_list_mutex);
                        btrfs_abort_transaction(trans, ret);
                        goto out;
                }
-       }
-       mutex_unlock(&fs_devices->device_list_mutex);
 
-       ret = btrfs_free_chunk(trans, chunk_offset);
-       if (ret) {
+               ret = remove_chunk_item(trans, map, chunk_offset);
+               if (ret) {
+                       btrfs_abort_transaction(trans, ret);
+                       goto out;
+               }
+       } else if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto out;
        }
@@ -3087,6 +3162,15 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
                }
        }
 
+       mutex_unlock(&fs_info->chunk_mutex);
+       trans->removing_chunk = false;
+
+       /*
+        * We are done with chunk btree updates and deletions, so release the
+        * system space we previously reserved (with check_system_chunk()).
+        */
+       btrfs_trans_release_chunk_metadata(trans);
+
        ret = btrfs_remove_block_group(trans, chunk_offset, em);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
@@ -3094,6 +3178,10 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
        }
 
 out:
+       if (trans->removing_chunk) {
+               mutex_unlock(&fs_info->chunk_mutex);
+               trans->removing_chunk = false;
+       }
        /* once for us */
        free_extent_map(em);
        return ret;
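
The error handling above follows a set-up/tear-down pattern: set
trans->removing_chunk and take the chunk_mutex up front, drop both on the happy
path once the chunk btree work is done, and let the out label undo whatever is
still held on error exits. A standalone sketch of that pattern (simplified,
with a pthread mutex standing in for fs_info->chunk_mutex):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;

struct trans { int removing_chunk; };

/* Happy path releases early; the "out" label cleans up error exits. */
static int remove_chunk(struct trans *trans, int fail)
{
        int ret = 0;

        trans->removing_chunk = 1;
        pthread_mutex_lock(&chunk_mutex);

        if (fail) {
                ret = -1;
                goto out;       /* mutex still held, flag still set here */
        }

        pthread_mutex_unlock(&chunk_mutex);
        trans->removing_chunk = 0;
        /* ... further work done without the mutex held ... */
out:
        if (trans->removing_chunk) {
                pthread_mutex_unlock(&chunk_mutex);
                trans->removing_chunk = 0;
        }
        return ret;
}

int main(void)
{
        struct trans t = { 0 };

        printf("ok=%d fail=%d\n", remove_chunk(&t, 0), remove_chunk(&t, 1));
        return 0;
}
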
@@ -4860,13 +4948,12 @@ static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
        u32 array_size;
        u8 *ptr;
 
-       mutex_lock(&fs_info->chunk_mutex);
+       lockdep_assert_held(&fs_info->chunk_mutex);
+
        array_size = btrfs_super_sys_array_size(super_copy);
        if (array_size + item_size + sizeof(disk_key)
-                       > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
-               mutex_unlock(&fs_info->chunk_mutex);
+                       > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
                return -EFBIG;
-       }
 
        ptr = super_copy->sys_chunk_array + array_size;
        btrfs_cpu_key_to_disk(&disk_key, key);
@@ -4875,7 +4962,6 @@ static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
        memcpy(ptr, chunk, item_size);
        item_size += sizeof(disk_key);
        btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
-       mutex_unlock(&fs_info->chunk_mutex);
 
        return 0;
 }
@@ -5225,13 +5311,14 @@ static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
        }
 }
 
-static int create_chunk(struct btrfs_trans_handle *trans,
+static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
                        struct alloc_chunk_ctl *ctl,
                        struct btrfs_device_info *devices_info)
 {
        struct btrfs_fs_info *info = trans->fs_info;
        struct map_lookup *map = NULL;
        struct extent_map_tree *em_tree;
+       struct btrfs_block_group *block_group;
        struct extent_map *em;
        u64 start = ctl->start;
        u64 type = ctl->type;
@@ -5241,7 +5328,7 @@ static int create_chunk(struct btrfs_trans_handle *trans,
 
        map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
        if (!map)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
        map->num_stripes = ctl->num_stripes;
 
        for (i = 0; i < ctl->ndevs; ++i) {
@@ -5263,7 +5350,7 @@ static int create_chunk(struct btrfs_trans_handle *trans,
        em = alloc_extent_map();
        if (!em) {
                kfree(map);
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
        }
        set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
        em->map_lookup = map;
@@ -5279,12 +5366,12 @@ static int create_chunk(struct btrfs_trans_handle *trans,
        if (ret) {
                write_unlock(&em_tree->lock);
                free_extent_map(em);
-               return ret;
+               return ERR_PTR(ret);
        }
        write_unlock(&em_tree->lock);
 
-       ret = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
-       if (ret)
+       block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
+       if (IS_ERR(block_group))
                goto error_del_extent;
 
        for (i = 0; i < map->num_stripes; i++) {
@@ -5304,7 +5391,7 @@ static int create_chunk(struct btrfs_trans_handle *trans,
        check_raid56_incompat_flag(info, type);
        check_raid1c34_incompat_flag(info, type);
 
-       return 0;
+       return block_group;
 
 error_del_extent:
        write_lock(&em_tree->lock);
@@ -5316,34 +5403,36 @@ error_del_extent:
        /* One for the tree reference */
        free_extent_map(em);
 
-       return ret;
+       return block_group;
 }
 
-int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
+struct btrfs_block_group *btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+                                           u64 type)
 {
        struct btrfs_fs_info *info = trans->fs_info;
        struct btrfs_fs_devices *fs_devices = info->fs_devices;
        struct btrfs_device_info *devices_info = NULL;
        struct alloc_chunk_ctl ctl;
+       struct btrfs_block_group *block_group;
        int ret;
 
        lockdep_assert_held(&info->chunk_mutex);
 
        if (!alloc_profile_is_valid(type, 0)) {
                ASSERT(0);
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
        }
 
        if (list_empty(&fs_devices->alloc_list)) {
                if (btrfs_test_opt(info, ENOSPC_DEBUG))
                        btrfs_debug(info, "%s: no writable device", __func__);
-               return -ENOSPC;
+               return ERR_PTR(-ENOSPC);
        }
 
        if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
                btrfs_err(info, "invalid chunk type 0x%llx requested", type);
                ASSERT(0);
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
        }
 
        ctl.start = find_next_chunk(info);
@@ -5353,46 +5442,43 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
        devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
                               GFP_NOFS);
        if (!devices_info)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        ret = gather_device_info(fs_devices, &ctl, devices_info);
-       if (ret < 0)
+       if (ret < 0) {
+               block_group = ERR_PTR(ret);
                goto out;
+       }
 
        ret = decide_stripe_size(fs_devices, &ctl, devices_info);
-       if (ret < 0)
+       if (ret < 0) {
+               block_group = ERR_PTR(ret);
                goto out;
+       }
 
-       ret = create_chunk(trans, &ctl, devices_info);
+       block_group = create_chunk(trans, &ctl, devices_info);
 
 out:
        kfree(devices_info);
-       return ret;
+       return block_group;
 }
 
 /*
- * Chunk allocation falls into two parts. The first part does work
- * that makes the new allocated chunk usable, but does not do any operation
- * that modifies the chunk tree. The second part does the work that
- * requires modifying the chunk tree. This division is important for the
- * bootstrap process of adding storage to a seed btrfs.
+ * This function, btrfs_finish_chunk_alloc(), belongs to phase 2.
+ *
+ * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
+ * phases.
  */
 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
                             u64 chunk_offset, u64 chunk_size)
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
-       struct btrfs_root *extent_root = fs_info->extent_root;
-       struct btrfs_root *chunk_root = fs_info->chunk_root;
-       struct btrfs_key key;
        struct btrfs_device *device;
-       struct btrfs_chunk *chunk;
-       struct btrfs_stripe *stripe;
        struct extent_map *em;
        struct map_lookup *map;
-       size_t item_size;
        u64 dev_offset;
        u64 stripe_size;
-       int i = 0;
+       int i;
        int ret = 0;
 
        em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
@@ -5400,53 +5486,117 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
                return PTR_ERR(em);
 
        map = em->map_lookup;
-       item_size = btrfs_chunk_item_size(map->num_stripes);
        stripe_size = em->orig_block_len;
 
-       chunk = kzalloc(item_size, GFP_NOFS);
-       if (!chunk) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
        /*
         * Take the device list mutex to prevent races with the final phase of
         * a device replace operation that replaces the device object associated
         * with the map's stripes, because the device object's id can change
         * at any time during that final phase of the device replace operation
-        * (dev-replace.c:btrfs_dev_replace_finishing()).
+        * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
+        * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
+        * resulting in persisting a device extent item with such ID.
         */
        mutex_lock(&fs_info->fs_devices->device_list_mutex);
        for (i = 0; i < map->num_stripes; i++) {
                device = map->stripes[i].dev;
                dev_offset = map->stripes[i].physical;
 
-               ret = btrfs_update_device(trans, device);
-               if (ret)
-                       break;
                ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
                                             dev_offset, stripe_size);
                if (ret)
                        break;
        }
-       if (ret) {
-               mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+       mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+
+       free_extent_map(em);
+       return ret;
+}
+
+/*
+ * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
+ * phase 1 of chunk allocation. It belongs to phase 2 only when allocating
+ * system chunks.
+ *
+ * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
+ * phases.
+ */
+int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
+                                    struct btrfs_block_group *bg)
+{
+       struct btrfs_fs_info *fs_info = trans->fs_info;
+       struct btrfs_root *extent_root = fs_info->extent_root;
+       struct btrfs_root *chunk_root = fs_info->chunk_root;
+       struct btrfs_key key;
+       struct btrfs_chunk *chunk;
+       struct btrfs_stripe *stripe;
+       struct extent_map *em;
+       struct map_lookup *map;
+       size_t item_size;
+       int i;
+       int ret;
+
+       /*
+        * We take the chunk_mutex for 2 reasons:
+        *
+        * 1) Updates and insertions in the chunk btree, as well as updates to
+        *    the system chunk array in the superblock, must be done while
+        *    holding the chunk_mutex. See the comment on top of
+        *    btrfs_chunk_alloc() for the details;
+        *
+        * 2) To prevent races with the final phase of a device replace operation
+        *    that replaces the device object associated with the map's stripes,
+        *    because the device object's id can change at any time during that
+        *    final phase of the device replace operation
+        *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
+        *    replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
+        *    which would cause a failure when updating the device item, which
+        *    does not exist, or when persisting a stripe of the chunk item with
+        *    such an ID.
+        *    Here we can't use the device_list_mutex because our caller already
+        *    has locked the chunk_mutex, and the final phase of device replace
+        *    acquires both mutexes - first the device_list_mutex and then the
+        *    chunk_mutex. Using any of those two mutexes protects us from a
+        *    concurrent device replace.
+        */
+       lockdep_assert_held(&fs_info->chunk_mutex);
+
+       em = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
+       if (IS_ERR(em)) {
+               ret = PTR_ERR(em);
+               btrfs_abort_transaction(trans, ret);
+               return ret;
+       }
+
+       map = em->map_lookup;
+       item_size = btrfs_chunk_item_size(map->num_stripes);
+
+       chunk = kzalloc(item_size, GFP_NOFS);
+       if (!chunk) {
+               ret = -ENOMEM;
+               btrfs_abort_transaction(trans, ret);
                goto out;
        }
 
+       for (i = 0; i < map->num_stripes; i++) {
+               struct btrfs_device *device = map->stripes[i].dev;
+
+               ret = btrfs_update_device(trans, device);
+               if (ret)
+                       goto out;
+       }
+
        stripe = &chunk->stripe;
        for (i = 0; i < map->num_stripes; i++) {
-               device = map->stripes[i].dev;
-               dev_offset = map->stripes[i].physical;
+               struct btrfs_device *device = map->stripes[i].dev;
+               const u64 dev_offset = map->stripes[i].physical;
 
                btrfs_set_stack_stripe_devid(stripe, device->devid);
                btrfs_set_stack_stripe_offset(stripe, dev_offset);
                memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
                stripe++;
        }
-       mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
-       btrfs_set_stack_chunk_length(chunk, chunk_size);
+       btrfs_set_stack_chunk_length(chunk, bg->length);
        btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
        btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
        btrfs_set_stack_chunk_type(chunk, map->type);
@@ -5458,15 +5608,18 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
 
        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.type = BTRFS_CHUNK_ITEM_KEY;
-       key.offset = chunk_offset;
+       key.offset = bg->start;
 
        ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
-       if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
-               /*
-                * TODO: Cleanup of inserted chunk root in case of
-                * failure.
-                */
+       if (ret)
+               goto out;
+
+       bg->chunk_item_inserted = 1;
+
+       if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
                ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
+               if (ret)
+                       goto out;
        }
 
 out:
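
To make the lock ordering described in the btrfs_chunk_alloc_add_chunk_item()
comment above concrete, here is an illustrative fragment; it is not a verbatim
excerpt, and the device-replace side lives in dev-replace.c:

/* Order used by the final phase of device replace: */
mutex_lock(&fs_devices->device_list_mutex);
mutex_lock(&fs_info->chunk_mutex);
/* ... swap the replaced device in the chunk maps ... */
mutex_unlock(&fs_info->chunk_mutex);
mutex_unlock(&fs_devices->device_list_mutex);

/*
 * Chunk allocation already runs with chunk_mutex held, which by itself
 * excludes the sequence above; taking device_list_mutex here instead would
 * invert that order and risk deadlock.
 */
lockdep_assert_held(&fs_info->chunk_mutex);
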
@@ -5479,16 +5632,41 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
        u64 alloc_profile;
-       int ret;
+       struct btrfs_block_group *meta_bg;
+       struct btrfs_block_group *sys_bg;
+
+       /*
+        * When adding a new device for sprouting, the seed device is read-only
+        * so we must first allocate a metadata and a system chunk. But before
+        * adding the block group items to the extent, device and chunk btrees,
+        * we must first:
+        *
+        * 1) Create both chunks without making any changes to the btrees, as
+        *    otherwise we would get -ENOSPC since the block groups from the
+        *    seed device are read-only;
+        *
+        * 2) Add the device item for the new sprout device - finishing the setup
+        *    of a new block group requires updating the device item in the chunk
+        *    btree, so it must exist when we attempt to do it. The previous step
+        *    ensures this does not fail with -ENOSPC.
+        *
+        * After that we can add the block group items to their btrees:
+        * update the existing device item in the chunk btree, add a new block group
+        * item to the extent btree, add a new chunk item to the chunk btree and
+        * finally add the new device extent items to the devices btree.
+        */
 
        alloc_profile = btrfs_metadata_alloc_profile(fs_info);
-       ret = btrfs_alloc_chunk(trans, alloc_profile);
-       if (ret)
-               return ret;
+       meta_bg = btrfs_alloc_chunk(trans, alloc_profile);
+       if (IS_ERR(meta_bg))
+               return PTR_ERR(meta_bg);
 
        alloc_profile = btrfs_system_alloc_profile(fs_info);
-       ret = btrfs_alloc_chunk(trans, alloc_profile);
-       return ret;
+       sys_bg = btrfs_alloc_chunk(trans, alloc_profile);
+       if (IS_ERR(sys_bg))
+               return PTR_ERR(sys_bg);
+
+       return 0;
 }
 
 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
@@ -7415,10 +7593,18 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
                        total_dev++;
                } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
                        struct btrfs_chunk *chunk;
+
+                       /*
+                        * We are only called at mount time, so no need to take
+                        * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
+                        * we always take fs_info->chunk_mutex before acquiring
+                        * any locks on the chunk tree. This is a
+                        * requirement for chunk allocation, see the comment on
+                        * top of btrfs_chunk_alloc() for details.
+                        */
+                       ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
                        chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
-                       mutex_lock(&fs_info->chunk_mutex);
                        ret = read_one_chunk(&found_key, leaf, chunk);
-                       mutex_unlock(&fs_info->chunk_mutex);
                        if (ret)
                                goto error;
                }
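
btrfs_alloc_chunk() now hands back the new block group, or an ERR_PTR(),
instead of an errno, so callers move to the IS_ERR()/PTR_ERR() idiom. A
minimal sketch of the new convention, condensed from the hunks above (the
caller must already hold fs_info->chunk_mutex, per the lockdep assertion):

static int example_alloc_system_chunk(struct btrfs_trans_handle *trans,
                                      struct btrfs_fs_info *fs_info)
{
        struct btrfs_block_group *sys_bg;

        /* Phase 1: reserve space and create the new block group. */
        sys_bg = btrfs_alloc_chunk(trans, btrfs_system_alloc_profile(fs_info));
        if (IS_ERR(sys_bg))
                return PTR_ERR(sys_bg);

        /* Phase 2 (immediate for system chunks): insert the chunk item. */
        return btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
}
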
index c7fc7ca..55a8ba2 100644 (file)
@@ -450,7 +450,8 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *map,
                          struct btrfs_io_geometry *io_geom);
 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
-int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type);
+struct btrfs_block_group *btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+                                           u64 type);
 void btrfs_mapping_tree_free(struct extent_map_tree *tree);
 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
                           int mirror_num);
@@ -509,6 +510,8 @@ unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
                                    u64 logical);
 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
                             u64 chunk_offset, u64 chunk_size);
+int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
+                                    struct btrfs_block_group *bg);
 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
                                       u64 logical, u64 length);
index 297c0b1..907c2cc 100644 (file)
@@ -1349,8 +1349,7 @@ void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
                return;
 
        ordered->physical = physical;
-       ordered->disk = bio->bi_bdev->bd_disk;
-       ordered->partno = bio->bi_bdev->bd_partno;
+       ordered->bdev = bio->bi_bdev;
 
        btrfs_put_ordered_extent(ordered);
 }
@@ -1362,18 +1361,16 @@ void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
        struct extent_map_tree *em_tree;
        struct extent_map *em;
        struct btrfs_ordered_sum *sum;
-       struct block_device *bdev;
        u64 orig_logical = ordered->disk_bytenr;
        u64 *logical = NULL;
        int nr, stripe_len;
 
        /* Zoned devices should not have partitions. So, we can assume it is 0 */
-       ASSERT(ordered->partno == 0);
-       bdev = bdgrab(ordered->disk->part0);
-       if (WARN_ON(!bdev))
+       ASSERT(!bdev_is_partition(ordered->bdev));
+       if (WARN_ON(!ordered->bdev))
                return;
 
-       if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, bdev,
+       if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, ordered->bdev,
                                     ordered->physical, &logical, &nr,
                                     &stripe_len)))
                goto out;
@@ -1402,7 +1399,6 @@ void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
 
 out:
        kfree(logical);
-       bdput(bdev);
 }
 
 bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
index a818213..9db1b39 100644 (file)
@@ -4456,7 +4456,7 @@ bool check_session_state(struct ceph_mds_session *s)
                break;
        case CEPH_MDS_SESSION_CLOSING:
                /* Should never reach this when we're unmounting */
-               WARN_ON_ONCE(true);
+               WARN_ON_ONCE(s->s_ttl);
                fallthrough;
        case CEPH_MDS_SESSION_NEW:
        case CEPH_MDS_SESSION_RESTARTING:
index 57f9131..007427b 100644 (file)
@@ -176,7 +176,7 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
                }
        }
 
-       rc = dns_resolve_server_name_to_ip(name, &srvIP);
+       rc = dns_resolve_server_name_to_ip(name, &srvIP, NULL);
        if (rc < 0) {
                cifs_dbg(FYI, "%s: Failed to resolve server part of %s to IP: %d\n",
                         __func__, name, rc);
@@ -211,6 +211,10 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
                else
                        noff = tkn_e - (sb_mountdata + off) + 1;
 
+               if (strncasecmp(sb_mountdata + off, "cruid=", 6) == 0) {
+                       off += noff;
+                       continue;
+               }
                if (strncasecmp(sb_mountdata + off, "unc=", 4) == 0) {
                        off += noff;
                        continue;
index 3c2e117..c0bfc2f 100644 (file)
@@ -75,6 +75,9 @@
 #define SMB_ECHO_INTERVAL_MAX 600
 #define SMB_ECHO_INTERVAL_DEFAULT 60
 
+/* dns resolution interval in seconds */
+#define SMB_DNS_RESOLVE_INTERVAL_DEFAULT 600
+
 /* maximum number of PDUs in one compound */
 #define MAX_COMPOUND 5
 
@@ -646,6 +649,7 @@ struct TCP_Server_Info {
        /* point to the SMBD connection if RDMA is used instead of socket */
        struct smbd_connection *smbd_conn;
        struct delayed_work     echo; /* echo ping workqueue job */
+       struct delayed_work     resolve; /* dns resolution workqueue job */
        char    *smallbuf;      /* pointer to current "small" buffer */
        char    *bigbuf;        /* pointer to current "big" buffer */
        /* Total size of this PDU. Only valid from cifs_demultiplex_thread */
@@ -689,6 +693,9 @@ struct TCP_Server_Info {
        bool use_swn_dstaddr;
        struct sockaddr_storage swn_dstaddr;
 #endif
+#ifdef CONFIG_CIFS_DFS_UPCALL
+       bool is_dfs_conn; /* if a dfs connection */
+#endif
 };
 
 struct cifs_credits {
index f72e3b3..65d1a65 100644 (file)
@@ -873,8 +873,11 @@ PsxDelete:
                                InformationLevel) - 4;
        offset = param_offset + params;
 
-       /* Setup pointer to Request Data (inode type) */
-       pRqD = (struct unlink_psx_rq *)(((char *)&pSMB->hdr.Protocol) + offset);
+       /* Setup pointer to Request Data (inode type).
+        * Note that SMB offsets are from the beginning of the SMB header,
+        * which is 4 bytes in, after the RFC1001 length field
+        */
+       pRqD = (struct unlink_psx_rq *)((char *)(pSMB) + offset + 4);
        pRqD->type = cpu_to_le16(type);
        pSMB->ParameterOffset = cpu_to_le16(param_offset);
        pSMB->DataOffset = cpu_to_le16(offset);
@@ -1081,7 +1084,8 @@ PsxCreat:
        param_offset = offsetof(struct smb_com_transaction2_spi_req,
                                InformationLevel) - 4;
        offset = param_offset + params;
-       pdata = (OPEN_PSX_REQ *)(((char *)&pSMB->hdr.Protocol) + offset);
+       /* SMB offsets are from the beginning of the SMB header, which is 4 bytes in, after the RFC1001 length field */
+       pdata = (OPEN_PSX_REQ *)((char *)(pSMB) + offset + 4);
        pdata->Level = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
        pdata->Permissions = cpu_to_le64(mode);
        pdata->PosixOpenFlags = cpu_to_le32(posix_flags);
index 01dc451..3781eee 100644 (file)
@@ -78,6 +78,8 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
        int rc;
        int len;
        char *unc, *ipaddr = NULL;
+       time64_t expiry, now;
+       unsigned long ttl = SMB_DNS_RESOLVE_INTERVAL_DEFAULT;
 
        if (!server->hostname)
                return -EINVAL;
@@ -91,13 +93,13 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
        }
        scnprintf(unc, len, "\\\\%s", server->hostname);
 
-       rc = dns_resolve_server_name_to_ip(unc, &ipaddr);
+       rc = dns_resolve_server_name_to_ip(unc, &ipaddr, &expiry);
        kfree(unc);
 
        if (rc < 0) {
                cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
                         __func__, server->hostname, rc);
-               return rc;
+               goto requeue_resolve;
        }
 
        spin_lock(&cifs_tcp_ses_lock);
@@ -106,7 +108,45 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
        spin_unlock(&cifs_tcp_ses_lock);
        kfree(ipaddr);
 
-       return !rc ? -1 : 0;
+       /* rc == 1 means success here */
+       if (rc) {
+               now = ktime_get_real_seconds();
+               if (expiry && expiry > now)
+                       /*
+                        * To make sure we don't use the cached entry, retry 1s
+                        * after expiry.
+                        */
+                       ttl = (expiry - now + 1);
+       }
+       rc = !rc ? -1 : 0;
+
+requeue_resolve:
+       cifs_dbg(FYI, "%s: next dns resolution scheduled for %lu seconds in the future\n",
+                __func__, ttl);
+       mod_delayed_work(cifsiod_wq, &server->resolve, (ttl * HZ));
+
+       return rc;
+}
+
+
+static void cifs_resolve_server(struct work_struct *work)
+{
+       int rc;
+       struct TCP_Server_Info *server = container_of(work,
+                                       struct TCP_Server_Info, resolve.work);
+
+       mutex_lock(&server->srv_mutex);
+
+       /*
+        * Resolve the hostname again to make sure that IP address is up-to-date.
+        */
+       rc = reconn_set_ipaddr_from_hostname(server);
+       if (rc) {
+               cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
+                               __func__, rc);
+       }
+
+       mutex_unlock(&server->srv_mutex);
 }
 
 #ifdef CONFIG_CIFS_DFS_UPCALL
@@ -180,7 +220,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
 #ifdef CONFIG_CIFS_DFS_UPCALL
        struct super_block *sb = NULL;
        struct cifs_sb_info *cifs_sb = NULL;
-       struct dfs_cache_tgt_list tgt_list = {0};
+       struct dfs_cache_tgt_list tgt_list = DFS_CACHE_TGT_LIST_INIT(tgt_list);
        struct dfs_cache_tgt_iterator *tgt_it = NULL;
 #endif
 
@@ -680,6 +720,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
        spin_unlock(&cifs_tcp_ses_lock);
 
        cancel_delayed_work_sync(&server->echo);
+       cancel_delayed_work_sync(&server->resolve);
 
        spin_lock(&GlobalMid_Lock);
        server->tcpStatus = CifsExiting;
@@ -1227,6 +1268,16 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx)
 
        spin_lock(&cifs_tcp_ses_lock);
        list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+#ifdef CONFIG_CIFS_DFS_UPCALL
+               /*
+                * The DFS failover implementation in cifs_reconnect() requires unique tcp
+                * sessions for DFS connections to do failover properly, so avoid sharing
+                * them with regular shares or even links that may connect to the same
+                * server but have completely different failover targets.
+                */
+               if (server->is_dfs_conn)
+                       continue;
+#endif
                /*
                 * Skip ses channels since they're only handled in lower layers
                 * (e.g. cifs_send_recv).
@@ -1254,12 +1305,16 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
                return;
        }
 
+       /* srv_count can never go negative */
+       WARN_ON(server->srv_count < 0);
+
        put_net(cifs_net_ns(server));
 
        list_del_init(&server->tcp_ses_list);
        spin_unlock(&cifs_tcp_ses_lock);
 
        cancel_delayed_work_sync(&server->echo);
+       cancel_delayed_work_sync(&server->resolve);
 
        if (from_reconnect)
                /*
@@ -1342,6 +1397,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx)
        INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
        INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
        INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
+       INIT_DELAYED_WORK(&tcp_ses->resolve, cifs_resolve_server);
        INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
        mutex_init(&tcp_ses->reconnect_mutex);
        memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
@@ -1427,6 +1483,12 @@ smbd_connected:
        /* queue echo request delayed work */
        queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
 
+       /* queue dns resolution delayed work */
+       cifs_dbg(FYI, "%s: next dns resolution scheduled for %d seconds in the future\n",
+                __func__, SMB_DNS_RESOLVE_INTERVAL_DEFAULT);
+
+       queue_delayed_work(cifsiod_wq, &tcp_ses->resolve, (SMB_DNS_RESOLVE_INTERVAL_DEFAULT * HZ));
+
        return tcp_ses;
 
 out_err_crypto_release:
@@ -1605,6 +1667,9 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
        }
        spin_unlock(&cifs_tcp_ses_lock);
 
+       /* ses_count can never go negative */
+       WARN_ON(ses->ses_count < 0);
+
        spin_lock(&GlobalMid_Lock);
        if (ses->status == CifsGood)
                ses->status = CifsExiting;
@@ -1972,6 +2037,9 @@ cifs_put_tcon(struct cifs_tcon *tcon)
                return;
        }
 
+       /* tc_count can never go negative */
+       WARN_ON(tcon->tc_count < 0);
+
        if (tcon->use_witness) {
                int rc;
 
@@ -2910,6 +2978,23 @@ static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
 }
 
 #ifdef CONFIG_CIFS_DFS_UPCALL
+static int mount_get_dfs_conns(struct smb3_fs_context *ctx, struct cifs_sb_info *cifs_sb,
+                              unsigned int *xid, struct TCP_Server_Info **nserver,
+                              struct cifs_ses **nses, struct cifs_tcon **ntcon)
+{
+       int rc;
+
+       ctx->nosharesock = true;
+       rc = mount_get_conns(ctx, cifs_sb, xid, nserver, nses, ntcon);
+       if (*nserver) {
+               cifs_dbg(FYI, "%s: marking tcp session as a dfs connection\n", __func__);
+               spin_lock(&cifs_tcp_ses_lock);
+               (*nserver)->is_dfs_conn = true;
+               spin_unlock(&cifs_tcp_ses_lock);
+       }
+       return rc;
+}
+
 /*
  * cifs_build_path_to_root returns full path to root when we do not have an
  * existing connection (tcon)
@@ -3045,7 +3130,7 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_
 {
        int rc;
        char *npath = NULL;
-       struct dfs_cache_tgt_list tgt_list = {0};
+       struct dfs_cache_tgt_list tgt_list = DFS_CACHE_TGT_LIST_INIT(tgt_list);
        struct dfs_cache_tgt_iterator *tgt_it = NULL;
        struct smb3_fs_context tmp_ctx = {NULL};
 
@@ -3105,7 +3190,7 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_
                         tmp_ctx.prepath);
 
                mount_put_conns(cifs_sb, *xid, *server, *ses, *tcon);
-               rc = mount_get_conns(&tmp_ctx, cifs_sb, xid, server, ses, tcon);
+               rc = mount_get_dfs_conns(&tmp_ctx, cifs_sb, xid, server, ses, tcon);
                if (!rc || (*server && *ses)) {
                        /*
                         * We were able to connect to new target server. Update current context with
@@ -3404,7 +3489,12 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
                        goto error;
        }
 
-       ctx->nosharesock = true;
+       mount_put_conns(cifs_sb, xid, server, ses, tcon);
+       /*
+        * Ignore the error check here because we may fail over to other targets
+        * from a cached referral.
+        */
+       (void)mount_get_dfs_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
 
        /* Get path of DFS root */
        ref_path = build_unc_path_to_root(ctx, cifs_sb, false);
@@ -3433,7 +3523,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
                /* Connect to new DFS target only if we were redirected */
                if (oldmnt != cifs_sb->ctx->mount_options) {
                        mount_put_conns(cifs_sb, xid, server, ses, tcon);
-                       rc = mount_get_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
+                       rc = mount_get_dfs_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
                }
                if (rc && !server && !ses) {
                        /* Failed to connect. Try to connect to other targets in the referral. */
@@ -3459,7 +3549,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
                        rc = -ELOOP;
        } while (rc == -EREMOTE);
 
-       if (rc || !tcon)
+       if (rc || !tcon || !ses)
                goto error;
 
        kfree(ref_path);
@@ -4095,7 +4185,8 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
        if (!tree)
                return -ENOMEM;
 
-       if (!tcon->dfs_path) {
+       /* If it is not dfs or there was no cached dfs referral, then reconnect to the same share */
+       if (!tcon->dfs_path || dfs_cache_noreq_find(tcon->dfs_path + 1, &ref, &tl)) {
                if (tcon->ipc) {
                        scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
                        rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
@@ -4105,9 +4196,6 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
                goto out;
        }
 
-       rc = dfs_cache_noreq_find(tcon->dfs_path + 1, &ref, &tl);
-       if (rc)
-               goto out;
        isroot = ref.server_type == DFS_TYPE_ROOT;
        free_dfs_info_param(&ref);
 
index 7c17697..2837455 100644 (file)
@@ -19,6 +19,7 @@
 #include "cifs_debug.h"
 #include "cifs_unicode.h"
 #include "smb2glob.h"
+#include "dns_resolve.h"
 
 #include "dfs_cache.h"
 
@@ -911,6 +912,7 @@ static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
 
 err_free_it:
        list_for_each_entry_safe(it, nit, head, it_list) {
+               list_del(&it->it_list);
                kfree(it->it_name);
                kfree(it);
        }
@@ -1293,6 +1295,194 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
        return 0;
 }
 
+static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
+{
+       char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
+       const char *host;
+       size_t hostlen;
+       char *ip = NULL;
+       struct sockaddr sa;
+       bool match;
+       int rc;
+
+       if (strcasecmp(s1, s2))
+               return false;
+
+       /*
+        * Resolve the share's hostname and check if the server address matches. Otherwise
+        * just ignore it, as we either could not upcall to resolve the hostname or failed
+        * to convert the ip address.
+        */
+       match = true;
+       extract_unc_hostname(s1, &host, &hostlen);
+       scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);
+
+       rc = dns_resolve_server_name_to_ip(unc, &ip, NULL);
+       if (rc < 0) {
+               cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
+                        __func__, (int)hostlen, host);
+               return true;
+       }
+
+       if (!cifs_convert_address(&sa, ip, strlen(ip))) {
+               cifs_dbg(VFS, "%s: failed to convert address \'%s\'. skip address matching.\n",
+                        __func__, ip);
+       } else {
+               mutex_lock(&server->srv_mutex);
+               match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, &sa);
+               mutex_unlock(&server->srv_mutex);
+       }
+
+       kfree(ip);
+       return match;
+}
+
+/*
+ * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
+ * target shares in @refs.
+ */
+static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cache_tgt_list *tl,
+                                        const struct dfs_info3_param *refs, int numrefs)
+{
+       struct dfs_cache_tgt_iterator *it;
+       int i;
+
+       for (it = dfs_cache_get_tgt_iterator(tl); it; it = dfs_cache_get_next_tgt(tl, it)) {
+               for (i = 0; i < numrefs; i++) {
+                       if (target_share_equal(tcon->ses->server, dfs_cache_get_tgt_name(it),
+                                              refs[i].node_name))
+                               return;
+               }
+       }
+
+       cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
+       for (i = 0; i < tcon->ses->chan_count; i++) {
+               spin_lock(&GlobalMid_Lock);
+               if (tcon->ses->chans[i].server->tcpStatus != CifsExiting)
+                       tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect;
+               spin_unlock(&GlobalMid_Lock);
+       }
+}
+
+/* Refresh dfs referral of tcon and mark it for reconnect if needed */
+static int refresh_tcon(struct cifs_ses **sessions, struct cifs_tcon *tcon, bool force_refresh)
+{
+       const char *path = tcon->dfs_path + 1;
+       struct cifs_ses *ses;
+       struct cache_entry *ce;
+       struct dfs_info3_param *refs = NULL;
+       int numrefs = 0;
+       bool needs_refresh = false;
+       struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+       int rc = 0;
+       unsigned int xid;
+
+       ses = find_ipc_from_server_path(sessions, path);
+       if (IS_ERR(ses)) {
+               cifs_dbg(FYI, "%s: could not find ipc session\n", __func__);
+               return PTR_ERR(ses);
+       }
+
+       down_read(&htable_rw_lock);
+       ce = lookup_cache_entry(path);
+       needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
+       if (!IS_ERR(ce)) {
+               rc = get_targets(ce, &tl);
+               if (rc)
+                       cifs_dbg(FYI, "%s: could not get dfs targets: %d\n", __func__, rc);
+       }
+       up_read(&htable_rw_lock);
+
+       if (!needs_refresh) {
+               rc = 0;
+               goto out;
+       }
+
+       xid = get_xid();
+       rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
+       free_xid(xid);
+
+       /* Create or update a cache entry with the new referral */
+       if (!rc) {
+               dump_refs(refs, numrefs);
+
+               down_write(&htable_rw_lock);
+               ce = lookup_cache_entry(path);
+               if (IS_ERR(ce))
+                       add_cache_entry_locked(refs, numrefs);
+               else if (force_refresh || cache_entry_expired(ce))
+                       update_cache_entry_locked(ce, refs, numrefs);
+               up_write(&htable_rw_lock);
+
+               mark_for_reconnect_if_needed(tcon, &tl, refs, numrefs);
+       }
+
+out:
+       dfs_cache_free_tgts(&tl);
+       free_dfs_info_array(refs, numrefs);
+       return rc;
+}
+
+/**
+ * dfs_cache_remount_fs - remount a DFS share
+ *
+ * @cifs_sb: cifs superblock.
+ *
+ * Reconfigure the dfs mount by forcing a new DFS referral and, if the currently
+ * cached targets do not match any of the new targets, mark it for reconnect.
+ *
+ * Return zero if remounted, otherwise non-zero.
+ */
+int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
+{
+       struct cifs_tcon *tcon;
+       struct mount_group *mg;
+       struct cifs_ses *sessions[CACHE_MAX_ENTRIES + 1] = {NULL};
+       int rc;
+
+       if (!cifs_sb || !cifs_sb->master_tlink)
+               return -EINVAL;
+
+       tcon = cifs_sb_master_tcon(cifs_sb);
+       if (!tcon->dfs_path) {
+               cifs_dbg(FYI, "%s: not a dfs tcon\n", __func__);
+               return 0;
+       }
+
+       if (uuid_is_null(&cifs_sb->dfs_mount_id)) {
+               cifs_dbg(FYI, "%s: tcon has no dfs mount group id\n", __func__);
+               return -EINVAL;
+       }
+
+       mutex_lock(&mount_group_list_lock);
+       mg = find_mount_group_locked(&cifs_sb->dfs_mount_id);
+       if (IS_ERR(mg)) {
+               mutex_unlock(&mount_group_list_lock);
+               cifs_dbg(FYI, "%s: tcon has ipc session to refresh referral\n", __func__);
+               return PTR_ERR(mg);
+       }
+       kref_get(&mg->refcount);
+       mutex_unlock(&mount_group_list_lock);
+
+       spin_lock(&mg->lock);
+       memcpy(&sessions, mg->sessions, mg->num_sessions * sizeof(mg->sessions[0]));
+       spin_unlock(&mg->lock);
+
+       /*
+        * After reconnecting to a different server, unique ids won't match anymore, so we
+        * disable serverino. This prevents dentry revalidation from thinking the dentries
+        * are stale (ESTALE).
+        */
+       cifs_autodisable_serverino(cifs_sb);
+       /*
+        * Force the use of prefix path to support failover on DFS paths that resolve to targets
+        * that have different prefix paths.
+        */
+       cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+       rc = refresh_tcon(sessions, tcon, true);
+
+       kref_put(&mg->refcount, mount_group_release);
+       return rc;
+}
+
 /*
  * Refresh all active dfs mounts regardless of whether they are in cache or not.
  * (cache can be cleared)
@@ -1303,7 +1493,6 @@ static void refresh_mounts(struct cifs_ses **sessions)
        struct cifs_ses *ses;
        struct cifs_tcon *tcon, *ntcon;
        struct list_head tcons;
-       unsigned int xid;
 
        INIT_LIST_HEAD(&tcons);
 
@@ -1321,44 +1510,8 @@ static void refresh_mounts(struct cifs_ses **sessions)
        spin_unlock(&cifs_tcp_ses_lock);
 
        list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
-               const char *path = tcon->dfs_path + 1;
-               struct cache_entry *ce;
-               struct dfs_info3_param *refs = NULL;
-               int numrefs = 0;
-               bool needs_refresh = false;
-               int rc = 0;
-
                list_del_init(&tcon->ulist);
-
-               ses = find_ipc_from_server_path(sessions, path);
-               if (IS_ERR(ses))
-                       goto next_tcon;
-
-               down_read(&htable_rw_lock);
-               ce = lookup_cache_entry(path);
-               needs_refresh = IS_ERR(ce) || cache_entry_expired(ce);
-               up_read(&htable_rw_lock);
-
-               if (!needs_refresh)
-                       goto next_tcon;
-
-               xid = get_xid();
-               rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
-               free_xid(xid);
-
-               /* Create or update a cache entry with the new referral */
-               if (!rc) {
-                       down_write(&htable_rw_lock);
-                       ce = lookup_cache_entry(path);
-                       if (IS_ERR(ce))
-                               add_cache_entry_locked(refs, numrefs);
-                       else if (cache_entry_expired(ce))
-                               update_cache_entry_locked(ce, refs, numrefs);
-                       up_write(&htable_rw_lock);
-               }
-
-next_tcon:
-               free_dfs_info_array(refs, numrefs);
+               refresh_tcon(sessions, tcon, false);
                cifs_put_tcon(tcon);
        }
 }
index b29d3ae..52070d1 100644 (file)
@@ -13,6 +13,8 @@
 #include <linux/uuid.h>
 #include "cifsglob.h"
 
+#define DFS_CACHE_TGT_LIST_INIT(var) { .tl_numtgts = 0, .tl_list = LIST_HEAD_INIT((var).tl_list), }
+
 struct dfs_cache_tgt_list {
        int tl_numtgts;
        struct list_head tl_list;
@@ -44,6 +46,7 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
 void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id);
 void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses);
 char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap);
+int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb);
 
 static inline struct dfs_cache_tgt_iterator *
 dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl,
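
The new DFS_CACHE_TGT_LIST_INIT() initializer replaces the earlier { 0 }
pattern, which left the embedded list_head zeroed instead of pointing at
itself and thus was not a valid empty list. A short usage sketch built from
the helpers declared here, assuming dfs_cache_noreq_find() accepts a NULL
ref argument as other dfs_cache getters do:

struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
struct dfs_cache_tgt_iterator *it;
int rc;

rc = dfs_cache_noreq_find(path, NULL, &tl);     /* fill the target list */
if (rc)
        return rc;

for (it = dfs_cache_get_tgt_iterator(&tl); it;
     it = dfs_cache_get_next_tgt(&tl, it))
        cifs_dbg(FYI, "target: %s\n", dfs_cache_get_tgt_name(it));

dfs_cache_free_tgts(&tl);
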
index d15b82d..8c616aa 100644 (file)
@@ -24,6 +24,7 @@
  * dns_resolve_server_name_to_ip - Resolve UNC server name to ip address.
  * @unc: UNC path specifying the server (with '/' as delimiter)
  * @ip_addr: Where to return the IP address.
+ * @expiry: Where to return the expiry time for the dns record.
  *
  * The IP address will be returned in string form, and the caller is
  * responsible for freeing it.
@@ -31,7 +32,7 @@
  * Returns length of result on success, -ve on error.
  */
 int
-dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
+dns_resolve_server_name_to_ip(const char *unc, char **ip_addr, time64_t *expiry)
 {
        struct sockaddr_storage ss;
        const char *hostname, *sep;
@@ -66,13 +67,14 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
 
        /* Perform the upcall */
        rc = dns_query(current->nsproxy->net_ns, NULL, hostname, len,
-                      NULL, ip_addr, NULL, false);
+                      NULL, ip_addr, expiry, false);
        if (rc < 0)
                cifs_dbg(FYI, "%s: unable to resolve: %*.*s\n",
                         __func__, len, len, hostname);
        else
-               cifs_dbg(FYI, "%s: resolved: %*.*s to %s\n",
-                        __func__, len, len, hostname, *ip_addr);
+               cifs_dbg(FYI, "%s: resolved: %*.*s to %s expiry %llu\n",
+                        __func__, len, len, hostname, *ip_addr,
+                        expiry ? (*expiry) : 0);
        return rc;
 
 name_is_IP_address:
index 5be060b..9fa2807 100644 (file)
@@ -12,7 +12,7 @@
 #define _DNS_RESOLVE_H
 
 #ifdef __KERNEL__
-extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr);
+extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr, time64_t *expiry);
 #endif /* KERNEL */
 
 #endif /* _DNS_RESOLVE_H */
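
Callers that do not need the record lifetime pass NULL for the new argument,
as the converted call sites above do. A sketch of the full form; the hostname
is a placeholder:

static int example_resolve(void)
{
        char *ip = NULL;
        time64_t expiry = 0;
        int rc;

        rc = dns_resolve_server_name_to_ip("\\\\server.example.com",
                                           &ip, &expiry);
        if (rc < 0)
                return rc;

        /* ip is allocated for us and must be freed by the caller; expiry
         * is what the cifs reconnect path uses to schedule the next
         * periodic re-resolution. */
        kfree(ip);
        return 0;
}
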
index cd10860..0a72840 100644 (file)
@@ -4619,7 +4619,7 @@ read_complete:
 
 static int cifs_readpage(struct file *file, struct page *page)
 {
-       loff_t offset = (loff_t)page->index << PAGE_SHIFT;
+       loff_t offset = page_file_offset(page);
        int rc = -EACCES;
        unsigned int xid;
 
index 553adfb..eed59bc 100644 (file)
@@ -13,6 +13,9 @@
 #include <linux/magic.h>
 #include <linux/security.h>
 #include <net/net_namespace.h>
+#ifdef CONFIG_CIFS_DFS_UPCALL
+#include "dfs_cache.h"
+#endif
 */
 
 #include <linux/ctype.h>
@@ -779,6 +782,10 @@ static int smb3_reconfigure(struct fs_context *fc)
        smb3_cleanup_fs_context_contents(cifs_sb->ctx);
        rc = smb3_fs_context_dup(cifs_sb->ctx, ctx);
        smb3_update_mnt_flags(cifs_sb);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+       if (!rc)
+               rc = dfs_cache_remount_fs(cifs_sb);
+#endif
 
        return rc;
 }
@@ -918,6 +925,13 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
                ctx->cred_uid = uid;
                ctx->cruid_specified = true;
                break;
+       case Opt_backupuid:
+               uid = make_kuid(current_user_ns(), result.uint_32);
+               if (!uid_valid(uid))
+                       goto cifs_parse_mount_err;
+               ctx->backupuid = uid;
+               ctx->backupuid_specified = true;
+               break;
        case Opt_backupgid:
                gid = make_kgid(current_user_ns(), result.uint_32);
                if (!gid_valid(gid))
index 184138b..844abeb 100644 (file)
@@ -1187,7 +1187,7 @@ int match_target_ip(struct TCP_Server_Info *server,
 
        cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);
 
-       rc = dns_resolve_server_name_to_ip(target, &tip);
+       rc = dns_resolve_server_name_to_ip(target, &tip, NULL);
        if (rc < 0)
                goto out;
 
index e4c8f60..2dfd0d8 100644 (file)
@@ -557,8 +557,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
        p = buf;
        while (bytes_left >= sizeof(*p)) {
                info->speed = le64_to_cpu(p->LinkSpeed);
-               info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
-               info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
+               info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
+               info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
 
                cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
                cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
@@ -2910,6 +2910,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
                /* ipc tcons are not refcounted */
                spin_lock(&cifs_tcp_ses_lock);
                tcon->tc_count--;
+               /* tc_count can never go negative */
+               WARN_ON(tcon->tc_count < 0);
                spin_unlock(&cifs_tcp_ses_lock);
        }
        kfree(utf16_path);
@@ -3616,6 +3618,7 @@ static int smb3_simple_fallocate_write_range(unsigned int xid,
 {
        struct cifs_io_parms io_parms = {0};
        int nbytes;
+       int rc = 0;
        struct kvec iov[2];
 
        io_parms.netfid = cfile->fid.netfid;
@@ -3623,13 +3626,25 @@ static int smb3_simple_fallocate_write_range(unsigned int xid,
        io_parms.tcon = tcon;
        io_parms.persistent_fid = cfile->fid.persistent_fid;
        io_parms.volatile_fid = cfile->fid.volatile_fid;
-       io_parms.offset = off;
-       io_parms.length = len;
 
-       /* iov[0] is reserved for smb header */
-       iov[1].iov_base = buf;
-       iov[1].iov_len = io_parms.length;
-       return SMB2_write(xid, &io_parms, &nbytes, iov, 1);
+       while (len) {
+               io_parms.offset = off;
+               io_parms.length = len;
+               if (io_parms.length > SMB2_MAX_BUFFER_SIZE)
+                       io_parms.length = SMB2_MAX_BUFFER_SIZE;
+               /* iov[0] is reserved for smb header */
+               iov[1].iov_base = buf;
+               iov[1].iov_len = io_parms.length;
+               rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1);
+               if (rc)
+                       break;
+               if (nbytes > len)
+                       return -EINVAL;
+               buf += nbytes;
+               off += nbytes;
+               len -= nbytes;
+       }
+       return rc;
 }
 
 static int smb3_simple_fallocate_range(unsigned int xid,
@@ -3653,11 +3668,6 @@ static int smb3_simple_fallocate_range(unsigned int xid,
                        (char **)&out_data, &out_data_len);
        if (rc)
                goto out;
-       /*
-        * It is already all allocated
-        */
-       if (out_data_len == 0)
-               goto out;
 
        buf = kzalloc(1024 * 1024, GFP_KERNEL);
        if (buf == NULL) {
@@ -3780,6 +3790,24 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
                goto out;
        }
 
+       if (keep_size == true) {
+               /*
+                * We cannot preallocate pages beyond the end of the file
+                * in SMB2
+                */
+               if (off >= i_size_read(inode)) {
+                       rc = 0;
+                       goto out;
+               }
+               /*
+                * For fallocates that are partially beyond the end of file,
+                * clamp len so we only fallocate up to the end of file.
+                */
+               if (off + len > i_size_read(inode)) {
+                       len = i_size_read(inode) - off;
+               }
+       }
+
        if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
                /*
                 * At this point, we are trying to fallocate an internal
index 4b27cb9..e9cac79 100644 (file)
@@ -394,6 +394,7 @@ struct smb2_compression_capabilities_context {
        __u16   Padding;
        __u32   Flags;
        __le16  CompressionAlgorithms[3];
+       __u16   Pad;  /* Some servers require padding so that DataLen is a multiple of 8 */
        /* Check if pad needed */
 } __packed;
 
index 2f63bf3..5a0be99 100644 (file)
@@ -91,7 +91,10 @@ static ssize_t configfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
        }
        pr_debug("%s: count = %zd, pos = %lld, buf = %s\n",
                 __func__, iov_iter_count(to), iocb->ki_pos, buffer->page);
-       retval = copy_to_iter(buffer->page, buffer->count, to);
+       if (iocb->ki_pos >= buffer->count)
+               goto out;
+       retval = copy_to_iter(buffer->page + iocb->ki_pos,
+                             buffer->count - iocb->ki_pos, to);
        iocb->ki_pos += retval;
        if (retval == 0)
                retval = -EFAULT;
@@ -162,7 +165,10 @@ static ssize_t configfs_bin_read_iter(struct kiocb *iocb, struct iov_iter *to)
                buffer->needs_read_fill = 0;
        }
 
-       retval = copy_to_iter(buffer->bin_buffer, buffer->bin_buffer_size, to);
+       if (iocb->ki_pos >= buffer->bin_buffer_size)
+               goto out;
+       retval = copy_to_iter(buffer->bin_buffer + iocb->ki_pos,
+                             buffer->bin_buffer_size - iocb->ki_pos, to);
        iocb->ki_pos += retval;
        if (retval == 0)
                retval = -EFAULT;
@@ -171,21 +177,28 @@ out:
        return retval;
 }
 
-static int fill_write_buffer(struct configfs_buffer *buffer,
+/* Fill buffer->page starting at offset @pos with data coming from @from. */
+static int fill_write_buffer(struct configfs_buffer *buffer, loff_t pos,
                             struct iov_iter *from)
 {
+       loff_t to_copy;
        int copied;
+       u8 *to;
 
        if (!buffer->page)
                buffer->page = (char *)__get_free_pages(GFP_KERNEL, 0);
        if (!buffer->page)
                return -ENOMEM;
 
-       copied = copy_from_iter(buffer->page, SIMPLE_ATTR_SIZE - 1, from);
+       to_copy = SIMPLE_ATTR_SIZE - 1 - pos;
+       if (to_copy <= 0)
+               return 0;
+       to = buffer->page + pos;
+       copied = copy_from_iter(to, to_copy, from);
        buffer->needs_read_fill = 1;
        /* if buf is assumed to contain a string, terminate it by \0,
         * so e.g. sscanf() can scan the string easily */
-       buffer->page[copied] = 0;
+       to[copied] = 0;
        return copied ? : -EFAULT;
 }
 
@@ -217,7 +230,7 @@ static ssize_t configfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
        ssize_t len;
 
        mutex_lock(&buffer->mutex);
-       len = fill_write_buffer(buffer, from);
+       len = fill_write_buffer(buffer, iocb->ki_pos, from);
        if (len > 0)
                len = flush_write_buffer(file, buffer, len);
        if (len > 0)
@@ -272,7 +285,9 @@ static ssize_t configfs_bin_write_iter(struct kiocb *iocb,
                buffer->bin_buffer_size = end_offset;
        }
 
-       len = copy_from_iter(buffer->bin_buffer, buffer->bin_buffer_size, from);
+       len = copy_from_iter(buffer->bin_buffer + iocb->ki_pos,
+                            buffer->bin_buffer_size - iocb->ki_pos, from);
+       iocb->ki_pos += len;
 out:
        mutex_unlock(&buffer->mutex);
        return len ? : -EFAULT;
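
The practical effect of honoring iocb->ki_pos in these paths: successive
short reads now walk forward through the attribute and terminate at EOF,
whereas the old code copied from the start of the buffer on every call. A
user-space sketch that exercises this; the attribute path is a placeholder:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char c;
        int fd = open("/sys/kernel/config/some_subsys/attr", O_RDONLY);

        if (fd < 0)
                return 1;
        /* Each 1-byte read advances the file position; before the fix,
         * every read copied the first byte of the buffer again. */
        while (read(fd, &c, 1) == 1)
                putchar(c);
        close(fd);
        return 0;
}
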
index 14292db..2c2f179 100644 (file)
@@ -106,12 +106,11 @@ static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len)
        return err;
 }
 
-static bool ext2_check_page(struct page *page, int quiet)
+static bool ext2_check_page(struct page *page, int quiet, char *kaddr)
 {
        struct inode *dir = page->mapping->host;
        struct super_block *sb = dir->i_sb;
        unsigned chunk_size = ext2_chunk_size(dir);
-       char *kaddr = page_address(page);
        u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
        unsigned offs, rec_len;
        unsigned limit = PAGE_SIZE;
@@ -205,7 +204,8 @@ static struct page * ext2_get_page(struct inode *dir, unsigned long n,
        if (!IS_ERR(page)) {
                *page_addr = kmap_local_page(page);
                if (unlikely(!PageChecked(page))) {
-                       if (PageError(page) || !ext2_check_page(page, quiet))
+                       if (PageError(page) || !ext2_check_page(page, quiet,
+                                                               *page_addr))
                                goto fail;
                }
        }
@@ -584,10 +584,10 @@ out_unlock:
  * ext2_delete_entry deletes a directory entry by merging it with the
  * previous entry. Page is up-to-date.
  */
-int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
+int ext2_delete_entry (struct ext2_dir_entry_2 *dir, struct page *page,
+                       char *kaddr)
 {
        struct inode *inode = page->mapping->host;
-       char *kaddr = page_address(page);
        unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
        unsigned to = ((char *)dir - kaddr) +
                                ext2_rec_len_from_disk(dir->rec_len);
@@ -607,7 +607,7 @@ int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
                de = ext2_next_entry(de);
        }
        if (pde)
-               from = (char*)pde - (char*)page_address(page);
+               from = (char *)pde - kaddr;
        pos = page_offset(page) + from;
        lock_page(page);
        err = ext2_prepare_chunk(page, pos, to - from);
index b0a6948..e512630 100644 (file)
@@ -740,7 +740,8 @@ extern int ext2_inode_by_name(struct inode *dir,
 extern int ext2_make_empty(struct inode *, struct inode *);
 extern struct ext2_dir_entry_2 *ext2_find_entry(struct inode *, const struct qstr *,
                                                struct page **, void **res_page_addr);
-extern int ext2_delete_entry (struct ext2_dir_entry_2 *, struct page *);
+extern int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct page *page,
+                            char *kaddr);
 extern int ext2_empty_dir (struct inode *);
 extern struct ext2_dir_entry_2 *ext2_dotdot(struct inode *dir, struct page **p, void **pa);
 extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page *, void *,
index 1f69b81..5f6b756 100644 (file)
@@ -293,7 +293,7 @@ static int ext2_unlink(struct inode * dir, struct dentry *dentry)
                goto out;
        }
 
-       err = ext2_delete_entry (de, page);
+       err = ext2_delete_entry (de, page, page_addr);
        ext2_put_page(page, page_addr);
        if (err)
                goto out;
@@ -397,7 +397,7 @@ static int ext2_rename (struct user_namespace * mnt_userns,
        old_inode->i_ctime = current_time(old_inode);
        mark_inode_dirty(old_inode);
 
-       ext2_delete_entry(old_de, old_page);
+       ext2_delete_entry(old_de, old_page, old_page_addr);
 
        if (dir_de) {
                if (old_dir != new_dir)
index dfc72f1..f946bec 100644 (file)
@@ -369,8 +369,8 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
        /* 32-bit arches must use fcntl64() */
        case F_OFD_SETLK:
        case F_OFD_SETLKW:
-#endif
                fallthrough;
+#endif
        case F_SETLK:
        case F_SETLKW:
                if (copy_from_user(&flock, argp, sizeof(flock)))
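Moving the fallthrough annotation inside the #ifdef keeps it paired with the conditional case labels: when those labels are compiled out, no orphaned annotation is left in front of the next case. The same rule in a self-contained example (HAVE_EXTENDED_CMDS is a made-up macro):

	#include <stdio.h>

	static void handle(int cmd)
	{
		switch (cmd) {
	#ifdef HAVE_EXTENDED_CMDS
		case 3:
			/* extended command shares the common path below */
			__attribute__((fallthrough));	/* must stay inside the #ifdef */
	#endif
		case 1:
		case 2:
			printf("common handling for cmd %d\n", cmd);
			break;
		}
	}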
index 06d04a7..4c33705 100644 (file)
@@ -521,6 +521,9 @@ static bool inode_prepare_wbs_switch(struct inode *inode,
         */
        smp_mb();
 
+       if (IS_DAX(inode))
+               return false;
+
        /* while holding I_WB_SWITCH, no one else can update the association */
        spin_lock(&inode->i_lock);
        if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
index 2834d1a..de1985e 100644 (file)
@@ -80,6 +80,35 @@ static int vfs_parse_sb_flag(struct fs_context *fc, const char *key)
 }
 
 /**
+ * vfs_parse_fs_param_source - Handle setting "source" via parameter
+ * @fc: The filesystem context to modify
+ * @param: The parameter
+ *
+ * This is a simple helper for filesystems to verify that the "source" they
+ * accept is sane.
+ *
+ * Returns 0 on success, -ENOPARAM if this is not the "source" parameter,
+ * and -EINVAL otherwise. In the event of failure, supplementary error
+ * information is logged.
+ */
+int vfs_parse_fs_param_source(struct fs_context *fc, struct fs_parameter *param)
+{
+       if (strcmp(param->key, "source") != 0)
+               return -ENOPARAM;
+
+       if (param->type != fs_value_is_string)
+               return invalf(fc, "Non-string source");
+
+       if (fc->source)
+               return invalf(fc, "Multiple sources");
+
+       fc->source = param->string;
+       param->string = NULL;
+       return 0;
+}
+EXPORT_SYMBOL(vfs_parse_fs_param_source);
+
+/**
  * vfs_parse_fs_param - Add a single parameter to a superblock config
  * @fc: The filesystem context to modify
  * @param: The parameter
@@ -122,15 +151,9 @@ int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param)
        /* If the filesystem doesn't take any arguments, give it the
         * default handling of source.
         */
-       if (strcmp(param->key, "source") == 0) {
-               if (param->type != fs_value_is_string)
-                       return invalf(fc, "VFS: Non-string source");
-               if (fc->source)
-                       return invalf(fc, "VFS: Multiple sources");
-               fc->source = param->string;
-               param->string = NULL;
-               return 0;
-       }
+       ret = vfs_parse_fs_param_source(fc, param);
+       if (ret != -ENOPARAM)
+               return ret;
 
        return invalf(fc, "%s: Unknown parameter '%s'",
                      fc->fs_type->name, param->key);
@@ -504,16 +527,11 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param)
        struct legacy_fs_context *ctx = fc->fs_private;
        unsigned int size = ctx->data_size;
        size_t len = 0;
+       int ret;
 
-       if (strcmp(param->key, "source") == 0) {
-               if (param->type != fs_value_is_string)
-                       return invalf(fc, "VFS: Legacy: Non-string source");
-               if (fc->source)
-                       return invalf(fc, "VFS: Legacy: Multiple sources");
-               fc->source = param->string;
-               param->string = NULL;
-               return 0;
-       }
+       ret = vfs_parse_fs_param_source(fc, param);
+       if (ret != -ENOPARAM)
+               return ret;
 
        if (ctx->param_type == LEGACY_FS_MONOLITHIC_PARAMS)
                return invalf(fc, "VFS: Legacy: Can't mix monolithic and individual options");
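With vfs_parse_fs_param_source() factored out, a filesystem's ->parse_param hook can hand "source" off to the VFS before consulting its own parameter table. A sketch of the intended call pattern (examplefs is hypothetical):

	static int examplefs_parse_param(struct fs_context *fc,
					 struct fs_parameter *param)
	{
		int ret;

		/* Let the VFS claim "source"; -ENOPARAM means "not mine". */
		ret = vfs_parse_fs_param_source(fc, param);
		if (ret != -ENOPARAM)
			return ret;

		/* ... filesystem-specific parameters would be handled here ... */
		return -ENOPARAM;
	}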
index 4af318f..ef9498a 100644 (file)
@@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
        fd->key = ptr + tree->max_key_len + 2;
        hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
                tree->cnid, __builtin_return_address(0));
-       mutex_lock(&tree->tree_lock);
+       switch (tree->cnid) {
+       case HFS_CAT_CNID:
+               mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
+               break;
+       case HFS_EXT_CNID:
+               mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
+               break;
+       case HFS_ATTR_CNID:
+               mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
+               break;
+       default:
+               return -EINVAL;
+       }
        return 0;
 }
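mutex_lock_nested() takes a lockdep subclass so that holding one B-tree's lock while taking another's is not flagged as recursive locking on the shared hfs_btree lock class. The general shape, with hypothetical names:

	enum { TREE_A_MUTEX, TREE_B_MUTEX };		/* lockdep subclasses */

	mutex_lock_nested(&tree_a->lock, TREE_A_MUTEX);
	mutex_lock_nested(&tree_b->lock, TREE_B_MUTEX);	/* same class, distinct subclass */
	/* ... operate on both trees ... */
	mutex_unlock(&tree_b->lock);
	mutex_unlock(&tree_a->lock);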
 
index b63a4df..c0a73a6 100644 (file)
 
 #include "btree.h"
 
-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
-               int off, int len)
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 {
        struct page *page;
+       int pagenum;
+       int bytes_read;
+       int bytes_to_read;
+       void *vaddr;
 
        off += node->page_offset;
-       page = node->page[0];
+       pagenum = off >> PAGE_SHIFT;
+       off &= ~PAGE_MASK; /* compute page offset for the first page */
 
-       memcpy(buf, kmap(page) + off, len);
-       kunmap(page);
+       for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
+               if (pagenum >= node->tree->pages_per_bnode)
+                       break;
+               page = node->page[pagenum];
+               bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
+
+               vaddr = kmap_atomic(page);
+               memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
+               kunmap_atomic(vaddr);
+
+               pagenum++;
+               off = 0; /* page offset only applies to the first page */
+       }
 }
 
 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
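The rewritten hfs_bnode_read() walks every page backing the node instead of assuming the whole range lives in page zero. The chunking arithmetic, reduced to portable C with an assumed 4 KiB page size:

	#include <stddef.h>
	#include <string.h>

	#define PAGE_SZ 4096	/* assumed page size for the sketch */

	static void read_span(char *const pages[], size_t npages,
			      size_t off, char *buf, size_t len)
	{
		size_t page = off / PAGE_SZ;
		size_t done, chunk;

		off %= PAGE_SZ;				/* offset within the first page */
		for (done = 0; done < len && page < npages; done += chunk) {
			chunk = len - done;
			if (chunk > PAGE_SZ - off)
				chunk = PAGE_SZ - off;	/* clamp to the page boundary */
			memcpy(buf + done, pages[page] + off, chunk);
			page++;
			off = 0;			/* later pages start at 0 */
		}
	}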
index 4ba45ca..0e6baee 100644 (file)
@@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
 
 #define NODE_HASH_SIZE  256
 
+/* B-tree mutex nested subclasses */
+enum hfs_btree_mutex_classes {
+       CATALOG_BTREE_MUTEX,
+       EXTENTS_BTREE_MUTEX,
+       ATTR_BTREE_MUTEX,
+};
+
 /* A HFS BTree held in memory */
 struct hfs_btree {
        struct super_block *sb;
index 44d07c9..12d9bae 100644 (file)
@@ -420,14 +420,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
        if (!res) {
                if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
                        res =  -EIO;
-                       goto bail;
+                       goto bail_hfs_find;
                }
                hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
        }
-       if (res) {
-               hfs_find_exit(&fd);
-               goto bail_no_root;
-       }
+       if (res)
+               goto bail_hfs_find;
        res = -EINVAL;
        root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
        hfs_find_exit(&fd);
@@ -443,6 +441,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
        /* everything's okay */
        return 0;
 
+bail_hfs_find:
+       hfs_find_exit(&fd);
 bail_no_root:
        pr_err("get root inode failed\n");
 bail:
index 926eeb9..cdfb1ae 100644 (file)
@@ -77,7 +77,7 @@ enum hugetlb_param {
 static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
        fsparam_u32   ("gid",           Opt_gid),
        fsparam_string("min_size",      Opt_min_size),
-       fsparam_u32   ("mode",          Opt_mode),
+       fsparam_u32oct("mode",          Opt_mode),
        fsparam_string("nr_inodes",     Opt_nr_inodes),
        fsparam_string("pagesize",      Opt_pagesize),
        fsparam_string("size",          Opt_size),
index 3ce8edb..82e8eb3 100644 (file)
@@ -61,7 +61,6 @@ extern void __init chrdev_init(void);
  */
 extern const struct fs_context_operations legacy_fs_context_ops;
 extern int parse_monolithic_mount_data(struct fs_context *, void *);
-extern void fc_drop_locked(struct fs_context *);
 extern void vfs_clean_context(struct fs_context *fc);
 extern int finish_clean_context(struct fs_context *fc);
 
index 843d4a7..cf086b0 100644 (file)
@@ -731,7 +731,12 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
        int work_flags;
        unsigned long flags;
 
-       if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state)) {
+       /*
+        * If io-wq is exiting for this task, or if the request has explicitly
+        * been marked as one that should not get executed, cancel it here.
+        */
+       if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
+           (work->flags & IO_WQ_WORK_CANCEL)) {
                io_run_cancel(work, wqe);
                return;
        }
index d94fb58..bf548af 100644 (file)
@@ -1279,8 +1279,17 @@ static void io_prep_async_link(struct io_kiocb *req)
 {
        struct io_kiocb *cur;
 
-       io_for_each_link(cur, req)
-               io_prep_async_work(cur);
+       if (req->flags & REQ_F_LINK_TIMEOUT) {
+               struct io_ring_ctx *ctx = req->ctx;
+
+               spin_lock_irq(&ctx->completion_lock);
+               io_for_each_link(cur, req)
+                       io_prep_async_work(cur);
+               spin_unlock_irq(&ctx->completion_lock);
+       } else {
+               io_for_each_link(cur, req)
+                       io_prep_async_work(cur);
+       }
 }
 
 static void io_queue_async_work(struct io_kiocb *req)
@@ -1294,6 +1303,17 @@ static void io_queue_async_work(struct io_kiocb *req)
 
        /* init ->work of the whole link before punting */
        io_prep_async_link(req);
+
+       /*
+        * Not expected to happen, but if we do have a bug where this _can_
+        * happen, catch it here and ensure the request is marked as
+        * canceled. That will make io-wq go through the usual work cancel
+        * procedure rather than attempt to run this request (or create a new
+        * worker for it).
+        */
+       if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
+               req->work.flags |= IO_WQ_WORK_CANCEL;
+
        trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
                                        &req->work, req->flags);
        io_wq_enqueue(tctx->io_wq, &req->work);
@@ -1939,9 +1959,13 @@ static void tctx_task_work(struct callback_head *cb)
                        node = next;
                }
                if (wq_list_empty(&tctx->task_list)) {
+                       spin_lock_irq(&tctx->task_lock);
                        clear_bit(0, &tctx->task_state);
-                       if (wq_list_empty(&tctx->task_list))
+                       if (wq_list_empty(&tctx->task_list)) {
+                               spin_unlock_irq(&tctx->task_lock);
                                break;
+                       }
+                       spin_unlock_irq(&tctx->task_lock);
                        /* another tctx_task_work() is enqueued, yield */
                        if (test_and_set_bit(0, &tctx->task_state))
                                break;
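The tctx_task_work() hunk closes a race: another task could enqueue work between the emptiness check and the clear_bit(), so the list is now re-checked under the same spinlock the producer takes. The consumer-side pattern, sketched against a hypothetical queue q:

	spin_lock_irq(&q->lock);
	clear_bit(0, &q->state);		/* drop the "worker scheduled" flag */
	if (list_empty(&q->items)) {
		spin_unlock_irq(&q->lock);
		return;				/* truly drained, safe to stop */
	}
	spin_unlock_irq(&q->lock);
	/* an enqueue slipped in: re-claim the flag and keep processing */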
@@ -2016,7 +2040,7 @@ static void io_req_task_submit(struct io_kiocb *req)
 
        /* ctx stays valid until unlock, even if we drop all ours ctx->refs */
        mutex_lock(&ctx->uring_lock);
-       if (!(current->flags & PF_EXITING) && !current->in_execve)
+       if (!(req->task->flags & PF_EXITING) && !req->task->in_execve)
                __io_queue_sqe(req);
        else
                io_req_complete_failed(req, -EFAULT);
@@ -2036,6 +2060,12 @@ static void io_req_task_queue(struct io_kiocb *req)
        io_req_task_work_add(req);
 }
 
+static void io_req_task_queue_reissue(struct io_kiocb *req)
+{
+       req->io_task_work.func = io_queue_async_work;
+       io_req_task_work_add(req);
+}
+
 static inline void io_queue_next(struct io_kiocb *req)
 {
        struct io_kiocb *nxt = io_req_find_next(req);
@@ -2205,7 +2235,7 @@ static inline bool io_run_task_work(void)
  * Find and free completed poll iocbs
  */
 static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
-                              struct list_head *done)
+                              struct list_head *done, bool resubmit)
 {
        struct req_batch rb;
        struct io_kiocb *req;
@@ -2220,11 +2250,11 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
                req = list_first_entry(done, struct io_kiocb, inflight_entry);
                list_del(&req->inflight_entry);
 
-               if (READ_ONCE(req->result) == -EAGAIN &&
+               if (READ_ONCE(req->result) == -EAGAIN && resubmit &&
                    !(req->flags & REQ_F_DONT_REISSUE)) {
                        req->iopoll_completed = 0;
                        req_ref_get(req);
-                       io_queue_async_work(req);
+                       io_req_task_queue_reissue(req);
                        continue;
                }
 
@@ -2244,7 +2274,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 }
 
 static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
-                       long min)
+                       long min, bool resubmit)
 {
        struct io_kiocb *req, *tmp;
        LIST_HEAD(done);
@@ -2287,7 +2317,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
        }
 
        if (!list_empty(&done))
-               io_iopoll_complete(ctx, nr_events, &done);
+               io_iopoll_complete(ctx, nr_events, &done, resubmit);
 
        return ret;
 }
@@ -2305,7 +2335,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
        while (!list_empty(&ctx->iopoll_list)) {
                unsigned int nr_events = 0;
 
-               io_do_iopoll(ctx, &nr_events, 0);
+               io_do_iopoll(ctx, &nr_events, 0, false);
 
                /* let it sleep and repeat later if can't complete a request */
                if (nr_events == 0)
@@ -2367,7 +2397,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
                            list_empty(&ctx->iopoll_list))
                                break;
                }
-               ret = io_do_iopoll(ctx, &nr_events, min);
+               ret = io_do_iopoll(ctx, &nr_events, min, true);
        } while (!ret && nr_events < min && !need_resched());
 out:
        mutex_unlock(&ctx->uring_lock);
@@ -2417,6 +2447,12 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
         */
        if (percpu_ref_is_dying(&ctx->refs))
                return false;
+       /*
+        * Play it safe and assume it is not safe to re-import and reissue
+        * if we're not in the original thread group (or in task context).
+        */
+       if (!same_thread_group(req->task, current) || !in_task())
+               return false;
        return true;
 }
 #else
@@ -2747,7 +2783,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
                req->flags &= ~REQ_F_REISSUE;
                if (io_resubmit_prep(req)) {
                        req_ref_get(req);
-                       io_queue_async_work(req);
+                       io_req_task_queue_reissue(req);
                } else {
                        int cflags = 0;
 
@@ -4802,6 +4838,7 @@ IO_NETOP_FN(recv);
 struct io_poll_table {
        struct poll_table_struct pt;
        struct io_kiocb *req;
+       int nr_entries;
        int error;
 };
 
@@ -4902,7 +4939,6 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
        if (req->poll.events & EPOLLONESHOT)
                flags = 0;
        if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
-               io_poll_remove_waitqs(req);
                req->poll.done = true;
                flags = 0;
        }
@@ -4925,6 +4961,7 @@ static void io_poll_task_func(struct io_kiocb *req)
 
                done = io_poll_complete(req, req->result);
                if (done) {
+                       io_poll_remove_double(req);
                        hash_del(&req->hash_node);
                } else {
                        req->result = 0;
@@ -4995,11 +5032,11 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
        struct io_kiocb *req = pt->req;
 
        /*
-        * If poll->head is already set, it's because the file being polled
-        * uses multiple waitqueues for poll handling (eg one for read, one
-        * for write). Setup a separate io_poll_iocb if this happens.
+        * The file being polled uses multiple waitqueues for poll handling
+        * (e.g. one for read, one for write). Setup a separate io_poll_iocb
+        * if this happens.
         */
-       if (unlikely(poll->head)) {
+       if (unlikely(pt->nr_entries)) {
                struct io_poll_iocb *poll_one = poll;
 
                /* already have a 2nd entry, fail a third attempt */
@@ -5027,7 +5064,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
                *poll_ptr = poll;
        }
 
-       pt->error = 0;
+       pt->nr_entries++;
        poll->head = head;
 
        if (poll->events & EPOLLEXCLUSIVE)
@@ -5104,11 +5141,16 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
 
        ipt->pt._key = mask;
        ipt->req = req;
-       ipt->error = -EINVAL;
+       ipt->error = 0;
+       ipt->nr_entries = 0;
 
        mask = vfs_poll(req->file, &ipt->pt) & poll->events;
+       if (unlikely(!ipt->nr_entries) && !ipt->error)
+               ipt->error = -EINVAL;
 
        spin_lock_irq(&ctx->completion_lock);
+       if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
+               io_poll_remove_double(req);
        if (likely(poll->head)) {
                spin_lock(&poll->head->lock);
                if (unlikely(list_empty(&poll->wait.entry))) {
@@ -5179,7 +5221,6 @@ static int io_arm_poll_handler(struct io_kiocb *req)
        ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
                                        io_async_wake);
        if (ret || ipt.error) {
-               io_poll_remove_double(req);
                spin_unlock_irq(&ctx->completion_lock);
                if (ret)
                        return IO_APOLL_READY;
@@ -6019,11 +6060,13 @@ static bool io_drain_req(struct io_kiocb *req)
 
        ret = io_req_prep_async(req);
        if (ret)
-               return ret;
+               goto fail;
        io_prep_async_link(req);
        de = kmalloc(sizeof(*de), GFP_KERNEL);
        if (!de) {
-               io_req_complete_failed(req, -ENOMEM);
+               ret = -ENOMEM;
+fail:
+               io_req_complete_failed(req, ret);
                return true;
        }
 
@@ -6790,7 +6833,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
 
                mutex_lock(&ctx->uring_lock);
                if (!list_empty(&ctx->iopoll_list))
-                       io_do_iopoll(ctx, &nr_events, 0);
+                       io_do_iopoll(ctx, &nr_events, 0, true);
 
                /*
                 * Don't submit if refs are dying, good for io_uring_register(),
@@ -7897,15 +7940,19 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
        struct io_wq_data data;
        unsigned int concurrency;
 
+       mutex_lock(&ctx->uring_lock);
        hash = ctx->hash_map;
        if (!hash) {
                hash = kzalloc(sizeof(*hash), GFP_KERNEL);
-               if (!hash)
+               if (!hash) {
+                       mutex_unlock(&ctx->uring_lock);
                        return ERR_PTR(-ENOMEM);
+               }
                refcount_set(&hash->refs, 1);
                init_waitqueue_head(&hash->wait);
                ctx->hash_map = hash;
        }
+       mutex_unlock(&ctx->uring_lock);
 
        data.hash = hash;
        data.task = task;
@@ -7979,9 +8026,11 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
                f = fdget(p->wq_fd);
                if (!f.file)
                        return -ENXIO;
-               fdput(f);
-               if (f.file->f_op != &io_uring_fops)
+               if (f.file->f_op != &io_uring_fops) {
+                       fdput(f);
                        return -EINVAL;
+               }
+               fdput(f);
        }
        if (ctx->flags & IORING_SETUP_SQPOLL) {
                struct task_struct *tsk;
index 41da4f1..87ccb34 100644 (file)
@@ -215,6 +215,7 @@ iomap_read_inline_data(struct inode *inode, struct page *page,
        if (PageUptodate(page))
                return;
 
+       BUG_ON(page_has_private(page));
        BUG_ON(page->index);
        BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
 
@@ -239,7 +240,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 {
        struct iomap_readpage_ctx *ctx = data;
        struct page *page = ctx->cur_page;
-       struct iomap_page *iop = iomap_page_create(inode, page);
+       struct iomap_page *iop;
        bool same_page = false, is_contig = false;
        loff_t orig_pos = pos;
        unsigned poff, plen;
@@ -252,6 +253,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
        }
 
        /* zero post-eof blocks as the page may be mapped */
+       iop = iomap_page_create(inode, page);
        iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
        if (plen == 0)
                goto done;
@@ -967,7 +969,6 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
                block_commit_write(page, 0, length);
        } else {
                WARN_ON_ONCE(!PageUptodate(page));
-               iomap_page_create(inode, page);
                set_page_dirty(page);
        }
 
@@ -1304,14 +1305,13 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
                struct writeback_control *wbc, struct inode *inode,
                struct page *page, u64 end_offset)
 {
-       struct iomap_page *iop = to_iomap_page(page);
+       struct iomap_page *iop = iomap_page_create(inode, page);
        struct iomap_ioend *ioend, *next;
        unsigned len = i_blocksize(inode);
        u64 file_offset; /* file offset of page */
        int error = 0, count = 0, i;
        LIST_HEAD(submit_list);
 
-       WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
        WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
 
        /*
index dab1b02..ce6fb81 100644 (file)
@@ -35,23 +35,20 @@ loff_t
 iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
 {
        loff_t size = i_size_read(inode);
-       loff_t length = size - offset;
        loff_t ret;
 
        /* Nothing to be found before or beyond the end of the file. */
        if (offset < 0 || offset >= size)
                return -ENXIO;
 
-       while (length > 0) {
-               ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
-                                 &offset, iomap_seek_hole_actor);
+       while (offset < size) {
+               ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
+                                 ops, &offset, iomap_seek_hole_actor);
                if (ret < 0)
                        return ret;
                if (ret == 0)
                        break;
-
                offset += ret;
-               length -= ret;
        }
 
        return offset;
@@ -83,27 +80,23 @@ loff_t
 iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
 {
        loff_t size = i_size_read(inode);
-       loff_t length = size - offset;
        loff_t ret;
 
        /* Nothing to be found before or beyond the end of the file. */
        if (offset < 0 || offset >= size)
                return -ENXIO;
 
-       while (length > 0) {
-               ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
-                                 &offset, iomap_seek_data_actor);
+       while (offset < size) {
+               ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
+                                 ops, &offset, iomap_seek_data_actor);
                if (ret < 0)
                        return ret;
                if (ret == 0)
-                       break;
-
+                       return offset;
                offset += ret;
-               length -= ret;
        }
 
-       if (length <= 0)
-               return -ENXIO;
-       return offset;
+       /* We've reached the end of the file without finding data */
+       return -ENXIO;
 }
 EXPORT_SYMBOL_GPL(iomap_seek_data);
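Both seek helpers now derive the remaining length from the offset on every iteration, so a single cursor drives the loop and cannot drift from a separately maintained length. Schematically, with step() as a hypothetical iterator returning the bytes advanced:

	while (offset < size) {
		ret = step(offset, size - offset);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;			/* target found at 'offset' */
		offset += ret;
	}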
index 7756579..54d7843 100644 (file)
@@ -1529,6 +1529,45 @@ static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
        }
 }
 
+/*
+ * zero out partial blocks of one cluster.
+ *
+ * start: file offset where zeroing starts; it is rounded up to the next
+ *        block boundary.
+ * len: trimmed to the end of the current cluster if "start + len"
+ *      extends beyond it.
+ */
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+                                       u64 start, u64 len)
+{
+       int ret;
+       u64 start_block, end_block, nr_blocks;
+       u64 p_block, offset;
+       u32 cluster, p_cluster, nr_clusters;
+       struct super_block *sb = inode->i_sb;
+       u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+
+       if (start + len < end)
+               end = start + len;
+
+       start_block = ocfs2_blocks_for_bytes(sb, start);
+       end_block = ocfs2_blocks_for_bytes(sb, end);
+       nr_blocks = end_block - start_block;
+       if (!nr_blocks)
+               return 0;
+
+       cluster = ocfs2_bytes_to_clusters(sb, start);
+       ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+                               &nr_clusters, NULL);
+       if (ret)
+               return ret;
+       if (!p_cluster)
+               return 0;
+
+       offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+       p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+       return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
+
 static int ocfs2_zero_partial_clusters(struct inode *inode,
                                       u64 start, u64 len)
 {
@@ -1538,6 +1577,7 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        unsigned int csize = osb->s_clustersize;
        handle_t *handle;
+       loff_t isize = i_size_read(inode);
 
        /*
         * The "start" and "end" values are NOT necessarily part of
@@ -1558,6 +1598,26 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
        if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
                goto out;
 
+       /* No page cache for EOF blocks, issue zero out to disk. */
+       if (end > isize) {
+               /*
+                * Zero out EOF blocks in the last cluster starting from
+                * "isize", even when "start" > "isize": zeroing exactly at
+                * "start" is complicated because "start" may not be block
+                * aligned, which would require a buffer write, and buffered
+                * writes past EOF are not supported.
+                */
+               ret = ocfs2_zeroout_partial_cluster(inode, isize,
+                                       end - isize);
+               if (ret) {
+                       mlog_errno(ret);
+                       goto out;
+               }
+               if (start >= isize)
+                       goto out;
+               end = isize;
+       }
        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
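Blocks past i_size have no page cache to zero through, so the hunk above zeroes them straight on disk and lets the buffered path handle only the in-EOF remainder. In outline, with zero_on_disk() as a hypothetical stand-in for the sb_issue_zeroout() path:

	/* Sketch: split a zeroing request at EOF. */
	if (end > isize) {
		err = zero_on_disk(isize, end - isize);
		if (err)
			return err;
		if (start >= isize)
			return 0;		/* whole range was past EOF */
		end = isize;			/* buffered zeroing covers the rest */
	}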
@@ -1856,45 +1916,6 @@ out:
 }
 
 /*
- * zero out partial blocks of one cluster.
- *
- * start: file offset where zero starts, will be made upper block aligned.
- * len: it will be trimmed to the end of current cluster if "start + len"
- *      is bigger than it.
- */
-static int ocfs2_zeroout_partial_cluster(struct inode *inode,
-                                       u64 start, u64 len)
-{
-       int ret;
-       u64 start_block, end_block, nr_blocks;
-       u64 p_block, offset;
-       u32 cluster, p_cluster, nr_clusters;
-       struct super_block *sb = inode->i_sb;
-       u64 end = ocfs2_align_bytes_to_clusters(sb, start);
-
-       if (start + len < end)
-               end = start + len;
-
-       start_block = ocfs2_blocks_for_bytes(sb, start);
-       end_block = ocfs2_blocks_for_bytes(sb, end);
-       nr_blocks = end_block - start_block;
-       if (!nr_blocks)
-               return 0;
-
-       cluster = ocfs2_bytes_to_clusters(sb, start);
-       ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
-                               &nr_clusters, NULL);
-       if (ret)
-               return ret;
-       if (!p_cluster)
-               return 0;
-
-       offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
-       p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
-       return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
-}
-
-/*
  * Parts of this function taken from xfs_change_file_space()
  */
 static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
@@ -1935,7 +1956,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                goto out_inode_unlock;
        }
 
-       orig_isize = i_size_read(inode);
        switch (sr->l_whence) {
        case 0: /*SEEK_SET*/
                break;
@@ -1943,7 +1963,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                sr->l_start += f_pos;
                break;
        case 2: /*SEEK_END*/
-               sr->l_start += orig_isize;
+               sr->l_start += i_size_read(inode);
                break;
        default:
                ret = -EINVAL;
@@ -1998,6 +2018,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                ret = -EINVAL;
        }
 
+       orig_isize = i_size_read(inode);
        /* zeroout eof blocks in the cluster. */
        if (!ret && change_size && orig_isize < size) {
                ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
index bfd946a..9ef4231 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -429,20 +429,20 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
 #endif
 
        /*
-        * Only wake up if the pipe started out empty, since
-        * otherwise there should be no readers waiting.
+        * Epoll nonsensically wants a wakeup whether the pipe
+        * was already empty or not.
         *
         * If it wasn't empty we try to merge new data into
         * the last buffer.
         *
         * That naturally merges small writes, but it also
-        * page-aligs the rest of the writes for large writes
+        * page-aligns the rest of the writes for large writes
         * spanning multiple pages.
         */
        head = pipe->head;
-       was_empty = pipe_empty(head, pipe->tail);
+       was_empty = true;
        chars = total_len & (PAGE_SIZE-1);
-       if (chars && !was_empty) {
+       if (chars && !pipe_empty(head, pipe->tail)) {
                unsigned int mask = pipe->ring_size - 1;
                struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
                int offset = buf->offset + buf->len;
index 476a7ff..ef42729 100644 (file)
@@ -387,6 +387,24 @@ void pathrelse(struct treepath *search_path)
        search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
 }
 
+static int has_valid_deh_location(struct buffer_head *bh, struct item_head *ih)
+{
+       struct reiserfs_de_head *deh;
+       int i;
+
+       deh = B_I_DEH(bh, ih);
+       for (i = 0; i < ih_entry_count(ih); i++) {
+               if (deh_location(&deh[i]) > ih_item_len(ih)) {
+                       reiserfs_warning(NULL, "reiserfs-5094",
+                                        "directory entry location seems wrong %h",
+                                        &deh[i]);
+                       return 0;
+               }
+       }
+
+       return 1;
+}
+
 static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
 {
        struct block_head *blkh;
@@ -454,11 +472,14 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
                                         "(second one): %h", ih);
                        return 0;
                }
-               if (is_direntry_le_ih(ih) && (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE))) {
-                       reiserfs_warning(NULL, "reiserfs-5093",
-                                        "item entry count seems wrong %h",
-                                        ih);
-                       return 0;
+               if (is_direntry_le_ih(ih)) {
+                       if (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE)) {
+                               reiserfs_warning(NULL, "reiserfs-5093",
+                                                "item entry count seems wrong %h",
+                                                ih);
+                               return 0;
+                       }
+                       return has_valid_deh_location(bh, ih);
                }
                prev_location = ih_location(ih);
        }
index 3ffafc7..58481f8 100644 (file)
@@ -2082,6 +2082,14 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
                unlock_new_inode(root_inode);
        }
 
+       if (!S_ISDIR(root_inode->i_mode) || !inode_get_bytes(root_inode) ||
+           !root_inode->i_size) {
+               SWARN(silent, s, "", "corrupt root inode, run fsck");
+               iput(root_inode);
+               errval = -EUCLEAN;
+               goto error;
+       }
+
        s->s_root = d_make_root(root_inode);
        if (!s->s_root)
                goto error;
index b117b21..4a2cda0 100644 (file)
@@ -32,6 +32,9 @@ static void seq_set_overflow(struct seq_file *m)
 
 static void *seq_buf_alloc(unsigned long size)
 {
+       if (unlikely(size > MAX_RW_COUNT))
+               return NULL;
+
        return kvmalloc(size, GFP_KERNEL_ACCOUNT);
 }
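Rejecting sizes above MAX_RW_COUNT up front bounds a user-influenced allocation; a single read() cannot consume more than that per call anyway. The guard, sketched in plain C with malloc standing in for kvmalloc():

	#include <stdlib.h>

	static void *bounded_alloc(size_t size, size_t limit)
	{
		if (size > limit)	/* e.g. MAX_RW_COUNT */
			return NULL;	/* caller degrades gracefully */
		return malloc(size);
	}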
 
index f6e0f0c..5c2d806 100644 (file)
@@ -1236,23 +1236,21 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
 }
 
 static __always_inline int validate_range(struct mm_struct *mm,
-                                         __u64 *start, __u64 len)
+                                         __u64 start, __u64 len)
 {
        __u64 task_size = mm->task_size;
 
-       *start = untagged_addr(*start);
-
-       if (*start & ~PAGE_MASK)
+       if (start & ~PAGE_MASK)
                return -EINVAL;
        if (len & ~PAGE_MASK)
                return -EINVAL;
        if (!len)
                return -EINVAL;
-       if (*start < mmap_min_addr)
+       if (start < mmap_min_addr)
                return -EINVAL;
-       if (*start >= task_size)
+       if (start >= task_size)
                return -EINVAL;
-       if (len > task_size - *start)
+       if (len > task_size - start)
                return -EINVAL;
        return 0;
 }
@@ -1316,7 +1314,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
                vm_flags |= VM_UFFD_MINOR;
        }
 
-       ret = validate_range(mm, &uffdio_register.range.start,
+       ret = validate_range(mm, uffdio_register.range.start,
                             uffdio_register.range.len);
        if (ret)
                goto out;
@@ -1522,7 +1520,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
        if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
                goto out;
 
-       ret = validate_range(mm, &uffdio_unregister.start,
+       ret = validate_range(mm, uffdio_unregister.start,
                             uffdio_unregister.len);
        if (ret)
                goto out;
@@ -1671,7 +1669,7 @@ static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
        if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
                goto out;
 
-       ret = validate_range(ctx->mm, &uffdio_wake.start, uffdio_wake.len);
+       ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
        if (ret)
                goto out;
 
@@ -1711,7 +1709,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
                           sizeof(uffdio_copy)-sizeof(__s64)))
                goto out;
 
-       ret = validate_range(ctx->mm, &uffdio_copy.dst, uffdio_copy.len);
+       ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
        if (ret)
                goto out;
        /*
@@ -1768,7 +1766,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
                           sizeof(uffdio_zeropage)-sizeof(__s64)))
                goto out;
 
-       ret = validate_range(ctx->mm, &uffdio_zeropage.range.start,
+       ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
                             uffdio_zeropage.range.len);
        if (ret)
                goto out;
@@ -1818,7 +1816,7 @@ static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
                           sizeof(struct uffdio_writeprotect)))
                return -EFAULT;
 
-       ret = validate_range(ctx->mm, &uffdio_wp.range.start,
+       ret = validate_range(ctx->mm, uffdio_wp.range.start,
                             uffdio_wp.range.len);
        if (ret)
                return ret;
@@ -1866,7 +1864,7 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
                           sizeof(uffdio_continue) - (sizeof(__s64))))
                goto out;
 
-       ret = validate_range(ctx->mm, &uffdio_continue.range.start,
+       ret = validate_range(ctx->mm, uffdio_continue.range.start,
                             uffdio_continue.range.len);
        if (ret)
                goto out;
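validate_range() is now a pure predicate on (start, len) with no write-back through a pointer, so callers keep exactly the address userspace passed in. Its checks distill to roughly the following, assuming PAGE_SZ is the page size:

	#include <errno.h>

	#define PAGE_SZ 4096ULL		/* assumed page size */

	static int check_range(unsigned long long start, unsigned long long len,
			       unsigned long long min_addr, unsigned long long task_size)
	{
		if ((start | len) & (PAGE_SZ - 1))
			return -EINVAL;		/* both must be page aligned */
		if (!len || start < min_addr || start >= task_size)
			return -EINVAL;
		if (len > task_size - start)	/* overflow-safe upper bound */
			return -EINVAL;
		return 0;
	}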
index eac6788..c4769a9 100644 (file)
@@ -253,7 +253,7 @@ static int vboxsf_dir_instantiate(struct inode *parent, struct dentry *dentry,
 }
 
 static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry,
-                            umode_t mode, int is_dir)
+                            umode_t mode, bool is_dir, bool excl, u64 *handle_ret)
 {
        struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
        struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
@@ -261,10 +261,12 @@ static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry,
        int err;
 
        params.handle = SHFL_HANDLE_NIL;
-       params.create_flags = SHFL_CF_ACT_CREATE_IF_NEW |
-                             SHFL_CF_ACT_FAIL_IF_EXISTS |
-                             SHFL_CF_ACCESS_READWRITE |
-                             (is_dir ? SHFL_CF_DIRECTORY : 0);
+       params.create_flags = SHFL_CF_ACT_CREATE_IF_NEW | SHFL_CF_ACCESS_READWRITE;
+       if (is_dir)
+               params.create_flags |= SHFL_CF_DIRECTORY;
+       if (excl)
+               params.create_flags |= SHFL_CF_ACT_FAIL_IF_EXISTS;
+
        params.info.attr.mode = (mode & 0777) |
                                (is_dir ? SHFL_TYPE_DIRECTORY : SHFL_TYPE_FILE);
        params.info.attr.additional = SHFLFSOBJATTRADD_NOTHING;
@@ -276,30 +278,81 @@ static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry,
        if (params.result != SHFL_FILE_CREATED)
                return -EPERM;
 
-       vboxsf_close(sbi->root, params.handle);
-
        err = vboxsf_dir_instantiate(parent, dentry, &params.info);
        if (err)
-               return err;
+               goto out;
 
        /* parent directory access/change time changed */
        sf_parent_i->force_restat = 1;
 
-       return 0;
+out:
+       if (err == 0 && handle_ret)
+               *handle_ret = params.handle;
+       else
+               vboxsf_close(sbi->root, params.handle);
+
+       return err;
 }
 
 static int vboxsf_dir_mkfile(struct user_namespace *mnt_userns,
                             struct inode *parent, struct dentry *dentry,
                             umode_t mode, bool excl)
 {
-       return vboxsf_dir_create(parent, dentry, mode, 0);
+       return vboxsf_dir_create(parent, dentry, mode, false, excl, NULL);
 }
 
 static int vboxsf_dir_mkdir(struct user_namespace *mnt_userns,
                            struct inode *parent, struct dentry *dentry,
                            umode_t mode)
 {
-       return vboxsf_dir_create(parent, dentry, mode, 1);
+       return vboxsf_dir_create(parent, dentry, mode, true, true, NULL);
+}
+
+static int vboxsf_dir_atomic_open(struct inode *parent, struct dentry *dentry,
+                                 struct file *file, unsigned int flags, umode_t mode)
+{
+       struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
+       struct vboxsf_handle *sf_handle;
+       struct dentry *res = NULL;
+       u64 handle;
+       int err;
+
+       if (d_in_lookup(dentry)) {
+               res = vboxsf_dir_lookup(parent, dentry, 0);
+               if (IS_ERR(res))
+                       return PTR_ERR(res);
+
+               if (res)
+                       dentry = res;
+       }
+
+       /* Only creates */
+       if (!(flags & O_CREAT) || d_really_is_positive(dentry))
+               return finish_no_open(file, res);
+
+       err = vboxsf_dir_create(parent, dentry, mode, false, flags & O_EXCL, &handle);
+       if (err)
+               goto out;
+
+       sf_handle = vboxsf_create_sf_handle(d_inode(dentry), handle, SHFL_CF_ACCESS_READWRITE);
+       if (IS_ERR(sf_handle)) {
+               vboxsf_close(sbi->root, handle);
+               err = PTR_ERR(sf_handle);
+               goto out;
+       }
+
+       err = finish_open(file, dentry, generic_file_open);
+       if (err) {
+               /* This also closes the handle passed to vboxsf_create_sf_handle() */
+               vboxsf_release_sf_handle(d_inode(dentry), sf_handle);
+               goto out;
+       }
+
+       file->private_data = sf_handle;
+       file->f_mode |= FMODE_CREATED;
+out:
+       dput(res);
+       return err;
 }
 
 static int vboxsf_dir_unlink(struct inode *parent, struct dentry *dentry)
@@ -422,6 +475,7 @@ const struct inode_operations vboxsf_dir_iops = {
        .lookup  = vboxsf_dir_lookup,
        .create  = vboxsf_dir_mkfile,
        .mkdir   = vboxsf_dir_mkdir,
+       .atomic_open = vboxsf_dir_atomic_open,
        .rmdir   = vboxsf_dir_unlink,
        .unlink  = vboxsf_dir_unlink,
        .rename  = vboxsf_dir_rename,
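An ->atomic_open implementation covers three outcomes: defer to the regular lookup/open path, create-and-open in one shot, or fail. The skeleton vboxsf follows above looks roughly like this (details elided, names illustrative):

	static int fs_atomic_open(struct inode *dir, struct dentry *dentry,
				  struct file *file, unsigned int flags, umode_t mode)
	{
		int err;

		if (!(flags & O_CREAT) || d_really_is_positive(dentry))
			return finish_no_open(file, NULL);	/* fall back to ->lookup/->open */

		/* ... create the object, obtain a handle for file->private_data ... */

		err = finish_open(file, dentry, generic_file_open);
		if (err)
			return err;			/* release any handle taken above */
		file->f_mode |= FMODE_CREATED;		/* tell the VFS we created it */
		return 0;
	}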
index c4ab599..864c2fa 100644 (file)
@@ -20,17 +20,39 @@ struct vboxsf_handle {
        struct list_head head;
 };
 
-static int vboxsf_file_open(struct inode *inode, struct file *file)
+struct vboxsf_handle *vboxsf_create_sf_handle(struct inode *inode,
+                                             u64 handle, u32 access_flags)
 {
        struct vboxsf_inode *sf_i = VBOXSF_I(inode);
-       struct shfl_createparms params = {};
        struct vboxsf_handle *sf_handle;
-       u32 access_flags = 0;
-       int err;
 
        sf_handle = kmalloc(sizeof(*sf_handle), GFP_KERNEL);
        if (!sf_handle)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
+
+       /* the host may have given us different attrs than requested */
+       sf_i->force_restat = 1;
+
+       /* init our handle struct and add it to the inode's handles list */
+       sf_handle->handle = handle;
+       sf_handle->root = VBOXSF_SBI(inode->i_sb)->root;
+       sf_handle->access_flags = access_flags;
+       kref_init(&sf_handle->refcount);
+
+       mutex_lock(&sf_i->handle_list_mutex);
+       list_add(&sf_handle->head, &sf_i->handle_list);
+       mutex_unlock(&sf_i->handle_list_mutex);
+
+       return sf_handle;
+}
+
+static int vboxsf_file_open(struct inode *inode, struct file *file)
+{
+       struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
+       struct shfl_createparms params = {};
+       struct vboxsf_handle *sf_handle;
+       u32 access_flags = 0;
+       int err;
 
        /*
         * We check the value of params.handle afterwards to find out if
@@ -83,23 +105,14 @@ static int vboxsf_file_open(struct inode *inode, struct file *file)
        err = vboxsf_create_at_dentry(file_dentry(file), &params);
        if (err == 0 && params.handle == SHFL_HANDLE_NIL)
                err = (params.result == SHFL_FILE_EXISTS) ? -EEXIST : -ENOENT;
-       if (err) {
-               kfree(sf_handle);
+       if (err)
                return err;
-       }
-
-       /* the host may have given us different attr then requested */
-       sf_i->force_restat = 1;
 
-       /* init our handle struct and add it to the inode's handles list */
-       sf_handle->handle = params.handle;
-       sf_handle->root = VBOXSF_SBI(inode->i_sb)->root;
-       sf_handle->access_flags = access_flags;
-       kref_init(&sf_handle->refcount);
-
-       mutex_lock(&sf_i->handle_list_mutex);
-       list_add(&sf_handle->head, &sf_i->handle_list);
-       mutex_unlock(&sf_i->handle_list_mutex);
+       sf_handle = vboxsf_create_sf_handle(inode, params.handle, access_flags);
+       if (IS_ERR(sf_handle)) {
+               vboxsf_close(sbi->root, params.handle);
+               return PTR_ERR(sf_handle);
+       }
 
        file->private_data = sf_handle;
        return 0;
@@ -114,22 +127,26 @@ static void vboxsf_handle_release(struct kref *refcount)
        kfree(sf_handle);
 }
 
-static int vboxsf_file_release(struct inode *inode, struct file *file)
+void vboxsf_release_sf_handle(struct inode *inode, struct vboxsf_handle *sf_handle)
 {
        struct vboxsf_inode *sf_i = VBOXSF_I(inode);
-       struct vboxsf_handle *sf_handle = file->private_data;
 
+       mutex_lock(&sf_i->handle_list_mutex);
+       list_del(&sf_handle->head);
+       mutex_unlock(&sf_i->handle_list_mutex);
+
+       kref_put(&sf_handle->refcount, vboxsf_handle_release);
+}
+
+static int vboxsf_file_release(struct inode *inode, struct file *file)
+{
        /*
         * When a file is closed on our (the guest) side, we want any subsequent
         * accesses done on the host side to see all changes done from our side.
         */
        filemap_write_and_wait(inode->i_mapping);
 
-       mutex_lock(&sf_i->handle_list_mutex);
-       list_del(&sf_handle->head);
-       mutex_unlock(&sf_i->handle_list_mutex);
-
-       kref_put(&sf_handle->refcount, vboxsf_handle_release);
+       vboxsf_release_sf_handle(inode, file->private_data);
        return 0;
 }
 
index 6a7a9ce..9047bef 100644 (file)
@@ -18,6 +18,8 @@
 #define VBOXSF_SBI(sb) ((struct vboxsf_sbi *)(sb)->s_fs_info)
 #define VBOXSF_I(i)    container_of(i, struct vboxsf_inode, vfs_inode)
 
+struct vboxsf_handle;
+
 struct vboxsf_options {
        unsigned long ttl;
        kuid_t uid;
@@ -80,6 +82,11 @@ extern const struct file_operations vboxsf_reg_fops;
 extern const struct address_space_operations vboxsf_reg_aops;
 extern const struct dentry_operations vboxsf_dentry_ops;
 
+/* from file.c */
+struct vboxsf_handle *vboxsf_create_sf_handle(struct inode *inode,
+                                             u64 handle, u32 access_flags);
+void vboxsf_release_sf_handle(struct inode *inode, struct vboxsf_handle *sf_handle);
+
 /* from utils.c */
 struct inode *vboxsf_new_inode(struct super_block *sb);
 int vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode,
index 778ec52..ee9ec0c 100644 (file)
@@ -804,6 +804,14 @@ xfs_ag_shrink_space(
        args.fsbno = XFS_AGB_TO_FSB(mp, agno, aglen - delta);
 
        /*
+        * Make sure that the last inode cluster cannot overlap with the new
+        * end of the AG, even if it's sparse.
+        */
+       error = xfs_ialloc_check_shrink(*tpp, agno, agibp, aglen - delta);
+       if (error)
+               return error;
+
+       /*
         * Disable perag reservations so it doesn't cause the allocation request
         * to fail. We'll reestablish reservation before we return.
         */
index d9d7d51..191d517 100644 (file)
@@ -483,7 +483,7 @@ xfs_attr_set_iter(
                if (error)
                        return error;
 
-               /* fallthrough */
+               fallthrough;
        case XFS_DAS_RM_LBLK:
                /* Set state in case xfs_attr_rmtval_remove returns -EAGAIN */
                dac->dela_state = XFS_DAS_RM_LBLK;
@@ -496,7 +496,7 @@ xfs_attr_set_iter(
                        return -EAGAIN;
                }
 
-               /* fallthrough */
+               fallthrough;
        case XFS_DAS_RD_LEAF:
                /*
                 * This is the last step for leaf format. Read the block with
@@ -528,7 +528,7 @@ xfs_attr_set_iter(
                                return error;
                }
 
-               /* fallthrough */
+               fallthrough;
        case XFS_DAS_ALLOC_NODE:
                /*
                 * If there was an out-of-line value, allocate the blocks we
@@ -590,7 +590,7 @@ xfs_attr_set_iter(
                if (error)
                        return error;
 
-               /* fallthrough */
+               fallthrough;
        case XFS_DAS_RM_NBLK:
                /* Set state in case xfs_attr_rmtval_remove returns -EAGAIN */
                dac->dela_state = XFS_DAS_RM_NBLK;
@@ -603,7 +603,7 @@ xfs_attr_set_iter(
                        return -EAGAIN;
                }
 
-               /* fallthrough */
+               fallthrough;
        case XFS_DAS_CLR_FLAG:
                /*
                 * The last state for node format. Look up the old attr and
@@ -1406,7 +1406,7 @@ xfs_attr_remove_iter(
                        state = dac->da_state;
                }
 
-               /* fallthrough */
+               fallthrough;
        case XFS_DAS_RMTBLK:
                dac->dela_state = XFS_DAS_RMTBLK;
 
@@ -1441,7 +1441,7 @@ xfs_attr_remove_iter(
                        return -EAGAIN;
                }
 
-               /* fallthrough */
+               fallthrough;
        case XFS_DAS_RM_NAME:
                /*
                 * If we came here fresh from a transaction roll, reattach all
@@ -1469,7 +1469,7 @@ xfs_attr_remove_iter(
                        return -EAGAIN;
                }
 
-               /* fallthrough */
+               fallthrough;
        case XFS_DAS_RM_SHRINK:
                /*
                 * If the result is small enough, push it all into the inode.
index 57d9cb6..aaf8805 100644 (file)
@@ -2928,3 +2928,58 @@ xfs_ialloc_calc_rootino(
 
        return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno));
 }
+
+/*
+ * Ensure there are no sparse inode clusters that cross the new EOAG.
+ *
+ * This is a no-op for non-spinode filesystems since clusters are always fully
+ * allocated and checking the bnobt suffices.  However, a spinode filesystem
+ * could have a record where the upper inodes are free blocks.  If those blocks
+ * were removed from the filesystem, the inode record would extend beyond EOAG,
+ * which will be flagged as corruption.
+ */
+int
+xfs_ialloc_check_shrink(
+       struct xfs_trans        *tp,
+       xfs_agnumber_t          agno,
+       struct xfs_buf          *agibp,
+       xfs_agblock_t           new_length)
+{
+       struct xfs_inobt_rec_incore rec;
+       struct xfs_btree_cur    *cur;
+       struct xfs_mount        *mp = tp->t_mountp;
+       struct xfs_perag        *pag;
+       xfs_agino_t             agino = XFS_AGB_TO_AGINO(mp, new_length);
+       int                     has;
+       int                     error;
+
+       if (!xfs_sb_version_hassparseinodes(&mp->m_sb))
+               return 0;
+
+       pag = xfs_perag_get(mp, agno);
+       cur = xfs_inobt_init_cursor(mp, tp, agibp, pag, XFS_BTNUM_INO);
+
+       /* Look up the inobt record that would correspond to the new EOFS. */
+       error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has);
+       if (error || !has)
+               goto out;
+
+       error = xfs_inobt_get_rec(cur, &rec, &has);
+       if (error)
+               goto out;
+
+       if (!has) {
+               error = -EFSCORRUPTED;
+               goto out;
+       }
+
+       /* If the record covers inodes that would be beyond EOFS, bail out. */
+       if (rec.ir_startino + XFS_INODES_PER_CHUNK > agino) {
+               error = -ENOSPC;
+               goto out;
+       }
+out:
+       xfs_btree_del_cursor(cur, error);
+       xfs_perag_put(pag);
+       return error;
+}
index 9df7c80..9a2112b 100644 (file)
@@ -122,4 +122,7 @@ int xfs_ialloc_cluster_alignment(struct xfs_mount *mp);
 void xfs_ialloc_setup_geometry(struct xfs_mount *mp);
 xfs_ino_t xfs_ialloc_calc_rootino(struct xfs_mount *mp, int sunit);
 
+int xfs_ialloc_check_shrink(struct xfs_trans *tp, xfs_agnumber_t agno,
+               struct xfs_buf *agibp, xfs_agblock_t new_length);
+
 #endif /* __XFS_IALLOC_H__ */
index 04ce361..84ea2e0 100644 (file)
@@ -592,23 +592,27 @@ xfs_inode_validate_extsize(
        /*
         * This comment describes a historic gap in this verifier function.
         *
-        * On older kernels, the extent size hint verifier doesn't check that
-        * the extent size hint is an integer multiple of the realtime extent
-        * size on a directory with both RTINHERIT and EXTSZINHERIT flags set.
-        * The verifier has always enforced the alignment rule for regular
-        * files with the REALTIME flag set.
+        * For a directory with both RTINHERIT and EXTSZINHERIT flags set, this
+        * function has never checked that the extent size hint is an integer
+        * multiple of the realtime extent size.  Since we allow users to set
+        * this combination on non-rt filesystems /and/ to change the rt
+        * extent size when adding a rt device to a filesystem, the net effect
+        * is that users can configure a filesystem anticipating one rt
+        * geometry and change their minds later.  Directories do not use the
+        * extent size hint, so this is harmless for them.
         *
         * If a directory with a misaligned extent size hint is allowed to
         * propagate that hint into a new regular realtime file, the result
         * is that the inode cluster buffer verifier will trigger a corruption
-        * shutdown the next time it is run.
+        * shutdown the next time it is run, because the verifier has always
+        * enforced the alignment rule for regular files.
         *
-        * Unfortunately, there could be filesystems with these misconfigured
-        * directories in the wild, so we cannot add a check to this verifier
-        * at this time because that will result a new source of directory
-        * corruption errors when reading an existing filesystem.  Instead, we
-        * permit the misconfiguration to pass through the verifiers so that
-        * callers of this function can correct and mitigate externally.
+        * Because we allow administrators to set a new rt extent size when
+        * adding a rt section, we cannot add a check to this verifier because
+        * that will result in a new source of directory corruption errors when
+        * reading an existing filesystem.  Instead, we rely on callers to
+        * decide when alignment checks are appropriate, and fix things up as
+        * needed.
         */
 
        if (rt_flag)
index d548ea4..2c5bcbc 100644 (file)
@@ -411,7 +411,16 @@ struct xfs_log_dinode {
        /* start of the extended dinode, writable fields */
        uint32_t        di_crc;         /* CRC of the inode */
        uint64_t        di_changecount; /* number of attribute changes */
-       xfs_lsn_t       di_lsn;         /* flush sequence */
+
+       /*
+        * The LSN we write to this field during formatting is not a reflection
+        * of the current on-disk LSN. It should never be used for recovery
+        * sequencing, nor should it be recovered into the on-disk inode at all.
+        * See xlog_recover_inode_commit_pass2() and xfs_log_dinode_to_disk()
+        * for details.
+        */
+       xfs_lsn_t       di_lsn;
+
        uint64_t        di_flags2;      /* more random flags */
        uint32_t        di_cowextsize;  /* basic cow extent size for file */
        uint8_t         di_pad2[12];    /* more padding for future expansion */
index 8d595a5..16f723e 100644 (file)
@@ -143,16 +143,14 @@ xfs_trans_log_inode(
        }
 
        /*
-        * Inode verifiers on older kernels don't check that the extent size
-        * hint is an integer multiple of the rt extent size on a directory
-        * with both rtinherit and extszinherit flags set.  If we're logging a
-        * directory that is misconfigured in this way, clear the hint.
+        * Inode verifiers do not check that the extent size hint is an integer
+        * multiple of the rt extent size on a directory with both rtinherit
+        * and extszinherit flags set.  If we're logging a directory that is
+        * misconfigured in this way, clear the hint.
         */
        if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
            (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) &&
            (ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) {
-               xfs_info_once(ip->i_mount,
-       "Correcting misaligned extent size hint in inode 0x%llx.", ip->i_ino);
                ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
                                   XFS_DIFLAG_EXTSZINHERIT);
                ip->i_extsize = 0;
index 61f90b2..76fbc7c 100644 (file)
@@ -73,11 +73,25 @@ xchk_inode_extsize(
        uint16_t                flags)
 {
        xfs_failaddr_t          fa;
+       uint32_t                value = be32_to_cpu(dip->di_extsize);
 
-       fa = xfs_inode_validate_extsize(sc->mp, be32_to_cpu(dip->di_extsize),
-                       mode, flags);
+       fa = xfs_inode_validate_extsize(sc->mp, value, mode, flags);
        if (fa)
                xchk_ino_set_corrupt(sc, ino);
+
+       /*
+        * XFS allows a sysadmin to change the rt extent size when adding a rt
+        * section to a filesystem after formatting.  If there are any
+        * directories with extszinherit and rtinherit set, the hint could
+        * become misaligned with the new rextsize.  The verifier doesn't check
+        * this, because we allow rtinherit directories even without an rt
+        * device.  Flag this as an administrative warning since we will clean
+        * this up eventually.
+        */
+       if ((flags & XFS_DIFLAG_RTINHERIT) &&
+           (flags & XFS_DIFLAG_EXTSZINHERIT) &&
+           value % sc->mp->m_sb.sb_rextsize > 0)
+               xchk_ino_set_warning(sc, ino);
 }
 
 /*
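
For context, a sketch of how a directory legitimately acquires this flag
combination from userspace before an rt device with a different extent size
is ever added; the descriptor and the 64KiB hint are illustrative only:

    #include <sys/ioctl.h>
    #include <linux/fs.h>

    /* Set rtinherit+extszinherit with an arbitrary hint on a directory of
     * a filesystem that has no rt device (yet). */
    static int set_inherited_hint(int dirfd)
    {
            struct fsxattr fsx;

            if (ioctl(dirfd, FS_IOC_FSGETXATTR, &fsx) < 0)
                    return -1;
            fsx.fsx_xflags |= FS_XFLAG_RTINHERIT | FS_XFLAG_EXTSZINHERIT;
            fsx.fsx_extsize = 65536;  /* bytes; may not divide a later rextsize */
            return ioctl(dirfd, FS_IOC_FSSETXATTR, &fsx);
    }
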
index d44e8b4..4775485 100644 (file)
@@ -698,7 +698,8 @@ xlog_recover_do_inode_buffer(
 static xfs_lsn_t
 xlog_recover_get_buf_lsn(
        struct xfs_mount        *mp,
-       struct xfs_buf          *bp)
+       struct xfs_buf          *bp,
+       struct xfs_buf_log_format *buf_f)
 {
        uint32_t                magic32;
        uint16_t                magic16;
@@ -706,11 +707,20 @@ xlog_recover_get_buf_lsn(
        void                    *blk = bp->b_addr;
        uuid_t                  *uuid;
        xfs_lsn_t               lsn = -1;
+       uint16_t                blft;
 
        /* v4 filesystems always recover immediately */
        if (!xfs_sb_version_hascrc(&mp->m_sb))
                goto recover_immediately;
 
+       /*
+        * realtime bitmap and summary file blocks do not have magic numbers or
+        * UUIDs, so we must recover them immediately.
+        */
+       blft = xfs_blft_from_flags(buf_f);
+       if (blft == XFS_BLFT_RTBITMAP_BUF || blft == XFS_BLFT_RTSUMMARY_BUF)
+               goto recover_immediately;
+
        magic32 = be32_to_cpu(*(__be32 *)blk);
        switch (magic32) {
        case XFS_ABTB_CRC_MAGIC:
@@ -796,6 +806,7 @@ xlog_recover_get_buf_lsn(
        switch (magicda) {
        case XFS_DIR3_LEAF1_MAGIC:
        case XFS_DIR3_LEAFN_MAGIC:
+       case XFS_ATTR3_LEAF_MAGIC:
        case XFS_DA3_NODE_MAGIC:
                lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
                uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
@@ -919,7 +930,7 @@ xlog_recover_buf_commit_pass2(
         * the verifier will be reset to match whatever recover turns that
         * buffer into.
         */
-       lsn = xlog_recover_get_buf_lsn(mp, bp);
+       lsn = xlog_recover_get_buf_lsn(mp, bp, buf_f);
        if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
                trace_xfs_log_recover_buf_skip(log, buf_f);
                xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
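
The skip decision above relies on the usual packing of an xfs_lsn_t; a sketch
of the comparison semantics, assuming the standard CYCLE_LSN()/BLOCK_LSN()
accessors (cycle in the high 32 bits, block offset in the low 32 bits):

    /* Cycle-then-block comparison yields a total order over the life of
     * the log, equivalent in effect to XFS_LSN_CMP(). */
    static inline int lsn_cmp(xfs_lsn_t a, xfs_lsn_t b)
    {
            if (CYCLE_LSN(a) != CYCLE_LSN(b))
                    return CYCLE_LSN(a) < CYCLE_LSN(b) ? -1 : 1;
            if (BLOCK_LSN(a) != BLOCK_LSN(b))
                    return BLOCK_LSN(a) < BLOCK_LSN(b) ? -1 : 1;
            return 0;
    }
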
index a835ceb..990b72a 100644 (file)
@@ -2763,6 +2763,19 @@ xfs_remove(
                error = xfs_droplink(tp, ip);
                if (error)
                        goto out_trans_cancel;
+
+               /*
+                * Point the unlinked child directory's ".." entry to the root
+                * directory to eliminate back-references to inodes that may
+                * get freed before the child directory is closed.  If the fs
+                * gets shrunk, this can lead to dirent inode validation errors.
+                */
+               if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
+                       error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
+                                       tp->t_mountp->m_sb.sb_rootino, 0);
+                       if (error)
+                               return error;
+               }
        } else {
                /*
                 * When removing a non-directory we need to log the parent
index 7b79518..e0072a6 100644 (file)
@@ -145,7 +145,8 @@ xfs_log_dinode_to_disk_ts(
 STATIC void
 xfs_log_dinode_to_disk(
        struct xfs_log_dinode   *from,
-       struct xfs_dinode       *to)
+       struct xfs_dinode       *to,
+       xfs_lsn_t               lsn)
 {
        to->di_magic = cpu_to_be16(from->di_magic);
        to->di_mode = cpu_to_be16(from->di_mode);
@@ -182,7 +183,7 @@ xfs_log_dinode_to_disk(
                to->di_flags2 = cpu_to_be64(from->di_flags2);
                to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
                to->di_ino = cpu_to_be64(from->di_ino);
-               to->di_lsn = cpu_to_be64(from->di_lsn);
+               to->di_lsn = cpu_to_be64(lsn);
                memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
                uuid_copy(&to->di_uuid, &from->di_uuid);
                to->di_flushiter = 0;
@@ -261,16 +262,25 @@ xlog_recover_inode_commit_pass2(
        }
 
        /*
-        * If the inode has an LSN in it, recover the inode only if it's less
-        * than the lsn of the transaction we are replaying. Note: we still
-        * need to replay an owner change even though the inode is more recent
-        * than the transaction as there is no guarantee that all the btree
-        * blocks are more recent than this transaction, too.
+        * If the inode has an LSN in it, recover the inode only if the on-disk
+        * inode's LSN is older than the lsn of the transaction we are
+        * replaying. We can have multiple checkpoints with the same start LSN,
+        * so the current LSN being equal to the on-disk LSN doesn't necessarily
+        * mean that the on-disk inode is more recent than the change being
+        * replayed.
+        *
+        * We must check the current_lsn against the on-disk inode
+        * here because we can't trust the log dinode to contain a valid LSN
+        * (see comment below before replaying the log dinode for details).
+        *
+        * Note: we still need to replay an owner change even though the inode
+        * is more recent than the transaction as there is no guarantee that all
+        * the btree blocks are more recent than this transaction, too.
         */
        if (dip->di_version >= 3) {
                xfs_lsn_t       lsn = be64_to_cpu(dip->di_lsn);
 
-               if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
+               if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) > 0) {
                        trace_xfs_log_recover_inode_skip(log, in_f);
                        error = 0;
                        goto out_owner_change;
@@ -368,8 +378,17 @@ xlog_recover_inode_commit_pass2(
                goto out_release;
        }
 
-       /* recover the log dinode inode into the on disk inode */
-       xfs_log_dinode_to_disk(ldip, dip);
+       /*
+        * Recover the log dinode inode into the on disk inode.
+        *
+        * The LSN in the log dinode is garbage - it can be zero or reflect
+        * stale in-memory runtime state that isn't coherent with the changes
+        * logged in this transaction or the changes written to the on-disk
+        * inode.  Hence we write the current LSN into the inode because that
+        * matches what xfs_iflush() would write into the inode when flushing
+        * the changes in this transaction.
+        */
+       xfs_log_dinode_to_disk(ldip, dip, current_lsn);
 
        fields = in_f->ilf_fields;
        if (fields & XFS_ILOG_DEV)
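
The net behavioural change of the relaxed comparison, written out as a
hypothetical predicate: with "> 0", an on-disk LSN that is merely equal to
the replay LSN no longer suppresses recovery, because equal checkpoint start
LSNs do not prove that the disk copy is newer.

    static inline bool
    skip_inode_replay(xfs_lsn_t disk_lsn, xfs_lsn_t current_lsn)
    {
            /* Was ">= 0" before this change; equality now replays. */
            return disk_lsn && disk_lsn != -1 &&
                   XFS_LSN_CMP(disk_lsn, current_lsn) > 0;
    }
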
index 65270e6..16039ea 100644 (file)
@@ -1065,7 +1065,24 @@ xfs_fill_fsxattr(
 
        fileattr_fill_xflags(fa, xfs_ip2xflags(ip));
 
-       fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
+       if (ip->i_diflags & XFS_DIFLAG_EXTSIZE) {
+               fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
+       } else if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
+               /*
+                * Don't let a misaligned extent size hint on a directory
+                * escape to userspace if it won't pass the setattr checks
+                * later.
+                */
+               if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
+                   ip->i_extsize % mp->m_sb.sb_rextsize > 0) {
+                       fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE |
+                                           FS_XFLAG_EXTSZINHERIT);
+                       fa->fsx_extsize = 0;
+               } else {
+                       fa->fsx_extsize = XFS_FSB_TO_B(mp, ip->i_extsize);
+               }
+       }
+
        if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
                fa->fsx_cowextsize = XFS_FSB_TO_B(mp, ip->i_cowextsize);
        fa->fsx_projid = ip->i_projid;
@@ -1292,10 +1309,10 @@ xfs_ioctl_setattr_check_extsize(
        new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags);
 
        /*
-        * Inode verifiers on older kernels don't check that the extent size
-        * hint is an integer multiple of the rt extent size on a directory
-        * with both rtinherit and extszinherit flags set.  Don't let sysadmins
-        * misconfigure directories.
+        * Inode verifiers do not check that the extent size hint is an integer
+        * multiple of the rt extent size on a directory with both rtinherit
+        * and extszinherit flags set.  Don't let sysadmins misconfigure
+        * directories.
         */
        if ((new_diflags & XFS_DIFLAG_RTINHERIT) &&
            (new_diflags & XFS_DIFLAG_EXTSZINHERIT)) {
index 36fa265..60ac5fd 100644 (file)
@@ -78,13 +78,12 @@ xlog_verify_iclog(
 STATIC void
 xlog_verify_tail_lsn(
        struct xlog             *log,
-       struct xlog_in_core     *iclog,
-       xfs_lsn_t               tail_lsn);
+       struct xlog_in_core     *iclog);
 #else
 #define xlog_verify_dest_ptr(a,b)
 #define xlog_verify_grant_tail(a)
 #define xlog_verify_iclog(a,b,c)
-#define xlog_verify_tail_lsn(a,b,c)
+#define xlog_verify_tail_lsn(a,b)
 #endif
 
 STATIC int
@@ -487,51 +486,80 @@ out_error:
        return error;
 }
 
-static bool
-__xlog_state_release_iclog(
-       struct xlog             *log,
-       struct xlog_in_core     *iclog)
-{
-       lockdep_assert_held(&log->l_icloglock);
-
-       if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
-               /* update tail before writing to iclog */
-               xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
-
-               iclog->ic_state = XLOG_STATE_SYNCING;
-               iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
-               xlog_verify_tail_lsn(log, iclog, tail_lsn);
-               /* cycle incremented when incrementing curr_block */
-               trace_xlog_iclog_syncing(iclog, _RET_IP_);
-               return true;
-       }
-
-       ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
-       return false;
-}
-
 /*
  * Flush iclog to disk if this is the last reference to the given iclog and
  * it is in the WANT_SYNC state.
+ *
+ * If the caller passes in a non-zero @old_tail_lsn and the current log tail
+ * does not match, there may be metadata on disk that must be persisted before
+ * this iclog is written.  To satisfy that requirement, set the
+ * XLOG_ICL_NEED_FLUSH flag as a condition for writing this iclog with the new
+ * log tail value.
+ *
+ * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
+ * log tail is updated correctly. NEED_FUA indicates that the iclog will be
+ * written to stable storage, and implies that a commit record is contained
+ * within the iclog. We need to ensure that the log tail does not move beyond
+ * the tail that the first commit record in the iclog ordered against, otherwise
+ * correct recovery of that checkpoint becomes dependent on future operations
+ * performed on this iclog.
+ *
+ * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
+ * current tail into the iclog. Once the iclog tail is set, future operations must
+ * not modify it, otherwise they potentially violate ordering constraints for
+ * the checkpoint commit that wrote the initial tail lsn value. The tail lsn in
+ * the iclog will get zeroed on activation of the iclog after sync, so we
+ * always capture the tail lsn on the iclog on the first NEED_FUA release
+ * regardless of the number of active reference counts on this iclog.
  */
+
 int
 xlog_state_release_iclog(
        struct xlog             *log,
-       struct xlog_in_core     *iclog)
+       struct xlog_in_core     *iclog,
+       xfs_lsn_t               old_tail_lsn)
 {
+       xfs_lsn_t               tail_lsn;
        lockdep_assert_held(&log->l_icloglock);
 
        trace_xlog_iclog_release(iclog, _RET_IP_);
        if (iclog->ic_state == XLOG_STATE_IOERROR)
                return -EIO;
 
-       if (atomic_dec_and_test(&iclog->ic_refcnt) &&
-           __xlog_state_release_iclog(log, iclog)) {
-               spin_unlock(&log->l_icloglock);
-               xlog_sync(log, iclog);
-               spin_lock(&log->l_icloglock);
+       /*
+        * Grabbing the current log tail needs to be atomic w.r.t. the writing
+        * of the tail LSN into the iclog so we guarantee that the log tail does
+        * not move between deciding if a cache flush is required and writing
+        * the LSN into the iclog below.
+        */
+       if (old_tail_lsn || iclog->ic_state == XLOG_STATE_WANT_SYNC) {
+               tail_lsn = xlog_assign_tail_lsn(log->l_mp);
+
+               if (old_tail_lsn && tail_lsn != old_tail_lsn)
+                       iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
+
+               if ((iclog->ic_flags & XLOG_ICL_NEED_FUA) &&
+                   !iclog->ic_header.h_tail_lsn)
+                       iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
        }
 
+       if (!atomic_dec_and_test(&iclog->ic_refcnt))
+               return 0;
+
+       if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
+               ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
+               return 0;
+       }
+
+       iclog->ic_state = XLOG_STATE_SYNCING;
+       if (!iclog->ic_header.h_tail_lsn)
+               iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+       xlog_verify_tail_lsn(log, iclog);
+       trace_xlog_iclog_syncing(iclog, _RET_IP_);
+
+       spin_unlock(&log->l_icloglock);
+       xlog_sync(log, iclog);
+       spin_lock(&log->l_icloglock);
        return 0;
 }
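
A sketch of the caller-side pattern the new @old_tail_lsn argument exists
for, as used by xlog_cil_push_work() later in this series:

    /* Sample the tail, flush the data device up to that tail, then hand
     * the sampled value to release; if the tail has moved in the
     * meantime, release sets XLOG_ICL_NEED_FLUSH so the iclog write
     * re-flushes before the log IO is issued. */
    preflush_tail_lsn = atomic64_read(&log->l_tail_lsn);
    xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev,
                            &bdev_flush);
    /* ... format the checkpoint into the iclog ... */
    xlog_state_release_iclog(log, commit_iclog, preflush_tail_lsn);
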
 
@@ -774,6 +802,21 @@ xfs_log_mount_cancel(
 }
 
 /*
+ * Flush out the iclog to disk ensuring that device caches are flushed and
+ * the iclog hits stable storage before any completion waiters are woken.
+ */
+static inline int
+xlog_force_iclog(
+       struct xlog_in_core     *iclog)
+{
+       atomic_inc(&iclog->ic_refcnt);
+       iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
+       if (iclog->ic_state == XLOG_STATE_ACTIVE)
+               xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
+       return xlog_state_release_iclog(iclog->ic_log, iclog, 0);
+}
+
+/*
  * Wait for the iclog and all prior iclogs to be written to disk as required by the
  * log force state machine. Waiting on ic_force_wait ensures iclog completions
  * have been ordered and callbacks run before we are woken here, hence
@@ -827,13 +870,6 @@ xlog_write_unmount_record(
        /* account for space used by record data */
        ticket->t_curr_res -= sizeof(ulf);
 
-       /*
-        * For external log devices, we need to flush the data device cache
-        * first to ensure all metadata writeback is on stable storage before we
-        * stamp the tail LSN into the unmount record.
-        */
-       if (log->l_targ != log->l_mp->m_ddev_targp)
-               blkdev_issue_flush(log->l_targ->bt_bdev);
        return xlog_write(log, &vec, ticket, NULL, NULL, XLOG_UNMOUNT_TRANS);
 }
 
@@ -865,18 +901,7 @@ out_err:
 
        spin_lock(&log->l_icloglock);
        iclog = log->l_iclog;
-       atomic_inc(&iclog->ic_refcnt);
-       if (iclog->ic_state == XLOG_STATE_ACTIVE)
-               xlog_state_switch_iclogs(log, iclog, 0);
-       else
-               ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
-                      iclog->ic_state == XLOG_STATE_IOERROR);
-       /*
-        * Ensure the journal is fully flushed and on stable storage once the
-        * iclog containing the unmount record is written.
-        */
-       iclog->ic_flags |= (XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
-       error = xlog_state_release_iclog(log, iclog);
+       error = xlog_force_iclog(iclog);
        xlog_wait_on_iclog(iclog);
 
        if (tic) {
@@ -1796,10 +1821,20 @@ xlog_write_iclog(
         * metadata writeback and causing priority inversions.
         */
        iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE;
-       if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH)
+       if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
                iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
+               /*
+                * For external log devices, we also need to flush the data
+                * device cache first to ensure all metadata writeback covered
+                * by the LSN in this iclog is on stable storage. This is slow,
+                * but it *must* complete before we issue the external log IO.
+                */
+               if (log->l_targ != log->l_mp->m_ddev_targp)
+                       blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev);
+       }
        if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
                iclog->ic_bio.bi_opf |= REQ_FUA;
+
        iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
 
        if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
@@ -2310,7 +2345,7 @@ xlog_write_copy_finish(
        return 0;
 
 release_iclog:
-       error = xlog_state_release_iclog(log, iclog);
+       error = xlog_state_release_iclog(log, iclog, 0);
        spin_unlock(&log->l_icloglock);
        return error;
 }
@@ -2529,7 +2564,7 @@ next_lv:
                ASSERT(optype & XLOG_COMMIT_TRANS);
                *commit_iclog = iclog;
        } else {
-               error = xlog_state_release_iclog(log, iclog);
+               error = xlog_state_release_iclog(log, iclog, 0);
        }
        spin_unlock(&log->l_icloglock);
 
@@ -2567,6 +2602,7 @@ xlog_state_activate_iclog(
        memset(iclog->ic_header.h_cycle_data, 0,
                sizeof(iclog->ic_header.h_cycle_data));
        iclog->ic_header.h_lsn = 0;
+       iclog->ic_header.h_tail_lsn = 0;
 }
 
 /*
@@ -2967,7 +3003,7 @@ restart:
                 * reference to the iclog.
                 */
                if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
-                       error = xlog_state_release_iclog(log, iclog);
+                       error = xlog_state_release_iclog(log, iclog, 0);
                spin_unlock(&log->l_icloglock);
                if (error)
                        return error;
@@ -3132,6 +3168,35 @@ xlog_state_switch_iclogs(
 }
 
 /*
+ * Force the iclog to disk and check if the iclog has been completed before
+ * xlog_force_iclog() returns. This can happen on synchronous (e.g.
+ * pmem) or fast async storage because we drop the icloglock to issue the IO.
+ * If completion has already occurred, tell the caller so that it can avoid an
+ * unnecessary wait on the iclog.
+ */
+static int
+xlog_force_and_check_iclog(
+       struct xlog_in_core     *iclog,
+       bool                    *completed)
+{
+       xfs_lsn_t               lsn = be64_to_cpu(iclog->ic_header.h_lsn);
+       int                     error;
+
+       *completed = false;
+       error = xlog_force_iclog(iclog);
+       if (error)
+               return error;
+
+       /*
+        * If the iclog has already been completed and reused, the header LSN
+        * will have been rewritten by completion.
+        */
+       if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
+               *completed = true;
+       return 0;
+}
+
+/*
  * Write out all data in the in-core log as of this exact moment in time.
  *
  * Data may be written to the in-core log during this call.  However,
@@ -3165,7 +3230,6 @@ xfs_log_force(
 {
        struct xlog             *log = mp->m_log;
        struct xlog_in_core     *iclog;
-       xfs_lsn_t               lsn;
 
        XFS_STATS_INC(mp, xs_log_force);
        trace_xfs_log_force(mp, 0, _RET_IP_);
@@ -3193,39 +3257,33 @@ xfs_log_force(
                iclog = iclog->ic_prev;
        } else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
                if (atomic_read(&iclog->ic_refcnt) == 0) {
-                       /*
-                        * We are the only one with access to this iclog.
-                        *
-                        * Flush it out now.  There should be a roundoff of zero
-                        * to show that someone has already taken care of the
-                        * roundoff from the previous sync.
-                        */
-                       atomic_inc(&iclog->ic_refcnt);
-                       lsn = be64_to_cpu(iclog->ic_header.h_lsn);
-                       xlog_state_switch_iclogs(log, iclog, 0);
-                       if (xlog_state_release_iclog(log, iclog))
+                       /* We have exclusive access to this iclog. */
+                       bool    completed;
+
+                       if (xlog_force_and_check_iclog(iclog, &completed))
                                goto out_error;
 
-                       if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
+                       if (completed)
                                goto out_unlock;
                } else {
                        /*
-                        * Someone else is writing to this iclog.
-                        *
-                        * Use its call to flush out the data.  However, the
-                        * other thread may not force out this LR, so we mark
-                        * it WANT_SYNC.
+                        * Someone else is still writing to this iclog, so we
+                        * need to ensure that when they release the iclog it
+                        * gets synced immediately as we may be waiting on it.
                         */
                        xlog_state_switch_iclogs(log, iclog, 0);
                }
-       } else {
-               /*
-                * If the head iclog is not active nor dirty, we just attach
-                * ourselves to the head and go to sleep if necessary.
-                */
-               ;
        }
 
+       /*
+        * The iclog we are about to wait on may contain the checkpoint pushed
+        * by the above xlog_cil_force() call, but it may not have been pushed
+        * to disk yet. Like the ACTIVE case above, we need to make sure caches
+        * are flushed when this iclog is written.
+        */
+       if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
+               iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
+
        if (flags & XFS_LOG_SYNC)
                return xlog_wait_on_iclog(iclog);
 out_unlock:
@@ -3245,6 +3303,7 @@ xlog_force_lsn(
        bool                    already_slept)
 {
        struct xlog_in_core     *iclog;
+       bool                    completed;
 
        spin_lock(&log->l_icloglock);
        iclog = log->l_iclog;
@@ -3258,7 +3317,8 @@ xlog_force_lsn(
                        goto out_unlock;
        }
 
-       if (iclog->ic_state == XLOG_STATE_ACTIVE) {
+       switch (iclog->ic_state) {
+       case XLOG_STATE_ACTIVE:
                /*
                 * We sleep here if we haven't already slept (e.g. this is the
                 * first time we've looked at the correct iclog buf) and the
@@ -3281,12 +3341,31 @@ xlog_force_lsn(
                                        &log->l_icloglock);
                        return -EAGAIN;
                }
-               atomic_inc(&iclog->ic_refcnt);
-               xlog_state_switch_iclogs(log, iclog, 0);
-               if (xlog_state_release_iclog(log, iclog))
+               if (xlog_force_and_check_iclog(iclog, &completed))
                        goto out_error;
                if (log_flushed)
                        *log_flushed = 1;
+               if (completed)
+                       goto out_unlock;
+               break;
+       case XLOG_STATE_WANT_SYNC:
+               /*
+                * This iclog may contain the checkpoint pushed by the
+                * xlog_cil_force_seq() call, but there are other writers still
+                * accessing it so it hasn't been pushed to disk yet. Like the
+                * ACTIVE case above, we need to make sure caches are flushed
+                * when this iclog is written.
+                */
+               iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
+               break;
+       default:
+               /*
+                * The entire checkpoint was written by the CIL force and is on
+                * its way to disk already. It will be stable when it
+                * completes, so we don't need to manipulate caches here at all.
+                * We just need to wait for completion if necessary.
+                */
+               break;
        }
 
        if (flags & XFS_LOG_SYNC)
@@ -3559,10 +3638,10 @@ xlog_verify_grant_tail(
 STATIC void
 xlog_verify_tail_lsn(
        struct xlog             *log,
-       struct xlog_in_core     *iclog,
-       xfs_lsn_t               tail_lsn)
+       struct xlog_in_core     *iclog)
 {
-    int blocks;
+       xfs_lsn_t       tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
+       int             blocks;
 
     if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
        blocks =
index b128aaa..4c44bc3 100644 (file)
@@ -654,8 +654,9 @@ xlog_cil_push_work(
        struct xfs_trans_header thdr;
        struct xfs_log_iovec    lhdr;
        struct xfs_log_vec      lvhdr = { NULL };
+       xfs_lsn_t               preflush_tail_lsn;
        xfs_lsn_t               commit_lsn;
-       xfs_lsn_t               push_seq;
+       xfs_csn_t               push_seq;
        struct bio              bio;
        DECLARE_COMPLETION_ONSTACK(bdev_flush);
 
@@ -730,7 +731,15 @@ xlog_cil_push_work(
         * because we hold the flush lock exclusively. Hence we can now issue
         * a cache flush to ensure all the completed metadata in the journal we
         * are about to overwrite is on stable storage.
+        *
+        * Because we are issuing this cache flush before we've written the
+        * tail lsn to the iclog, we can have metadata IO completions move the
+        * tail forwards between the completion of this flush and the iclog
+        * being written. In this case, we need to re-issue the cache flush
+        * before the iclog write. To detect whether the log tail moves, sample
+        * the tail LSN *before* we issue the flush.
         */
+       preflush_tail_lsn = atomic64_read(&log->l_tail_lsn);
        xfs_flush_bdev_async(&bio, log->l_mp->m_ddev_targp->bt_bdev,
                                &bdev_flush);
 
@@ -941,7 +950,7 @@ restart:
         * storage.
         */
        commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
-       xlog_state_release_iclog(log, commit_iclog);
+       xlog_state_release_iclog(log, commit_iclog, preflush_tail_lsn);
        spin_unlock(&log->l_icloglock);
        return;
 
index 4c41bbf..f3e79a4 100644 (file)
@@ -59,6 +59,16 @@ enum xlog_iclog_state {
        { XLOG_STATE_DIRTY,     "XLOG_STATE_DIRTY" }, \
        { XLOG_STATE_IOERROR,   "XLOG_STATE_IOERROR" }
 
+/*
+ * In core log flags
+ */
+#define XLOG_ICL_NEED_FLUSH    (1 << 0)        /* iclog needs REQ_PREFLUSH */
+#define XLOG_ICL_NEED_FUA      (1 << 1)        /* iclog needs REQ_FUA */
+
+#define XLOG_ICL_STRINGS \
+       { XLOG_ICL_NEED_FLUSH,  "XLOG_ICL_NEED_FLUSH" }, \
+       { XLOG_ICL_NEED_FUA,    "XLOG_ICL_NEED_FUA" }
+
 
 /*
  * Log ticket flags
@@ -143,9 +153,6 @@ enum xlog_iclog_state {
 
 #define XLOG_COVER_OPS         5
 
-#define XLOG_ICL_NEED_FLUSH    (1 << 0)        /* iclog needs REQ_PREFLUSH */
-#define XLOG_ICL_NEED_FUA      (1 << 1)        /* iclog needs REQ_FUA */
-
 /* Ticket reservation region accounting */
 #define XLOG_TIC_LEN_MAX       15
 
@@ -497,7 +504,8 @@ int xlog_commit_record(struct xlog *log, struct xlog_ticket *ticket,
 void   xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
 void   xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);
 
-int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog);
+int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
+               xfs_lsn_t log_tail_lsn);
 
 /*
  * When we crack an atomic LSN, we sample it first so that the value will not
index 4e7be6b..699066f 100644 (file)
@@ -923,16 +923,41 @@ xfs_growfs_rt(
        uint8_t         *rsum_cache;    /* old summary cache */
 
        sbp = &mp->m_sb;
-       /*
-        * Initial error checking.
-        */
+
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
-       if (mp->m_rtdev_targp == NULL || mp->m_rbmip == NULL ||
-           (nrblocks = in->newblocks) <= sbp->sb_rblocks ||
-           (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize)))
+
+       /* Needs to have been mounted with an rt device. */
+       if (!XFS_IS_REALTIME_MOUNT(mp))
+               return -EINVAL;
+       /*
+        * Mount should fail if the rt bitmap/summary files don't load, but
+        * we'll check anyway.
+        */
+       if (!mp->m_rbmip || !mp->m_rsumip)
+               return -EINVAL;
+
+       /* Shrink not supported. */
+       if (in->newblocks <= sbp->sb_rblocks)
+               return -EINVAL;
+
+       /* Can only change rt extent size when adding rt volume. */
+       if (sbp->sb_rblocks > 0 && in->extsize != sbp->sb_rextsize)
+               return -EINVAL;
+
+       /* Range check the extent size. */
+       if (XFS_FSB_TO_B(mp, in->extsize) > XFS_MAX_RTEXTSIZE ||
+           XFS_FSB_TO_B(mp, in->extsize) < XFS_MIN_RTEXTSIZE)
                return -EINVAL;
-       if ((error = xfs_sb_validate_fsb_count(sbp, nrblocks)))
+
+       /* Unsupported realtime features. */
+       if (xfs_sb_version_hasrmapbt(&mp->m_sb) ||
+           xfs_sb_version_hasreflink(&mp->m_sb))
+               return -EOPNOTSUPP;
+
+       nrblocks = in->newblocks;
+       error = xfs_sb_validate_fsb_count(sbp, nrblocks);
+       if (error)
                return error;
        /*
         * Read in the last block of the device, make sure it exists.
@@ -996,7 +1021,8 @@ xfs_growfs_rt(
                     ((sbp->sb_rextents & ((1 << mp->m_blkbit_log) - 1)) != 0);
             bmbno < nrbmblocks;
             bmbno++) {
-               xfs_trans_t     *tp;
+               struct xfs_trans        *tp;
+               xfs_rfsblock_t          nrblocks_step;
 
                *nmp = *mp;
                nsbp = &nmp->m_sb;
@@ -1005,10 +1031,9 @@ xfs_growfs_rt(
                 */
                nsbp->sb_rextsize = in->extsize;
                nsbp->sb_rbmblocks = bmbno + 1;
-               nsbp->sb_rblocks =
-                       XFS_RTMIN(nrblocks,
-                                 nsbp->sb_rbmblocks * NBBY *
-                                 nsbp->sb_blocksize * nsbp->sb_rextsize);
+               nrblocks_step = (bmbno + 1) * NBBY * nsbp->sb_blocksize *
+                               nsbp->sb_rextsize;
+               nsbp->sb_rblocks = min(nrblocks, nrblocks_step);
                nsbp->sb_rextents = nsbp->sb_rblocks;
                do_div(nsbp->sb_rextents, nsbp->sb_rextsize);
                ASSERT(nsbp->sb_rextents != 0);
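
A worked example for nrblocks_step, assuming a 4096-byte block size and a
one-block rt extent size:

    bits per bitmap block = NBBY * sb_blocksize = 8 * 4096 = 32768
    nrblocks_step(bmbno)  = (bmbno + 1) * 32768 * 1

Each grow step through bitmap block N therefore exposes min(nrblocks,
(N + 1) * 32768) rt blocks, one bitmap bit per rt extent.
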
index f9d8d60..1926029 100644 (file)
@@ -3944,6 +3944,7 @@ DECLARE_EVENT_CLASS(xlog_iclog_class,
                __field(uint32_t, state)
                __field(int32_t, refcount)
                __field(uint32_t, offset)
+               __field(uint32_t, flags)
                __field(unsigned long long, lsn)
                __field(unsigned long, caller_ip)
        ),
@@ -3952,15 +3953,17 @@ DECLARE_EVENT_CLASS(xlog_iclog_class,
                __entry->state = iclog->ic_state;
                __entry->refcount = atomic_read(&iclog->ic_refcnt);
                __entry->offset = iclog->ic_offset;
+               __entry->flags = iclog->ic_flags;
                __entry->lsn = be64_to_cpu(iclog->ic_header.h_lsn);
                __entry->caller_ip = caller_ip;
        ),
-       TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx caller %pS",
+       TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx flags %s caller %pS",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __print_symbolic(__entry->state, XLOG_STATE_STRINGS),
                  __entry->refcount,
                  __entry->offset,
                  __entry->lsn,
+                 __print_flags(__entry->flags, "|", XLOG_ICL_STRINGS),
                  (char *)__entry->caller_ip)
 
 );
index dbf0363..70055d4 100644 (file)
@@ -705,9 +705,6 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
                return 0;
 
        bio = bio_alloc(GFP_NOFS, nr_pages);
-       if (!bio)
-               return -ENOMEM;
-
        bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = zi->i_zsector;
        bio->bi_write_hint = iocb->ki_hint;
index 1ae993f..13d9337 100644 (file)
@@ -707,11 +707,6 @@ acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv);
  * @hrv: Hardware Revision of the device, pass -1 to not check _HRV
  *
  * The caller is responsible for invoking acpi_dev_put() on the returned device.
- *
- * FIXME: Due to above requirement there is a window that may invalidate @adev
- * and next iteration will use a dangling pointer, e.g. in the case of a
- * hotplug event. That said, the caller should ensure that this will never
- * happen.
  */
 #define for_each_acpi_dev_match(adev, hid, uid, hrv)                   \
        for (adev = acpi_dev_get_first_match_dev(hid, uid, hrv);        \
@@ -725,7 +720,8 @@ static inline struct acpi_device *acpi_dev_get(struct acpi_device *adev)
 
 static inline void acpi_dev_put(struct acpi_device *adev)
 {
-       put_device(&adev->dev);
+       if (adev)
+               put_device(&adev->dev);
 }
 
 struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle);
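
With the NULL check above, a caller that may break out of a match loop early
can release unconditionally; a minimal sketch (the HID and probe_one() are
hypothetical):

    struct acpi_device *adev = NULL;

    for_each_acpi_dev_match(adev, "ABCD0000", NULL, -1) {
            if (probe_one(adev) == 0)
                    break;          /* adev still holds a reference */
    }
    acpi_dev_put(adev);             /* safe: adev is NULL on normal exit */
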
index 10100a4..afb27cb 100644 (file)
@@ -68,6 +68,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
                               unsigned long arg);
 
 #define DRM_IOCTL_NR(n)                _IOC_NR(n)
+#define DRM_IOCTL_TYPE(n)              _IOC_TYPE(n)
 #define DRM_MAJOR       226
 
 /**
index 1d89865..0728ad0 100644 (file)
 #define R9A07G044_OSCCLK               21
 
 /* R9A07G044 Module Clocks */
-#define R9A07G044_CLK_GIC600           0
-#define R9A07G044_CLK_IA55             1
-#define R9A07G044_CLK_SYC              2
-#define R9A07G044_CLK_DMAC             3
-#define R9A07G044_CLK_SYSC             4
-#define R9A07G044_CLK_MTU              5
-#define R9A07G044_CLK_GPT              6
-#define R9A07G044_CLK_ETH0             7
-#define R9A07G044_CLK_ETH1             8
-#define R9A07G044_CLK_I2C0             9
-#define R9A07G044_CLK_I2C1             10
-#define R9A07G044_CLK_I2C2             11
-#define R9A07G044_CLK_I2C3             12
-#define R9A07G044_CLK_SCIF0            13
-#define R9A07G044_CLK_SCIF1            14
-#define R9A07G044_CLK_SCIF2            15
-#define R9A07G044_CLK_SCIF3            16
-#define R9A07G044_CLK_SCIF4            17
-#define R9A07G044_CLK_SCI0             18
-#define R9A07G044_CLK_SCI1             19
-#define R9A07G044_CLK_GPIO             20
-#define R9A07G044_CLK_SDHI0            21
-#define R9A07G044_CLK_SDHI1            22
-#define R9A07G044_CLK_USB0             23
-#define R9A07G044_CLK_USB1             24
-#define R9A07G044_CLK_CANFD            25
-#define R9A07G044_CLK_SSI0             26
-#define R9A07G044_CLK_SSI1             27
-#define R9A07G044_CLK_SSI2             28
-#define R9A07G044_CLK_SSI3             29
-#define R9A07G044_CLK_MHU              30
-#define R9A07G044_CLK_OSTM0            31
-#define R9A07G044_CLK_OSTM1            32
-#define R9A07G044_CLK_OSTM2            33
-#define R9A07G044_CLK_WDT0             34
-#define R9A07G044_CLK_WDT1             35
-#define R9A07G044_CLK_WDT2             36
-#define R9A07G044_CLK_WDT_PON          37
-#define R9A07G044_CLK_GPU              38
-#define R9A07G044_CLK_ISU              39
-#define R9A07G044_CLK_H264             40
-#define R9A07G044_CLK_CRU              41
-#define R9A07G044_CLK_MIPI_DSI         42
-#define R9A07G044_CLK_LCDC             43
-#define R9A07G044_CLK_SRC              44
-#define R9A07G044_CLK_RSPI0            45
-#define R9A07G044_CLK_RSPI1            46
-#define R9A07G044_CLK_RSPI2            47
-#define R9A07G044_CLK_ADC              48
-#define R9A07G044_CLK_TSU_PCLK         49
-#define R9A07G044_CLK_SPI              50
-#define R9A07G044_CLK_MIPI_DSI_V       51
-#define R9A07G044_CLK_MIPI_DSI_PIN     52
+#define R9A07G044_CA55_SCLK            0
+#define R9A07G044_CA55_PCLK            1
+#define R9A07G044_CA55_ATCLK           2
+#define R9A07G044_CA55_GICCLK          3
+#define R9A07G044_CA55_PERICLK         4
+#define R9A07G044_CA55_ACLK            5
+#define R9A07G044_CA55_TSCLK           6
+#define R9A07G044_GIC600_GICCLK                7
+#define R9A07G044_IA55_CLK             8
+#define R9A07G044_IA55_PCLK            9
+#define R9A07G044_MHU_PCLK             10
+#define R9A07G044_SYC_CNT_CLK          11
+#define R9A07G044_DMAC_ACLK            12
+#define R9A07G044_DMAC_PCLK            13
+#define R9A07G044_OSTM0_PCLK           14
+#define R9A07G044_OSTM1_PCLK           15
+#define R9A07G044_OSTM2_PCLK           16
+#define R9A07G044_MTU_X_MCK_MTU3       17
+#define R9A07G044_POE3_CLKM_POE                18
+#define R9A07G044_GPT_PCLK             19
+#define R9A07G044_POEG_A_CLKP          20
+#define R9A07G044_POEG_B_CLKP          21
+#define R9A07G044_POEG_C_CLKP          22
+#define R9A07G044_POEG_D_CLKP          23
+#define R9A07G044_WDT0_PCLK            24
+#define R9A07G044_WDT0_CLK             25
+#define R9A07G044_WDT1_PCLK            26
+#define R9A07G044_WDT1_CLK             27
+#define R9A07G044_WDT2_PCLK            28
+#define R9A07G044_WDT2_CLK             29
+#define R9A07G044_SPI_CLK2             30
+#define R9A07G044_SPI_CLK              31
+#define R9A07G044_SDHI0_IMCLK          32
+#define R9A07G044_SDHI0_IMCLK2         33
+#define R9A07G044_SDHI0_CLK_HS         34
+#define R9A07G044_SDHI0_ACLK           35
+#define R9A07G044_SDHI1_IMCLK          36
+#define R9A07G044_SDHI1_IMCLK2         37
+#define R9A07G044_SDHI1_CLK_HS         38
+#define R9A07G044_SDHI1_ACLK           39
+#define R9A07G044_GPU_CLK              40
+#define R9A07G044_GPU_AXI_CLK          41
+#define R9A07G044_GPU_ACE_CLK          42
+#define R9A07G044_ISU_ACLK             43
+#define R9A07G044_ISU_PCLK             44
+#define R9A07G044_H264_CLK_A           45
+#define R9A07G044_H264_CLK_P           46
+#define R9A07G044_CRU_SYSCLK           47
+#define R9A07G044_CRU_VCLK             48
+#define R9A07G044_CRU_PCLK             49
+#define R9A07G044_CRU_ACLK             50
+#define R9A07G044_MIPI_DSI_PLLCLK      51
+#define R9A07G044_MIPI_DSI_SYSCLK      52
+#define R9A07G044_MIPI_DSI_ACLK                53
+#define R9A07G044_MIPI_DSI_PCLK                54
+#define R9A07G044_MIPI_DSI_VCLK                55
+#define R9A07G044_MIPI_DSI_LPCLK       56
+#define R9A07G044_LCDC_CLK_A           57
+#define R9A07G044_LCDC_CLK_P           58
+#define R9A07G044_LCDC_CLK_D           59
+#define R9A07G044_SSI0_PCLK2           60
+#define R9A07G044_SSI0_PCLK_SFR                61
+#define R9A07G044_SSI1_PCLK2           62
+#define R9A07G044_SSI1_PCLK_SFR                63
+#define R9A07G044_SSI2_PCLK2           64
+#define R9A07G044_SSI2_PCLK_SFR                65
+#define R9A07G044_SSI3_PCLK2           66
+#define R9A07G044_SSI3_PCLK_SFR                67
+#define R9A07G044_SRC_CLKP             68
+#define R9A07G044_USB_U2H0_HCLK                69
+#define R9A07G044_USB_U2H1_HCLK                70
+#define R9A07G044_USB_U2P_EXR_CPUCLK   71
+#define R9A07G044_USB_PCLK             72
+#define R9A07G044_ETH0_CLK_AXI         73
+#define R9A07G044_ETH0_CLK_CHI         74
+#define R9A07G044_ETH1_CLK_AXI         75
+#define R9A07G044_ETH1_CLK_CHI         76
+#define R9A07G044_I2C0_PCLK            77
+#define R9A07G044_I2C1_PCLK            78
+#define R9A07G044_I2C2_PCLK            79
+#define R9A07G044_I2C3_PCLK            80
+#define R9A07G044_SCIF0_CLK_PCK                81
+#define R9A07G044_SCIF1_CLK_PCK                82
+#define R9A07G044_SCIF2_CLK_PCK                83
+#define R9A07G044_SCIF3_CLK_PCK                84
+#define R9A07G044_SCIF4_CLK_PCK                85
+#define R9A07G044_SCI0_CLKP            86
+#define R9A07G044_SCI1_CLKP            87
+#define R9A07G044_IRDA_CLKP            88
+#define R9A07G044_RSPI0_CLKB           89
+#define R9A07G044_RSPI1_CLKB           90
+#define R9A07G044_RSPI2_CLKB           91
+#define R9A07G044_CANFD_PCLK           92
+#define R9A07G044_GPIO_HCLK            93
+#define R9A07G044_ADC_ADCLK            94
+#define R9A07G044_ADC_PCLK             95
+#define R9A07G044_TSU_PCLK             96
+
+/* R9A07G044 Resets */
+#define R9A07G044_CA55_RST_1_0         0
+#define R9A07G044_CA55_RST_1_1         1
+#define R9A07G044_CA55_RST_3_0         2
+#define R9A07G044_CA55_RST_3_1         3
+#define R9A07G044_CA55_RST_4           4
+#define R9A07G044_CA55_RST_5           5
+#define R9A07G044_CA55_RST_6           6
+#define R9A07G044_CA55_RST_7           7
+#define R9A07G044_CA55_RST_8           8
+#define R9A07G044_CA55_RST_9           9
+#define R9A07G044_CA55_RST_10          10
+#define R9A07G044_CA55_RST_11          11
+#define R9A07G044_CA55_RST_12          12
+#define R9A07G044_GIC600_GICRESET_N    13
+#define R9A07G044_GIC600_DBG_GICRESET_N        14
+#define R9A07G044_IA55_RESETN          15
+#define R9A07G044_MHU_RESETN           16
+#define R9A07G044_DMAC_ARESETN         17
+#define R9A07G044_DMAC_RST_ASYNC       18
+#define R9A07G044_SYC_RESETN           19
+#define R9A07G044_OSTM0_PRESETZ                20
+#define R9A07G044_OSTM1_PRESETZ                21
+#define R9A07G044_OSTM2_PRESETZ                22
+#define R9A07G044_MTU_X_PRESET_MTU3    23
+#define R9A07G044_POE3_RST_M_REG       24
+#define R9A07G044_GPT_RST_C            25
+#define R9A07G044_POEG_A_RST           26
+#define R9A07G044_POEG_B_RST           27
+#define R9A07G044_POEG_C_RST           28
+#define R9A07G044_POEG_D_RST           29
+#define R9A07G044_WDT0_PRESETN         30
+#define R9A07G044_WDT1_PRESETN         31
+#define R9A07G044_WDT2_PRESETN         32
+#define R9A07G044_SPI_RST              33
+#define R9A07G044_SDHI0_IXRST          34
+#define R9A07G044_SDHI1_IXRST          35
+#define R9A07G044_GPU_RESETN           36
+#define R9A07G044_GPU_AXI_RESETN       37
+#define R9A07G044_GPU_ACE_RESETN       38
+#define R9A07G044_ISU_ARESETN          39
+#define R9A07G044_ISU_PRESETN          40
+#define R9A07G044_H264_X_RESET_VCP     41
+#define R9A07G044_H264_CP_PRESET_P     42
+#define R9A07G044_CRU_CMN_RSTB         43
+#define R9A07G044_CRU_PRESETN          44
+#define R9A07G044_CRU_ARESETN          45
+#define R9A07G044_MIPI_DSI_CMN_RSTB    46
+#define R9A07G044_MIPI_DSI_ARESET_N    47
+#define R9A07G044_MIPI_DSI_PRESET_N    48
+#define R9A07G044_LCDC_RESET_N         49
+#define R9A07G044_SSI0_RST_M2_REG      50
+#define R9A07G044_SSI1_RST_M2_REG      51
+#define R9A07G044_SSI2_RST_M2_REG      52
+#define R9A07G044_SSI3_RST_M2_REG      53
+#define R9A07G044_SRC_RST              54
+#define R9A07G044_USB_U2H0_HRESETN     55
+#define R9A07G044_USB_U2H1_HRESETN     56
+#define R9A07G044_USB_U2P_EXL_SYSRST   57
+#define R9A07G044_USB_PRESETN          58
+#define R9A07G044_ETH0_RST_HW_N                59
+#define R9A07G044_ETH1_RST_HW_N                60
+#define R9A07G044_I2C0_MRST            61
+#define R9A07G044_I2C1_MRST            62
+#define R9A07G044_I2C2_MRST            63
+#define R9A07G044_I2C3_MRST            64
+#define R9A07G044_SCIF0_RST_SYSTEM_N   65
+#define R9A07G044_SCIF1_RST_SYSTEM_N   66
+#define R9A07G044_SCIF2_RST_SYSTEM_N   67
+#define R9A07G044_SCIF3_RST_SYSTEM_N   68
+#define R9A07G044_SCIF4_RST_SYSTEM_N   69
+#define R9A07G044_SCI0_RST             70
+#define R9A07G044_SCI1_RST             71
+#define R9A07G044_IRDA_RST             72
+#define R9A07G044_RSPI0_RST            73
+#define R9A07G044_RSPI1_RST            74
+#define R9A07G044_RSPI2_RST            75
+#define R9A07G044_CANFD_RSTP_N         76
+#define R9A07G044_CANFD_RSTC_N         77
+#define R9A07G044_GPIO_RSTN            78
+#define R9A07G044_GPIO_PORT_RESETN     79
+#define R9A07G044_GPIO_SPARE_RESETN    80
+#define R9A07G044_ADC_PRESETN          81
+#define R9A07G044_ADC_ADRST_N          82
+#define R9A07G044_TSU_PRESETN          83
 
 #endif /* __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__ */
index 3177181..d3afea4 100644 (file)
@@ -57,7 +57,7 @@ struct blk_keyslot_manager;
  * Maximum number of blkcg policies allowed to be registered concurrently.
  * Defined here to simplify include dependency.
  */
-#define BLKCG_MAX_POLS         5
+#define BLKCG_MAX_POLS         6
 
 typedef void (rq_end_io_fn)(struct request *, blk_status_t);
 
index f309fc1..e8e2b03 100644 (file)
@@ -780,6 +780,7 @@ struct bpf_jit_poke_descriptor {
        void *tailcall_target;
        void *tailcall_bypass;
        void *bypass_addr;
+       void *aux;
        union {
                struct {
                        struct bpf_map *map;
index a9db1ea..ae3ac3a 100644 (file)
@@ -134,4 +134,5 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup)
 BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
 #ifdef CONFIG_NET
 BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
+BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
 #endif
index e774ecc..828d08a 100644 (file)
@@ -340,8 +340,8 @@ struct bpf_insn_aux_data {
        };
        u64 map_key_state; /* constant (32 bit) key tracking for maps */
        int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
-       int sanitize_stack_off; /* stack slot to be cleared */
        u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
+       bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
        bool zext_dst; /* this insn zero extends dst reg */
        u8 alu_state; /* used in combination with alu_limit */
 
@@ -414,6 +414,7 @@ struct bpf_verifier_env {
        u32 used_map_cnt;               /* number of used maps */
        u32 used_btf_cnt;               /* number of used BTF objects */
        u32 id_gen;                     /* used to generate unique reg IDs */
+       bool explore_alu_limits;
        bool allow_ptr_leaks;
        bool allow_uninit_stack;
        bool allow_ptr_to_map_access;
index 29dbb60..232daae 100644 (file)
@@ -758,6 +758,16 @@ ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
                              enum ethtool_link_mode_bit_indices link_mode);
 
 /**
+ * ethtool_get_phc_vclocks - Derive phc vclocks information; the caller
+ *                           is responsible for freeing the memory of vclock_index
+ * @dev: pointer to net_device structure
+ * @vclock_index: pointer to pointer of vclock index
+ *
+ * Return: number of phc vclocks
+ */
+int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index);
+
+/**
  * ethtool_sprintf - Write formatted string to ethtool string data
  * @data: Pointer to start of string to update
  * @fmt: Format of string to write
index 472f970..83b8960 100644 (file)
@@ -73,6 +73,11 @@ struct ctl_table_header;
 /* unused opcode to mark call to interpreter with arguments */
 #define BPF_CALL_ARGS  0xe0
 
+/* unused opcode to mark speculation barrier for mitigating
+ * Speculative Store Bypass
+ */
+#define BPF_NOSPEC     0xc0
+
 /* As per nm, we expose JITed images as text (code) section for
  * kallsyms. That way, tools like perf can find it to match
  * addresses.
@@ -390,6 +395,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
                .off   = 0,                                     \
                .imm   = 0 })
 
+/* Speculation barrier */
+
+#define BPF_ST_NOSPEC()                                                \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ST | BPF_NOSPEC,                   \
+               .dst_reg = 0,                                   \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = 0 })
+
 /* Internal classic blocks for direct assignment */
 
 #define __BPF_STMT(CODE, K)                                    \
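
A sketch of the new opcode in an instruction stream; the placement, a barrier
ahead of a stack spill, follows the Spectre v4 mitigation this opcode was
introduced for, and the surrounding program is illustrative only:

    struct bpf_insn prog[] = {
            BPF_ST_NOSPEC(),        /* speculation barrier */
            BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), /* spill r1 */
            BPF_MOV64_IMM(BPF_REG_0, 0),
            BPF_EXIT_INSN(),
    };
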
index 37e1e8f..6b54982 100644 (file)
@@ -139,6 +139,9 @@ extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
 extern int generic_parse_monolithic(struct fs_context *fc, void *data);
 extern int vfs_get_tree(struct fs_context *fc);
 extern void put_fs_context(struct fs_context *fc);
+extern int vfs_parse_fs_param_source(struct fs_context *fc,
+                                    struct fs_parameter *param);
+extern void fc_drop_locked(struct fs_context *fc);
 
 /*
  * sget() wrappers to be called from the ->get_tree() op.
index 8c6e8e9..d9a606a 100644 (file)
@@ -318,14 +318,16 @@ static inline void memcpy_to_page(struct page *page, size_t offset,
 
        VM_BUG_ON(offset + len > PAGE_SIZE);
        memcpy(to + offset, from, len);
+       flush_dcache_page(page);
        kunmap_local(to);
 }
 
 static inline void memzero_page(struct page *page, size_t offset, size_t len)
 {
-       char *addr = kmap_atomic(page);
+       char *addr = kmap_local_page(page);
        memset(addr + offset, 0, len);
-       kunmap_atomic(addr);
+       flush_dcache_page(page);
+       kunmap_local(addr);
 }
 
 #endif /* _LINUX_HIGHMEM_H */
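
A minimal usage sketch for the fixed helper: with the added
flush_dcache_page(), the zeroed range is coherent even when the page is also
mapped through a highmem alias.

    /* Zero the tail of a partially written page (hypothetical caller). */
    static void zero_page_tail(struct page *page, size_t from)
    {
            memzero_page(page, from, PAGE_SIZE - from);
    }
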
index 25e2b4e..aee8ff4 100644 (file)
@@ -81,6 +81,8 @@ int ishtp_register_event_cb(struct ishtp_cl_device *device,
 
 /* Get the device * from ishtp device instance */
 struct device *ishtp_device(struct ishtp_cl_device *cl_device);
+/* wait for IPC resume */
+bool ishtp_wait_resume(struct ishtp_device *dev);
 /* Trace interface for clients */
 ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device);
 /* Get device pointer of PCI device for DMA acces */
index 5310e21..dd874a1 100644 (file)
@@ -3,6 +3,7 @@
 #define _LINUX_KASAN_H
 
 #include <linux/bug.h>
+#include <linux/kernel.h>
 #include <linux/static_key.h>
 #include <linux/types.h>
 
index acee44b..0f06c22 100644 (file)
 #define MARVELL_PHY_ID_88E1545         0x01410ea0
 #define MARVELL_PHY_ID_88E1548P                0x01410ec0
 #define MARVELL_PHY_ID_88E3016         0x01410e60
+#define MARVELL_PHY_ID_88X3310         0x002b09a0
 #define MARVELL_PHY_ID_88E2110         0x002b09b0
 #define MARVELL_PHY_ID_88X2222         0x01410f10
 
-/* PHY IDs and mask for Alaska 10G PHYs */
-#define MARVELL_PHY_ID_88X33X0_MASK    0xfffffff8
-#define MARVELL_PHY_ID_88X3310         0x002b09a0
-#define MARVELL_PHY_ID_88X3340         0x002b09a8
-
 /* Marvel 88E1111 in Finisar SFP module with modified PHY ID */
 #define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0
 
index cbf46f5..4a53c3c 100644 (file)
@@ -209,7 +209,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
  */
 #define for_each_mem_range(i, p_start, p_end) \
        __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,   \
-                            MEMBLOCK_NONE, p_start, p_end, NULL)
+                            MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
 
 /**
  * for_each_mem_range_rev - reverse iterate through memblock areas from
@@ -220,7 +220,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
  */
 #define for_each_mem_range_rev(i, p_start, p_end)                      \
        __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
-                                MEMBLOCK_NONE, p_start, p_end, NULL)
+                                MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
 
 /**
  * for_each_reserved_mem_range - iterate over all reserved memblock areas
index 2d1895c..40a0c2d 100644 (file)
@@ -200,13 +200,13 @@ enum rt5033_reg {
 #define RT5033_REGULATOR_BUCK_VOLTAGE_MIN              1000000U
 #define RT5033_REGULATOR_BUCK_VOLTAGE_MAX              3000000U
 #define RT5033_REGULATOR_BUCK_VOLTAGE_STEP             100000U
-#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM         32
+#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM         21
 
 /* RT5033 regulator LDO output voltage uV */
 #define RT5033_REGULATOR_LDO_VOLTAGE_MIN               1200000U
 #define RT5033_REGULATOR_LDO_VOLTAGE_MAX               3000000U
 #define RT5033_REGULATOR_LDO_VOLTAGE_STEP              100000U
-#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM          32
+#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM          19
 
 /* RT5033 regulator SAFE LDO output voltage uV */
 #define RT5033_REGULATOR_SAFE_LDO_VOLTAGE              4900000U
index 9b7b7cd..23dadf7 100644 (file)
@@ -51,7 +51,6 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
                                  struct page *newpage, struct page *page);
 extern int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page, int extra_count);
-extern void copy_huge_page(struct page *dst, struct page *src);
 #else
 
 static inline void putback_movable_pages(struct list_head *l) {}
@@ -77,10 +76,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 {
        return -ENOSYS;
 }
-
-static inline void copy_huge_page(struct page *dst, struct page *src)
-{
-}
 #endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_COMPACTION
index 57453db..7ca22e6 100644 (file)
@@ -906,6 +906,7 @@ void __put_page(struct page *page);
 void put_pages_list(struct list_head *pages);
 
 void split_page(struct page *page, unsigned int order);
+void copy_huge_page(struct page *dst, struct page *src);
 
 /*
  * Compound pages have a destructor function.  Provide a
index b7bf735..0828419 100644 (file)
@@ -81,9 +81,6 @@ extern int gpmc_configure(int cmd, int wval);
 extern void gpmc_read_settings_dt(struct device_node *np,
                                  struct gpmc_settings *p);
 
-extern void omap3_gpmc_save_context(void);
-extern void omap3_gpmc_restore_context(void);
-
 struct gpmc_timings;
 struct omap_nand_platform_data;
 struct omap_onenand_platform_data;
index d147480..e24d2c9 100644 (file)
@@ -1397,34 +1397,10 @@ static inline int p4d_clear_huge(p4d_t *p4d)
 }
 #endif /* !__PAGETABLE_P4D_FOLDED */
 
-#ifndef __PAGETABLE_PUD_FOLDED
 int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
-int pud_clear_huge(pud_t *pud);
-#else
-static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
-{
-       return 0;
-}
-static inline int pud_clear_huge(pud_t *pud)
-{
-       return 0;
-}
-#endif /* !__PAGETABLE_PUD_FOLDED */
-
-#ifndef __PAGETABLE_PMD_FOLDED
 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+int pud_clear_huge(pud_t *pud);
 int pmd_clear_huge(pmd_t *pmd);
-#else
-static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
-{
-       return 0;
-}
-static inline int pmd_clear_huge(pmd_t *pmd)
-{
-       return 0;
-}
-#endif /* !__PAGETABLE_PMD_FOLDED */
-
 int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
 int pud_free_pmd_page(pud_t *pud, unsigned long addr);
 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
index 601ba97..e60fa41 100644 (file)
@@ -14,8 +14,8 @@ struct ixp4xx_pata_data {
        volatile u32    *cs1_cfg;
        unsigned long   cs0_bits;
        unsigned long   cs1_bits;
-       void __iomem    *cs0;
-       void __iomem    *cs1;
+       void __iomem    *cmd;
+       void __iomem    *ctl;
 };
 
 #endif
index 971c926..167b9b0 100644 (file)
@@ -155,6 +155,7 @@ struct omap_sr {
        struct voltagedomain            *voltdm;
        struct dentry                   *dbg_dir;
        unsigned int                    irq;
+       struct clk                      *fck;
        int                             srid;
        int                             ip_type;
        int                             nvalue_count;
@@ -169,6 +170,7 @@ struct omap_sr {
        u32                             senp_mod;
        u32                             senn_mod;
        void __iomem                    *base;
+       unsigned long                   enabled:1;
 };
 
 /**
index aba237c..71fac92 100644 (file)
 #include <linux/device.h>
 #include <linux/pps_kernel.h>
 #include <linux/ptp_clock.h>
+#include <linux/timecounter.h>
+#include <linux/skbuff.h>
 
+#define PTP_CLOCK_NAME_LEN     32
 /**
  * struct ptp_clock_request - request PTP clock event
  *
@@ -134,7 +137,7 @@ struct ptp_system_timestamp {
 
 struct ptp_clock_info {
        struct module *owner;
-       char name[16];
+       char name[PTP_CLOCK_NAME_LEN];
        s32 max_adj;
        int n_alarm;
        int n_ext_ts;
@@ -304,6 +307,27 @@ int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);
  */
 void ptp_cancel_worker_sync(struct ptp_clock *ptp);
 
+/**
+ * ptp_get_vclocks_index() - get all vclocks index on pclock, and
+ *                           caller is responsible to free memory
+ *                           of vclock_index
+ *
+ * @pclock_index: phc index of ptp pclock.
+ * @vclock_index: pointer to pointer of vclock index.
+ *
+ * return number of vclocks.
+ */
+int ptp_get_vclocks_index(int pclock_index, int **vclock_index);
+
+/**
+ * ptp_convert_timestamp() - convert a timestamp to PTP vclock time
+ *
+ * @hwtstamps:    skb_shared_hwtstamps structure pointer
+ * @vclock_index: PHC index of the PTP vclock.
+ */
+void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps,
+                          int vclock_index);
+
 #else
 static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
                                                   struct device *parent)
@@ -323,6 +347,11 @@ static inline int ptp_schedule_worker(struct ptp_clock *ptp,
 { return -EOPNOTSUPP; }
 static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp)
 { }
+static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
+{ return 0; }
+static inline void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps,
+                                        int vclock_index)
+{ }
 
 #endif
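
As an aside for reviewers: a minimal sketch of how a consumer might drive the two new vclock helpers declared above. The kfree() release and pr_info() reporting are assumptions drawn from the kernel-doc, not code from this series.

	#include <linux/ptp_clock_kernel.h>
	#include <linux/slab.h>

	static void dump_vclocks(int pclock_index)
	{
		int *vclock_index = NULL;
		int i, num;

		/* the helper allocates the index array; the caller frees it */
		num = ptp_get_vclocks_index(pclock_index, &vclock_index);
		for (i = 0; i < num; i++)
			pr_info("pclock %d -> vclock phc %d\n",
				pclock_index, vclock_index[i]);
		kfree(vclock_index);
	}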
 
index 83fb861..c976cc6 100644 (file)
@@ -291,7 +291,9 @@ static inline int page_referenced(struct page *page, int is_locked,
        return 0;
 }
 
-#define try_to_unmap(page, refs) false
+static inline void try_to_unmap(struct page *page, enum ttu_flags flags)
+{
+}
 
 static inline int page_mkclean(struct page *page)
 {
index 79d0a12..80e781c 100644 (file)
@@ -101,6 +101,10 @@ struct scmi_clk_proto_ops {
  *     to sustained performance level mapping
  * @est_power_get: gets the estimated power cost for a given performance domain
  *     at a given frequency
+ * @fast_switch_possible: indicates if fast DVFS switching is possible or not
+ *     for a given device
+ * @power_scale_mw_get: indicates if the power values provided are in milliWatts
+ *     or in some other (abstract) scale
  */
 struct scmi_perf_proto_ops {
        int (*limits_set)(const struct scmi_protocol_handle *ph, u32 domain,
@@ -153,7 +157,7 @@ struct scmi_power_proto_ops {
 };
 
 /**
- * scmi_sensor_reading  - represent a timestamped read
+ * struct scmi_sensor_reading  - represent a timestamped read
  *
  * Used by @reading_get_timestamped method.
  *
@@ -167,7 +171,7 @@ struct scmi_sensor_reading {
 };
 
 /**
- * scmi_range_attrs  - specifies a sensor or axis values' range
+ * struct scmi_range_attrs  - specifies a sensor or axis values' range
  * @min_range: The minimum value which can be represented by the sensor/axis.
  * @max_range: The maximum value which can be represented by the sensor/axis.
  */
@@ -177,7 +181,7 @@ struct scmi_range_attrs {
 };
 
 /**
- * scmi_sensor_axis_info  - describes one sensor axes
+ * struct scmi_sensor_axis_info  - describes one sensor axis
  * @id: The axes ID.
  * @type: Axes type. Chosen amongst one of @enum scmi_sensor_class.
  * @scale: Power-of-10 multiplier applied to the axis unit.
@@ -205,8 +209,8 @@ struct scmi_sensor_axis_info {
 };
 
 /**
- * scmi_sensor_intervals_info  - describes number and type of available update
- * intervals
+ * struct scmi_sensor_intervals_info  - describes number and type of available
+ *     update intervals
  * @segmented: Flag for segmented intervals' representation. When True there
  *            will be exactly 3 intervals in @desc, with each entry
  *            representing a member of a segment in this order:
index afbf803..d2176a5 100644 (file)
@@ -51,6 +51,14 @@ struct scpi_sensor_info {
  *     OPP is an index to the list return by @dvfs_get_info
  * @dvfs_get_info: returns the DVFS capabilities of the given power
  *     domain. It includes the OPP list and the latency information
+ * @device_domain_id: gets the scpi domain id for a given device
+ * @get_transition_latency: gets the DVFS transition latency for a given device
+ * @add_opps_to_device: adds all the OPPs for a given device
+ * @sensor_get_capability: gets the list of capabilities for the sensors
+ * @sensor_get_info: gets the information of the specified sensor
+ * @sensor_get_value: gets the current value of the sensor
+ * @device_get_power_state: gets the power state of a power domain
+ * @device_set_power_state: sets the power state of a power domain
  */
 struct scpi_ops {
        u32 (*get_version)(void);
index 96f3190..14ab0c0 100644 (file)
@@ -285,11 +285,45 @@ static inline struct sk_psock *sk_psock(const struct sock *sk)
        return rcu_dereference_sk_user_data(sk);
 }
 
+static inline void sk_psock_set_state(struct sk_psock *psock,
+                                     enum sk_psock_state_bits bit)
+{
+       set_bit(bit, &psock->state);
+}
+
+static inline void sk_psock_clear_state(struct sk_psock *psock,
+                                       enum sk_psock_state_bits bit)
+{
+       clear_bit(bit, &psock->state);
+}
+
+static inline bool sk_psock_test_state(const struct sk_psock *psock,
+                                      enum sk_psock_state_bits bit)
+{
+       return test_bit(bit, &psock->state);
+}
+
+static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
+{
+       sk_drops_add(sk, skb);
+       kfree_skb(skb);
+}
+
+static inline void drop_sk_msg(struct sk_psock *psock, struct sk_msg *msg)
+{
+       if (msg->skb)
+               sock_drop(psock->sk, msg->skb);
+       kfree(msg);
+}
+
 static inline void sk_psock_queue_msg(struct sk_psock *psock,
                                      struct sk_msg *msg)
 {
        spin_lock_bh(&psock->ingress_lock);
-       list_add_tail(&msg->list, &psock->ingress_msg);
+       if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
+               list_add_tail(&msg->list, &psock->ingress_msg);
+       else
+               drop_sk_msg(psock, msg);
        spin_unlock_bh(&psock->ingress_lock);
 }
 
@@ -406,24 +440,6 @@ static inline void sk_psock_restore_proto(struct sock *sk,
                psock->psock_update_sk_prot(sk, psock, true);
 }
 
-static inline void sk_psock_set_state(struct sk_psock *psock,
-                                     enum sk_psock_state_bits bit)
-{
-       set_bit(bit, &psock->state);
-}
-
-static inline void sk_psock_clear_state(struct sk_psock *psock,
-                                       enum sk_psock_state_bits bit)
-{
-       clear_bit(bit, &psock->state);
-}
-
-static inline bool sk_psock_test_state(const struct sk_psock *psock,
-                                      enum sk_psock_state_bits bit)
-{
-       return test_bit(bit, &psock->state);
-}
-
 static inline struct sk_psock *sk_psock_get(struct sock *sk)
 {
        struct sk_psock *psock;
index d5ae621..a6f03b3 100644 (file)
@@ -115,7 +115,9 @@ struct stmmac_axi {
 
 #define EST_GCL                1024
 struct stmmac_est {
+       struct mutex lock;
        int enable;
+       u32 btr_reserve[2];
        u32 btr_offset[2];
        u32 btr[2];
        u32 ctr[2];
index 143568d..4b57bbb 100644 (file)
@@ -338,7 +338,7 @@ do {                                                                             \
        FP_SET_EXCEPTION(FP_EX_INVALID | FP_EX_INVALID_ISI);                 \
        break;                                                               \
       }                                                                             \
-    /* FALLTHRU */                                                          \
+    fallthrough;                                                            \
                                                                             \
   case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL):                           \
   case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO):                                     \
index 1533573..625d9c7 100644 (file)
@@ -201,6 +201,11 @@ struct bond_up_slave {
  */
 #define BOND_LINK_NOCHANGE -1
 
+struct bond_ipsec {
+       struct list_head list;
+       struct xfrm_state *xs;
+};
+
 /*
  * Here are the locking policies for the two bonding locks:
  * Get rcu_read_lock when reading or RTNL when writing slave list.
@@ -249,7 +254,9 @@ struct bonding {
 #endif /* CONFIG_DEBUG_FS */
        struct rtnl_link_stats64 bond_stats;
 #ifdef CONFIG_XFRM_OFFLOAD
-       struct xfrm_state *xs;
+       struct list_head ipsec_list;
+       /* protecting ipsec_list */
+       spinlock_t ipsec_lock;
 #endif /* CONFIG_XFRM_OFFLOAD */
 };
 
index 73af4a6..40296ed 100644 (file)
@@ -38,7 +38,7 @@ static inline bool net_busy_loop_on(void)
 
 static inline bool sk_can_busy_loop(const struct sock *sk)
 {
-       return sk->sk_ll_usec && !signal_pending(current);
+       return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
 }
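
The READ_ONCE() only closes the data race if every writer pairs with WRITE_ONCE(); a sketch of the matching store on the SO_BUSY_POLL setsockopt path (assumed, not part of this hunk):

	/* in sock_setsockopt(), case SO_BUSY_POLL */
	WRITE_ONCE(sk->sk_ll_usec, val);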
 
 bool sk_busy_loop_end(void *p, unsigned long start_time);
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
deleted file mode 100644 (file)
index 552cf68..0000000
+++ /dev/null
@@ -1,200 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Author:  Daniel Martensson / daniel.martensson@stericsson.com
- *         Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
- */
-
-#ifndef CAIF_HSI_H_
-#define CAIF_HSI_H_
-
-#include <net/caif/caif_layer.h>
-#include <net/caif/caif_device.h>
-#include <linux/atomic.h>
-
-/*
- * Maximum number of CAIF frames that can reside in the same HSI frame.
- */
-#define CFHSI_MAX_PKTS 15
-
-/*
- * Maximum number of bytes used for the frame that can be embedded in the
- * HSI descriptor.
- */
-#define CFHSI_MAX_EMB_FRM_SZ 96
-
-/*
- * Decides if HSI buffers should be prefilled with 0xFF pattern for easier
- * debugging. Both TX and RX buffers will be filled before the transfer.
- */
-#define CFHSI_DBG_PREFILL              0
-
-/* Structure describing a HSI packet descriptor. */
-#pragma pack(1) /* Byte alignment. */
-struct cfhsi_desc {
-       u8 header;
-       u8 offset;
-       u16 cffrm_len[CFHSI_MAX_PKTS];
-       u8 emb_frm[CFHSI_MAX_EMB_FRM_SZ];
-};
-#pragma pack() /* Default alignment. */
-
-/* Size of the complete HSI packet descriptor. */
-#define CFHSI_DESC_SZ (sizeof(struct cfhsi_desc))
-
-/*
- * Size of the complete HSI packet descriptor excluding the optional embedded
- * CAIF frame.
- */
-#define CFHSI_DESC_SHORT_SZ (CFHSI_DESC_SZ - CFHSI_MAX_EMB_FRM_SZ)
-
-/*
- * Maximum bytes transferred in one transfer.
- */
-#define CFHSI_MAX_CAIF_FRAME_SZ 4096
-
-#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * CFHSI_MAX_CAIF_FRAME_SZ)
-
-/* Size of the complete HSI TX buffer. */
-#define CFHSI_BUF_SZ_TX (CFHSI_DESC_SZ + CFHSI_MAX_PAYLOAD_SZ)
-
-/* Size of the complete HSI RX buffer. */
-#define CFHSI_BUF_SZ_RX ((2 * CFHSI_DESC_SZ) + CFHSI_MAX_PAYLOAD_SZ)
-
-/* Bitmasks for the HSI descriptor. */
-#define CFHSI_PIGGY_DESC               (0x01 << 7)
-
-#define CFHSI_TX_STATE_IDLE                    0
-#define CFHSI_TX_STATE_XFER                    1
-
-#define CFHSI_RX_STATE_DESC                    0
-#define CFHSI_RX_STATE_PAYLOAD                 1
-
-/* Bitmasks for power management. */
-#define CFHSI_WAKE_UP                          0
-#define CFHSI_WAKE_UP_ACK                      1
-#define CFHSI_WAKE_DOWN_ACK                    2
-#define CFHSI_AWAKE                            3
-#define CFHSI_WAKELOCK_HELD                    4
-#define CFHSI_SHUTDOWN                         5
-#define CFHSI_FLUSH_FIFO                       6
-
-#ifndef CFHSI_INACTIVITY_TOUT
-#define CFHSI_INACTIVITY_TOUT                  (1 * HZ)
-#endif /* CFHSI_INACTIVITY_TOUT */
-
-#ifndef CFHSI_WAKE_TOUT
-#define CFHSI_WAKE_TOUT                        (3 * HZ)
-#endif /* CFHSI_WAKE_TOUT */
-
-#ifndef CFHSI_MAX_RX_RETRIES
-#define CFHSI_MAX_RX_RETRIES           (10 * HZ)
-#endif
-
-/* Structure implemented by the CAIF HSI driver. */
-struct cfhsi_cb_ops {
-       void (*tx_done_cb) (struct cfhsi_cb_ops *drv);
-       void (*rx_done_cb) (struct cfhsi_cb_ops *drv);
-       void (*wake_up_cb) (struct cfhsi_cb_ops *drv);
-       void (*wake_down_cb) (struct cfhsi_cb_ops *drv);
-};
-
-/* Structure implemented by HSI device. */
-struct cfhsi_ops {
-       int (*cfhsi_up) (struct cfhsi_ops *dev);
-       int (*cfhsi_down) (struct cfhsi_ops *dev);
-       int (*cfhsi_tx) (u8 *ptr, int len, struct cfhsi_ops *dev);
-       int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_ops *dev);
-       int (*cfhsi_wake_up) (struct cfhsi_ops *dev);
-       int (*cfhsi_wake_down) (struct cfhsi_ops *dev);
-       int (*cfhsi_get_peer_wake) (struct cfhsi_ops *dev, bool *status);
-       int (*cfhsi_fifo_occupancy) (struct cfhsi_ops *dev, size_t *occupancy);
-       int (*cfhsi_rx_cancel)(struct cfhsi_ops *dev);
-       struct cfhsi_cb_ops *cb_ops;
-};
-
-/* Structure holds status of received CAIF frames processing */
-struct cfhsi_rx_state {
-       int state;
-       int nfrms;
-       int pld_len;
-       int retries;
-       bool piggy_desc;
-};
-
-/* Priority mapping */
-enum {
-       CFHSI_PRIO_CTL = 0,
-       CFHSI_PRIO_VI,
-       CFHSI_PRIO_VO,
-       CFHSI_PRIO_BEBK,
-       CFHSI_PRIO_LAST,
-};
-
-struct cfhsi_config {
-       u32 inactivity_timeout;
-       u32 aggregation_timeout;
-       u32 head_align;
-       u32 tail_align;
-       u32 q_high_mark;
-       u32 q_low_mark;
-};
-
-/* Structure implemented by CAIF HSI drivers. */
-struct cfhsi {
-       struct caif_dev_common cfdev;
-       struct net_device *ndev;
-       struct platform_device *pdev;
-       struct sk_buff_head qhead[CFHSI_PRIO_LAST];
-       struct cfhsi_cb_ops cb_ops;
-       struct cfhsi_ops *ops;
-       int tx_state;
-       struct cfhsi_rx_state rx_state;
-       struct cfhsi_config cfg;
-       int rx_len;
-       u8 *rx_ptr;
-       u8 *tx_buf;
-       u8 *rx_buf;
-       u8 *rx_flip_buf;
-       spinlock_t lock;
-       int flow_off_sent;
-       struct list_head list;
-       struct work_struct wake_up_work;
-       struct work_struct wake_down_work;
-       struct work_struct out_of_sync_work;
-       struct workqueue_struct *wq;
-       wait_queue_head_t wake_up_wait;
-       wait_queue_head_t wake_down_wait;
-       wait_queue_head_t flush_fifo_wait;
-       struct timer_list inactivity_timer;
-       struct timer_list rx_slowpath_timer;
-
-       /* TX aggregation */
-       int aggregation_len;
-       struct timer_list aggregation_timer;
-
-       unsigned long bits;
-};
-extern struct platform_driver cfhsi_driver;
-
-/**
- * enum ifla_caif_hsi - CAIF HSI NetlinkRT parameters.
- * @IFLA_CAIF_HSI_INACTIVITY_TOUT: Inactivity timeout before
- *                     taking the HSI wakeline down, in milliseconds.
- * When using RT Netlink to create, destroy or configure a CAIF HSI interface,
- * enum ifla_caif_hsi is used to specify the configuration attributes.
- */
-enum ifla_caif_hsi {
-       __IFLA_CAIF_HSI_UNSPEC,
-       __IFLA_CAIF_HSI_INACTIVITY_TOUT,
-       __IFLA_CAIF_HSI_AGGREGATION_TOUT,
-       __IFLA_CAIF_HSI_HEAD_ALIGN,
-       __IFLA_CAIF_HSI_TAIL_ALIGN,
-       __IFLA_CAIF_HSI_QHIGH_WATERMARK,
-       __IFLA_CAIF_HSI_QLOW_WATERMARK,
-       __IFLA_CAIF_HSI_MAX
-};
-
-struct cfhsi_ops *cfhsi_get_ops(void);
-
-#endif         /* CAIF_HSI_H_ */
index 56cb3c3..14efa0d 100644 (file)
@@ -45,7 +45,9 @@ skb_tunnel_info(const struct sk_buff *skb)
                return &md_dst->u.tun_info;
 
        dst = skb_dst(skb);
-       if (dst && dst->lwtstate)
+       if (dst && dst->lwtstate &&
+           (dst->lwtstate->type == LWTUNNEL_ENCAP_IP ||
+            dst->lwtstate->type == LWTUNNEL_ENCAP_IP6))
                return lwt_tun_info(dst->lwtstate);
 
        return NULL;
index f14149d..625a38c 100644 (file)
@@ -263,7 +263,7 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
 int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                 int (*output)(struct net *, struct sock *, struct sk_buff *));
 
-static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
+static inline unsigned int ip6_skb_dst_mtu(struct sk_buff *skb)
 {
        int mtu;
 
index c0f0a13..49aa79c 100644 (file)
 #include <linux/if_ether.h>
 
 /* Lengths of frame formats */
-#define LLC_PDU_LEN_I  4       /* header and 2 control bytes */
-#define LLC_PDU_LEN_S  4
-#define LLC_PDU_LEN_U  3       /* header and 1 control byte */
+#define LLC_PDU_LEN_I          4       /* header and 2 control bytes */
+#define LLC_PDU_LEN_S          4
+#define LLC_PDU_LEN_U          3       /* header and 1 control byte */
+/* header and 1 control byte and XID info */
+#define LLC_PDU_LEN_U_XID      (LLC_PDU_LEN_U + sizeof(struct llc_xid_info))
 /* Known SAP addresses */
 #define LLC_GLOBAL_SAP 0xFF
 #define LLC_NULL_SAP   0x00    /* not network-layer visible */
 #define LLC_PDU_TYPE_U_MASK    0x03    /* 8-bit control field */
 #define LLC_PDU_TYPE_MASK      0x03
 
-#define LLC_PDU_TYPE_I 0       /* first bit */
-#define LLC_PDU_TYPE_S 1       /* first two bits */
-#define LLC_PDU_TYPE_U 3       /* first two bits */
+#define LLC_PDU_TYPE_I         0       /* first bit */
+#define LLC_PDU_TYPE_S         1       /* first two bits */
+#define LLC_PDU_TYPE_U         3       /* first two bits */
+#define LLC_PDU_TYPE_U_XID     4       /* private type for detecting XID commands */
 
 #define LLC_PDU_TYPE_IS_I(pdu) \
        ((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 1 : 0)
@@ -230,9 +233,18 @@ static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
 static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
                                       u8 ssap, u8 dsap, u8 cr)
 {
-       const int hlen = type == LLC_PDU_TYPE_U ? 3 : 4;
+       int hlen = 4; /* default value for I and S types */
        struct llc_pdu_un *pdu;
 
+       switch (type) {
+       case LLC_PDU_TYPE_U:
+               hlen = 3;
+               break;
+       case LLC_PDU_TYPE_U_XID:
+               hlen = 6;
+               break;
+       }
+
        skb_push(skb, hlen);
        skb_reset_network_header(skb);
        pdu = llc_pdu_un_hdr(skb);
@@ -374,7 +386,10 @@ static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb,
        xid_info->fmt_id = LLC_XID_FMT_ID;      /* 0x81 */
        xid_info->type   = svcs_supported;
        xid_info->rw     = rx_window << 1;      /* size of receive window */
-       skb_put(skb, sizeof(struct llc_xid_info));
+
+       /* no need to push/put since llc_pdu_header_init() has already
+        * pushed 3 + 3 bytes
+        */
 }
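
To make the arithmetic in the comment above concrete: struct llc_xid_info carries three u8 fields (fmt_id, type, rw), so llc_pdu_header_init() pushes 3 header bytes plus 3 XID bytes for the new type, matching hlen = 6 in the switch above. A compile-time check of that assumption could read:

	_Static_assert(LLC_PDU_LEN_U_XID == LLC_PDU_LEN_U + 3,
		       "XID PDU is the 3-byte U header plus 3 bytes of XID info");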
 
 /**
index cb580b0..8b5af68 100644 (file)
@@ -105,7 +105,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
 bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
                               unsigned int *size, unsigned int remaining,
                               struct mptcp_out_options *opts);
-void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
+bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
 
 void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
                         struct mptcp_out_options *opts);
@@ -227,9 +227,10 @@ static inline bool mptcp_established_options(struct sock *sk,
        return false;
 }
 
-static inline void mptcp_incoming_options(struct sock *sk,
+static inline bool mptcp_incoming_options(struct sock *sk,
                                          struct sk_buff *skb)
 {
+       return true;
 }
 
 static inline void mptcp_skb_ext_move(struct sk_buff *to,
index 09f2efe..13807ea 100644 (file)
@@ -30,7 +30,6 @@ void nf_conntrack_cleanup_net(struct net *net);
 void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);
 
 void nf_conntrack_proto_pernet_init(struct net *net);
-void nf_conntrack_proto_pernet_fini(struct net *net);
 
 int nf_conntrack_proto_init(void);
 void nf_conntrack_proto_fini(void);
index c3094b8..37e5300 100644 (file)
@@ -27,6 +27,7 @@ struct nf_tcp_net {
        u8 tcp_loose;
        u8 tcp_be_liberal;
        u8 tcp_max_retrans;
+       u8 tcp_ignore_invalid_rst;
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
        unsigned int offload_timeout;
        unsigned int offload_pickup;
index 265fffa..5859e0a 100644 (file)
@@ -360,8 +360,7 @@ enum {
 #define SCTP_SCOPE_POLICY_MAX  SCTP_SCOPE_POLICY_LINK
 
 /* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
- * 192.88.99.0/24.
+ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
  * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP
  * addresses.
  */
@@ -369,7 +368,6 @@ enum {
        ((htonl(INADDR_BROADCAST) == a) ||  \
         ipv4_is_multicast(a) ||            \
         ipv4_is_zeronet(a) ||              \
-        ipv4_is_test_198(a) ||             \
         ipv4_is_anycast_6to4(a))
 
 /* Flags used for the bind address copy functions.  */
index 32fc4a3..651bba6 100644 (file)
@@ -984,6 +984,7 @@ struct sctp_transport {
        } cacc;
 
        struct {
+               __u32 last_rtx_chunks;
                __u16 pmtu;
                __u16 probe_size;
                __u16 probe_high;
@@ -1024,8 +1025,8 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
 void sctp_transport_immediate_rtx(struct sctp_transport *);
 void sctp_transport_dst_release(struct sctp_transport *t);
 void sctp_transport_dst_confirm(struct sctp_transport *t);
-void sctp_transport_pl_send(struct sctp_transport *t);
-void sctp_transport_pl_recv(struct sctp_transport *t);
+bool sctp_transport_pl_send(struct sctp_transport *t);
+bool sctp_transport_pl_recv(struct sctp_transport *t);
 
 
 /* This is the structure we use to queue packets as they come into
index 8bdd800..f23cb25 100644 (file)
@@ -316,7 +316,9 @@ struct bpf_local_storage;
   *    @sk_timer: sock cleanup timer
   *    @sk_stamp: time stamp of last packet received
   *    @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
-  *    @sk_tsflags: SO_TIMESTAMPING socket options
+  *    @sk_tsflags: SO_TIMESTAMPING flags
+  *	@sk_bind_phc: index of the PTP virtual clock bound via
+  *	              SO_TIMESTAMPING for timestamping
   *    @sk_tskey: counter to disambiguate concurrent tstamp requests
   *    @sk_zckey: counter to order MSG_ZEROCOPY notifications
   *    @sk_socket: Identd and reporting IO signals
@@ -493,6 +495,7 @@ struct sock {
        seqlock_t               sk_stamp_seq;
 #endif
        u16                     sk_tsflags;
+       int                     sk_bind_phc;
        u8                      sk_shutdown;
        u32                     sk_tskey;
        atomic_t                sk_zckey;
@@ -2755,7 +2758,8 @@ void sock_def_readable(struct sock *sk);
 
 int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
 void sock_set_timestamp(struct sock *sk, int optname, bool valbool);
-int sock_set_timestamping(struct sock *sk, int optname, int val);
+int sock_set_timestamping(struct sock *sk, int optname,
+                         struct so_timestamping timestamping);
 
 void sock_enable_timestamps(struct sock *sk);
 void sock_no_linger(struct sock *sk);
index e668f1b..784d5c3 100644 (file)
@@ -686,6 +686,10 @@ static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
 
 static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
 {
+       /* mptcp hooks are only on the slow path */
+       if (sk_is_mptcp((struct sock *)tp))
+               return;
+
        tp->pred_flags = htonl((tp->tcp_header_len << 26) |
                               ntohl(TCP_FLAG_ACK) |
                               snd_wnd);
@@ -1705,7 +1709,6 @@ struct tcp_fastopen_context {
        struct rcu_head rcu;
 };
 
-extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
 void tcp_fastopen_active_disable(struct sock *sk);
 bool tcp_fastopen_active_should_disable(struct sock *sk);
 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
index e19c250..1066b11 100644 (file)
@@ -237,14 +237,19 @@ unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc);
 
 #ifdef CONFIG_TEGRA_MC
 struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev);
+int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev);
 #else
 static inline struct tegra_mc *
 devm_tegra_memory_controller_get(struct device *dev)
 {
        return ERR_PTR(-ENODEV);
 }
-#endif
 
-int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev);
+static inline int
+tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev)
+{
+       return -ENODEV;
+}
+#endif
 
 #endif /* __SOC_TEGRA_MC_H__ */
index 08477d7..4338789 100644 (file)
@@ -14,6 +14,7 @@ enum tegra_suspend_mode {
        TEGRA_SUSPEND_LP1, /* CPU voltage off, DRAM self-refresh */
        TEGRA_SUSPEND_LP0, /* CPU + core voltage off, DRAM self-refresh */
        TEGRA_MAX_SUSPEND_MODE,
+       TEGRA_SUSPEND_NOT_READY,
 };
 
 #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM)
@@ -28,6 +29,7 @@ void tegra_pm_clear_cpu_in_lp2(void);
 void tegra_pm_set_cpu_in_lp2(void);
 int tegra_pm_enter_lp2(void);
 int tegra_pm_park_secondary_cpu(unsigned long cpu);
+void tegra_pm_init_suspend(void);
 #else
 static inline enum tegra_suspend_mode
 tegra_pm_validate_suspend_mode(enum tegra_suspend_mode mode)
@@ -61,6 +63,10 @@ static inline int tegra_pm_park_secondary_cpu(unsigned long cpu)
 {
        return -ENOTSUPP;
 }
+
+static inline void tegra_pm_init_suspend(void)
+{
+}
 #endif /* CONFIG_PM_SLEEP */
 
 #endif /* __SOC_TEGRA_PM_H__ */
index 675849d..8e6dd8a 100644 (file)
@@ -712,6 +712,12 @@ struct snd_soc_dai_link {
        /* Do not create a PCM for this DAI link (Backend link) */
        unsigned int ignore:1;
 
+       /* When set, this flag reorders the stop sequence: the DMA
+        * controller stop sequence runs first, followed by the CPU DAI
+        * driver stop sequence.
+        */
+       unsigned int stop_dma_first:1;
+
 #ifdef CONFIG_SND_SOC_TOPOLOGY
        struct snd_soc_dobj dobj; /* For topology */
 #endif
index 3ccf591..9f73ed2 100644 (file)
@@ -174,6 +174,34 @@ enum afs_vl_operation {
        afs_VL_GetCapabilities  = 65537,        /* AFS Get VL server capabilities */
 };
 
+enum afs_cm_operation {
+       afs_CB_CallBack                 = 204,  /* AFS break callback promises */
+       afs_CB_InitCallBackState        = 205,  /* AFS initialise callback state */
+       afs_CB_Probe                    = 206,  /* AFS probe client */
+       afs_CB_GetLock                  = 207,  /* AFS get contents of CM lock table */
+       afs_CB_GetCE                    = 208,  /* AFS get cache file description */
+       afs_CB_GetXStatsVersion         = 209,  /* AFS get version of extended statistics */
+       afs_CB_GetXStats                = 210,  /* AFS get contents of extended statistics data */
+       afs_CB_InitCallBackState3       = 213,  /* AFS initialise callback state, version 3 */
+       afs_CB_ProbeUuid                = 214,  /* AFS check the client hasn't rebooted */
+};
+
+enum yfs_cm_operation {
+       yfs_CB_Probe                    = 206,  /* YFS probe client */
+       yfs_CB_GetLock                  = 207,  /* YFS get contents of CM lock table */
+       yfs_CB_XStatsVersion            = 209,  /* YFS get version of extended statistics */
+       yfs_CB_GetXStats                = 210,  /* YFS get contents of extended statistics data */
+       yfs_CB_InitCallBackState3       = 213,  /* YFS initialise callback state, version 3 */
+       yfs_CB_ProbeUuid                = 214,  /* YFS check the client hasn't rebooted */
+       yfs_CB_GetServerPrefs           = 215,
+       yfs_CB_GetCellServDV            = 216,
+       yfs_CB_GetLocalCell             = 217,
+       yfs_CB_GetCacheConfig           = 218,
+       yfs_CB_GetCellByNum             = 65537,
+       yfs_CB_TellMeAboutYourself      = 65538, /* get client capabilities */
+       yfs_CB_CallBack                 = 64204,
+};
+
 enum afs_edit_dir_op {
        afs_edit_dir_create,
        afs_edit_dir_create_error,
@@ -436,6 +464,32 @@ enum afs_cb_break_reason {
        EM(afs_YFSVL_GetCellName,               "YFSVL.GetCellName") \
        E_(afs_VL_GetCapabilities,              "VL.GetCapabilities")
 
+#define afs_cm_operations \
+       EM(afs_CB_CallBack,                     "CB.CallBack") \
+       EM(afs_CB_InitCallBackState,            "CB.InitCallBackState") \
+       EM(afs_CB_Probe,                        "CB.Probe") \
+       EM(afs_CB_GetLock,                      "CB.GetLock") \
+       EM(afs_CB_GetCE,                        "CB.GetCE") \
+       EM(afs_CB_GetXStatsVersion,             "CB.GetXStatsVersion") \
+       EM(afs_CB_GetXStats,                    "CB.GetXStats") \
+       EM(afs_CB_InitCallBackState3,           "CB.InitCallBackState3") \
+       E_(afs_CB_ProbeUuid,                    "CB.ProbeUuid")
+
+#define yfs_cm_operations \
+       EM(yfs_CB_Probe,                        "YFSCB.Probe") \
+       EM(yfs_CB_GetLock,                      "YFSCB.GetLock") \
+       EM(yfs_CB_XStatsVersion,                "YFSCB.XStatsVersion") \
+       EM(yfs_CB_GetXStats,                    "YFSCB.GetXStats") \
+       EM(yfs_CB_InitCallBackState3,           "YFSCB.InitCallBackState3") \
+       EM(yfs_CB_ProbeUuid,                    "YFSCB.ProbeUuid") \
+       EM(yfs_CB_GetServerPrefs,               "YFSCB.GetServerPrefs") \
+       EM(yfs_CB_GetCellServDV,                "YFSCB.GetCellServDV") \
+       EM(yfs_CB_GetLocalCell,                 "YFSCB.GetLocalCell") \
+       EM(yfs_CB_GetCacheConfig,               "YFSCB.GetCacheConfig") \
+       EM(yfs_CB_GetCellByNum,                 "YFSCB.GetCellByNum") \
+       EM(yfs_CB_TellMeAboutYourself,          "YFSCB.TellMeAboutYourself") \
+       E_(yfs_CB_CallBack,                     "YFSCB.CallBack")
+
 #define afs_edit_dir_ops                                 \
        EM(afs_edit_dir_create,                 "create") \
        EM(afs_edit_dir_create_error,           "c_fail") \
@@ -569,6 +623,8 @@ afs_server_traces;
 afs_cell_traces;
 afs_fs_operations;
 afs_vl_operations;
+afs_cm_operations;
+yfs_cm_operations;
 afs_edit_dir_ops;
 afs_edit_dir_reasons;
 afs_eproto_causes;
@@ -649,20 +705,21 @@ TRACE_EVENT(afs_cb_call,
 
            TP_STRUCT__entry(
                    __field(unsigned int,               call            )
-                   __field(const char *,               name            )
                    __field(u32,                        op              )
+                   __field(u16,                        service_id      )
                             ),
 
            TP_fast_assign(
                    __entry->call       = call->debug_id;
-                   __entry->name       = call->type->name;
                    __entry->op         = call->operation_ID;
+                   __entry->service_id = call->service_id;
                           ),
 
-           TP_printk("c=%08x %s o=%u",
+           TP_printk("c=%08x %s",
                      __entry->call,
-                     __entry->name,
-                     __entry->op)
+                     __entry->service_id == 2501 ?
+                     __print_symbolic(__entry->op, yfs_cm_operations) :
+                     __print_symbolic(__entry->op, afs_cm_operations))
            );
 
 TRACE_EVENT(afs_call,
index 2399073..78c448c 100644 (file)
@@ -136,7 +136,7 @@ DECLARE_EVENT_CLASS(net_dev_template,
                __assign_str(name, skb->dev->name);
        ),
 
-       TP_printk("dev=%s skbaddr=%p len=%u",
+       TP_printk("dev=%s skbaddr=%px len=%u",
                __get_str(name), __entry->skbaddr, __entry->len)
 )
 
index 330d32d..c3006c6 100644 (file)
@@ -41,11 +41,37 @@ TRACE_EVENT(qdisc_dequeue,
                __entry->txq_state      = txq->state;
        ),
 
-       TP_printk("dequeue ifindex=%d qdisc handle=0x%X parent=0x%X txq_state=0x%lX packets=%d skbaddr=%p",
+       TP_printk("dequeue ifindex=%d qdisc handle=0x%X parent=0x%X txq_state=0x%lX packets=%d skbaddr=%px",
                  __entry->ifindex, __entry->handle, __entry->parent,
                  __entry->txq_state, __entry->packets, __entry->skbaddr )
 );
 
+TRACE_EVENT(qdisc_enqueue,
+
+       TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq, struct sk_buff *skb),
+
+       TP_ARGS(qdisc, txq, skb),
+
+       TP_STRUCT__entry(
+               __field(struct Qdisc *, qdisc)
+               __field(void *, skbaddr)
+               __field(int, ifindex)
+               __field(u32, handle)
+               __field(u32, parent)
+       ),
+
+       TP_fast_assign(
+               __entry->qdisc = qdisc;
+               __entry->skbaddr = skb;
+               __entry->ifindex = txq->dev ? txq->dev->ifindex : 0;
+               __entry->handle  = qdisc->handle;
+               __entry->parent  = qdisc->parent;
+       ),
+
+       TP_printk("enqueue ifindex=%d qdisc handle=0x%X parent=0x%X skbaddr=%px",
+                 __entry->ifindex, __entry->handle, __entry->parent, __entry->skbaddr)
+);
+
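A hedged sketch of where the new tracepoint would fire; the actual hook sits in the core transmit path and may differ in detail:

	static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
				     struct sk_buff **to_free,
				     struct netdev_queue *txq)
	{
		int rc;

		rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
		if (rc == NET_XMIT_SUCCESS)
			trace_qdisc_enqueue(q, txq, skb);
		return rc;
	}
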
 TRACE_EVENT(qdisc_reset,
 
        TP_PROTO(struct Qdisc *q),
index c7135c9..b3b9371 100644 (file)
@@ -46,6 +46,7 @@ enum {
        ETHTOOL_MSG_FEC_SET,
        ETHTOOL_MSG_MODULE_EEPROM_GET,
        ETHTOOL_MSG_STATS_GET,
+       ETHTOOL_MSG_PHC_VCLOCKS_GET,
 
        /* add new constants above here */
        __ETHTOOL_MSG_USER_CNT,
@@ -88,6 +89,7 @@ enum {
        ETHTOOL_MSG_FEC_NTF,
        ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY,
        ETHTOOL_MSG_STATS_GET_REPLY,
+       ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY,
 
        /* add new constants above here */
        __ETHTOOL_MSG_KERNEL_CNT,
@@ -440,6 +442,19 @@ enum {
        ETHTOOL_A_TSINFO_MAX = (__ETHTOOL_A_TSINFO_CNT - 1)
 };
 
+/* PHC VCLOCKS */
+
+enum {
+       ETHTOOL_A_PHC_VCLOCKS_UNSPEC,
+       ETHTOOL_A_PHC_VCLOCKS_HEADER,                   /* nest - _A_HEADER_* */
+       ETHTOOL_A_PHC_VCLOCKS_NUM,                      /* u32 */
+       ETHTOOL_A_PHC_VCLOCKS_INDEX,                    /* array, s32 */
+
+       /* add new constants above here */
+       __ETHTOOL_A_PHC_VCLOCKS_CNT,
+       ETHTOOL_A_PHC_VCLOCKS_MAX = (__ETHTOOL_A_PHC_VCLOCKS_CNT - 1)
+};
+
 /* CABLE TEST */
 
 enum {
index e33997b..edc346a 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: LGPL-2.1 WITH Linux-syscall-note */
 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
 #ifndef _USR_IDXD_H_
 #define _USR_IDXD_H_
index 7ed0b3d..fcc61c7 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/types.h>
 #include <linux/socket.h>   /* for SO_TIMESTAMPING */
 
-/* SO_TIMESTAMPING gets an integer bit field comprised of these values */
+/* SO_TIMESTAMPING flags */
 enum {
        SOF_TIMESTAMPING_TX_HARDWARE = (1<<0),
        SOF_TIMESTAMPING_TX_SOFTWARE = (1<<1),
@@ -30,8 +30,9 @@ enum {
        SOF_TIMESTAMPING_OPT_STATS = (1<<12),
        SOF_TIMESTAMPING_OPT_PKTINFO = (1<<13),
        SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14),
+       SOF_TIMESTAMPING_BIND_PHC = (1 << 15),
 
-       SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_TX_SWHW,
+       SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_BIND_PHC,
        SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) |
                                 SOF_TIMESTAMPING_LAST
 };
@@ -47,6 +48,18 @@ enum {
                                         SOF_TIMESTAMPING_TX_ACK)
 
 /**
+ * struct so_timestamping - SO_TIMESTAMPING parameter
+ *
+ * @flags:     SO_TIMESTAMPING flags
+ * @bind_phc:  Index of the PTP virtual clock bound to the socket. Only
+ *             valid when the SOF_TIMESTAMPING_BIND_PHC flag is set.
+ */
+struct so_timestamping {
+       int flags;
+       int bind_phc;
+};
+
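A minimal userspace sketch of the extended parameter; the PHC index and the flag combination are illustrative:

	#include <string.h>
	#include <sys/socket.h>
	#include <linux/net_tstamp.h>

	static int bind_sock_to_phc(int fd, int phc_index)
	{
		struct so_timestamping ts;

		memset(&ts, 0, sizeof(ts));
		ts.flags = SOF_TIMESTAMPING_RX_HARDWARE |
			   SOF_TIMESTAMPING_RAW_HARDWARE |
			   SOF_TIMESTAMPING_BIND_PHC;
		ts.bind_phc = phc_index;	/* PTP virtual clock index */

		return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
				  &ts, sizeof(ts));
	}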
+/**
  * struct hwtstamp_config - %SIOCGHWTSTAMP and %SIOCSHWTSTAMP parameter
  *
  * @flags:     no flags defined right now, must be zero for %SIOCSHWTSTAMP
index 45c8d3b..0af9c11 100644 (file)
@@ -61,7 +61,7 @@ enum nfulnl_attr_type {
        NFULA_HWTYPE,                   /* hardware type */
        NFULA_HWHEADER,                 /* hardware header */
        NFULA_HWLEN,                    /* hardware header length */
-       NFULA_CT,                       /* nf_conntrack_netlink.h */
+       NFULA_CT,                       /* nfnetlink_conntrack.h */
        NFULA_CT_INFO,                  /* enum ip_conntrack_info */
        NFULA_VLAN,                     /* nested attribute: packet vlan info */
        NFULA_L2HDR,                    /* full L2 header */
index bcb2cb5..aed90c4 100644 (file)
@@ -51,11 +51,11 @@ enum nfqnl_attr_type {
        NFQA_IFINDEX_PHYSOUTDEV,        /* __u32 ifindex */
        NFQA_HWADDR,                    /* nfqnl_msg_packet_hw */
        NFQA_PAYLOAD,                   /* opaque data payload */
-       NFQA_CT,                        /* nf_conntrack_netlink.h */
+       NFQA_CT,                        /* nfnetlink_conntrack.h */
        NFQA_CT_INFO,                   /* enum ip_conntrack_info */
        NFQA_CAP_LEN,                   /* __u32 length of captured packet */
        NFQA_SKB_INFO,                  /* __u32 skb meta information */
-       NFQA_EXP,                       /* nf_conntrack_netlink.h */
+       NFQA_EXP,                       /* nfnetlink_conntrack.h */
        NFQA_UID,                       /* __u32 sk uid */
        NFQA_GID,                       /* __u32 sk gid */
        NFQA_SECCTX,                    /* security context string */
index 70a8057..f74155f 100644 (file)
@@ -55,6 +55,7 @@
 #define VIRTIO_ID_FS                   26 /* virtio filesystem */
 #define VIRTIO_ID_PMEM                 27 /* virtio pmem */
 #define VIRTIO_ID_MAC80211_HWSIM       29 /* virtio mac80211-hwsim */
+#define VIRTIO_ID_SCMI                 32 /* virtio SCMI */
 #define VIRTIO_ID_BT                   40 /* virtio bluetooth */
 
 /*
diff --git a/include/uapi/linux/virtio_scmi.h b/include/uapi/linux/virtio_scmi.h
new file mode 100644 (file)
index 0000000..f8ddd04
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2020-2021 OpenSynergy GmbH
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#ifndef _UAPI_LINUX_VIRTIO_SCMI_H
+#define _UAPI_LINUX_VIRTIO_SCMI_H
+
+#include <linux/virtio_types.h>
+
+/* Device implements some SCMI notifications, or delayed responses. */
+#define VIRTIO_SCMI_F_P2A_CHANNELS 0
+
+/* Device implements any SCMI statistics shared memory region */
+#define VIRTIO_SCMI_F_SHARED_MEMORY 1
+
+/* Virtqueues */
+
+#define VIRTIO_SCMI_VQ_TX 0 /* cmdq */
+#define VIRTIO_SCMI_VQ_RX 1 /* eventq */
+#define VIRTIO_SCMI_VQ_MAX_CNT 2
+
+#endif /* _UAPI_LINUX_VIRTIO_SCMI_H */
index 26b638a..a7085e0 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB */
 /*
  * Copyright (c) 2006 - 2021 Intel Corporation.  All rights reserved.
  * Copyright (c) 2005 Topspin Communications.  All rights reserved.
index bb0d6e6..55f9f77 100644 (file)
@@ -1847,7 +1847,6 @@ config SLUB_DEBUG
        default y
        bool "Enable SLUB debugging support" if EXPERT
        depends on SLUB && SYSFS
-       select STACKDEPOT if STACKTRACE_SUPPORT
        help
          SLUB has extensive debug support features. Disabling these can
          result in significant savings in code size. This also disables
index 034ad93..b1a5fc0 100644 (file)
@@ -32,6 +32,8 @@
 #include <linux/perf_event.h>
 #include <linux/extable.h>
 #include <linux/log2.h>
+
+#include <asm/barrier.h>
 #include <asm/unaligned.h>
 
 /* Registers */
@@ -1377,6 +1379,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
                /* Non-UAPI available opcodes. */
                [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
                [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
+               [BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
                [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
                [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
                [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
@@ -1621,7 +1624,21 @@ out:
        COND_JMP(s, JSGE, >=)
        COND_JMP(s, JSLE, <=)
 #undef COND_JMP
-       /* STX and ST and LDX*/
+       /* ST, STX and LDX*/
+       ST_NOSPEC:
+               /* Speculation barrier for mitigating Speculative Store Bypass.
+                * In case of arm64, we rely on the firmware mitigation as
+                * controlled via the ssbd kernel parameter. Whenever the
+                * mitigation is enabled, it works for all of the kernel code
+                * with no need to provide any additional instructions here.
+                * In case of x86, we use 'lfence' insn for mitigation. We
+                * reuse preexisting logic from Spectre v1 mitigation that
+                * happens to produce the required code on x86 for v4 as well.
+                */
+#ifdef CONFIG_X86
+               barrier_nospec();
+#endif
+               CONT;
 #define LDST(SIZEOP, SIZE)                                             \
        STX_MEM_##SIZEOP:                                               \
                *(SIZE *)(unsigned long) (DST + insn->off) = SRC;       \
@@ -2236,8 +2253,14 @@ static void bpf_prog_free_deferred(struct work_struct *work)
 #endif
        if (aux->dst_trampoline)
                bpf_trampoline_put(aux->dst_trampoline);
-       for (i = 0; i < aux->func_cnt; i++)
+       for (i = 0; i < aux->func_cnt; i++) {
+               /* We can just unlink the subprog poke descriptor table as
+                * it was originally linked to the main program and is also
+                * released along with it.
+                */
+               aux->func[i]->aux->poke_tab = NULL;
                bpf_jit_free(aux->func[i]);
+       }
        if (aux->func_cnt) {
                kfree(aux->func);
                bpf_prog_unlock_free(aux->prog);
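
For reference, the BPF_ST_NOSPEC() constructor used by the verifier change below is defined elsewhere in this series; a sketch consistent with the existing BPF_* insn macros would be:

	#define BPF_ST_NOSPEC()						\
		((struct bpf_insn) {					\
			.code  = BPF_ST | BPF_NOSPEC,			\
			.dst_reg = 0,					\
			.src_reg = 0,					\
			.off   = 0,					\
			.imm   = 0 })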
index 2546daf..fdc2089 100644 (file)
@@ -558,7 +558,8 @@ int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
 
        if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
                for (i = 0; i < map->max_entries; i++) {
-                       dst = READ_ONCE(dtab->netdev_map[i]);
+                       dst = rcu_dereference_check(dtab->netdev_map[i],
+                                                   rcu_read_lock_bh_held());
                        if (!is_valid_dst(dst, xdp, exclude_ifindex))
                                continue;
 
@@ -654,7 +655,8 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
 
        if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
                for (i = 0; i < map->max_entries; i++) {
-                       dst = READ_ONCE(dtab->netdev_map[i]);
+                       dst = rcu_dereference_check(dtab->netdev_map[i],
+                                                   rcu_read_lock_bh_held());
                        if (!dst || dst->dev->ifindex == exclude_ifindex)
                                continue;
 
index bbfc6bb..ca3cd9a 100644 (file)
@@ -206,15 +206,17 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
                        verbose(cbs->private_data, "BUG_%02x\n", insn->code);
                }
        } else if (class == BPF_ST) {
-               if (BPF_MODE(insn->code) != BPF_MEM) {
+               if (BPF_MODE(insn->code) == BPF_MEM) {
+                       verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
+                               insn->code,
+                               bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
+                               insn->dst_reg,
+                               insn->off, insn->imm);
+               } else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) {
+                       verbose(cbs->private_data, "(%02x) nospec\n", insn->code);
+               } else {
                        verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
-                       return;
                }
-               verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
-                       insn->code,
-                       bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
-                       insn->dst_reg,
-                       insn->off, insn->imm);
        } else if (class == BPF_LDX) {
                if (BPF_MODE(insn->code) != BPF_MEM) {
                        verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);
index be38bb9..f9bda54 100644 (file)
@@ -2610,6 +2610,19 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
        cur = env->cur_state->frame[env->cur_state->curframe];
        if (value_regno >= 0)
                reg = &cur->regs[value_regno];
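+	/* Speculative store bypass (v4) mitigation: flag stack stores that
+	 * spill a pointer or touch slots not fully written before, so that
+	 * convert_ctx_accesses() emits a nospec barrier right after them.
+	 */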
+       if (!env->bypass_spec_v4) {
+               bool sanitize = reg && is_spillable_regtype(reg->type);
+
+               for (i = 0; i < size; i++) {
+                       if (state->stack[spi].slot_type[i] == STACK_INVALID) {
+                               sanitize = true;
+                               break;
+                       }
+               }
+
+               if (sanitize)
+                       env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
+       }
 
        if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) &&
            !register_is_null(reg) && env->bpf_capable) {
@@ -2632,47 +2645,10 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
                        verbose(env, "invalid size of register spill\n");
                        return -EACCES;
                }
-
                if (state != cur && reg->type == PTR_TO_STACK) {
                        verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
                        return -EINVAL;
                }
-
-               if (!env->bypass_spec_v4) {
-                       bool sanitize = false;
-
-                       if (state->stack[spi].slot_type[0] == STACK_SPILL &&
-                           register_is_const(&state->stack[spi].spilled_ptr))
-                               sanitize = true;
-                       for (i = 0; i < BPF_REG_SIZE; i++)
-                               if (state->stack[spi].slot_type[i] == STACK_MISC) {
-                                       sanitize = true;
-                                       break;
-                               }
-                       if (sanitize) {
-                               int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
-                               int soff = (-spi - 1) * BPF_REG_SIZE;
-
-                               /* detected reuse of integer stack slot with a pointer
-                                * which means either llvm is reusing stack slot or
-                                * an attacker is trying to exploit CVE-2018-3639
-                                * (speculative store bypass)
-                                * Have to sanitize that slot with preemptive
-                                * store of zero.
-                                */
-                               if (*poff && *poff != soff) {
-                                       /* disallow programs where single insn stores
-                                        * into two different stack slots, since verifier
-                                        * cannot sanitize them
-                                        */
-                                       verbose(env,
-                                               "insn %d cannot access two stack slots fp%d and fp%d",
-                                               insn_idx, *poff, soff);
-                                       return -EINVAL;
-                               }
-                               *poff = soff;
-                       }
-               }
                save_register_state(state, spi, reg);
        } else {
                u8 type = STACK_MISC;
@@ -3677,6 +3653,8 @@ continue_func:
        if (tail_call_reachable)
                for (j = 0; j < frame; j++)
                        subprog[ret_prog[j]].tail_call_reachable = true;
+       if (subprog[0].tail_call_reachable)
+               env->prog->aux->tail_call_reachable = true;
 
        /* end of for() loop means the last insn of the 'subprog'
         * was reached. Doesn't matter whether it was JA or EXIT
@@ -6559,6 +6537,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
                alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
                alu_state |= ptr_is_dst_reg ?
                             BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+
+               /* Limit pruning on unknown scalars to enable deep search for
+                * potential masking differences from other program paths.
+                */
+               if (!off_is_imm)
+                       env->explore_alu_limits = true;
        }
 
        err = update_alu_sanitation_state(aux, alu_state, alu_limit);
@@ -9934,8 +9918,8 @@ next:
 }
 
 /* Returns true if (rold safe implies rcur safe) */
-static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
-                   struct bpf_id_pair *idmap)
+static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
+                   struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
 {
        bool equal;
 
@@ -9961,6 +9945,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
                return false;
        switch (rold->type) {
        case SCALAR_VALUE:
+               if (env->explore_alu_limits)
+                       return false;
                if (rcur->type == SCALAR_VALUE) {
                        if (!rold->precise && !rcur->precise)
                                return true;
@@ -10051,9 +10037,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
        return false;
 }
 
-static bool stacksafe(struct bpf_func_state *old,
-                     struct bpf_func_state *cur,
-                     struct bpf_id_pair *idmap)
+static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
+                     struct bpf_func_state *cur, struct bpf_id_pair *idmap)
 {
        int i, spi;
 
@@ -10098,9 +10083,8 @@ static bool stacksafe(struct bpf_func_state *old,
                        continue;
                if (old->stack[spi].slot_type[0] != STACK_SPILL)
                        continue;
-               if (!regsafe(&old->stack[spi].spilled_ptr,
-                            &cur->stack[spi].spilled_ptr,
-                            idmap))
+               if (!regsafe(env, &old->stack[spi].spilled_ptr,
+                            &cur->stack[spi].spilled_ptr, idmap))
                        /* when explored and current stack slot are both storing
                         * spilled registers, check that stored pointers types
                         * are the same as well.
@@ -10157,10 +10141,11 @@ static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_stat
 
        memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
        for (i = 0; i < MAX_BPF_REG; i++)
-               if (!regsafe(&old->regs[i], &cur->regs[i], env->idmap_scratch))
+               if (!regsafe(env, &old->regs[i], &cur->regs[i],
+                            env->idmap_scratch))
                        return false;
 
-       if (!stacksafe(old, cur, env->idmap_scratch))
+       if (!stacksafe(env, old, cur, env->idmap_scratch))
                return false;
 
        if (!refsafe(old, cur))
@@ -11904,35 +11889,33 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 
        for (i = 0; i < insn_cnt; i++, insn++) {
                bpf_convert_ctx_access_t convert_ctx_access;
+               bool ctx_access;
 
                if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
                    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
                    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
-                   insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
+                   insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
                        type = BPF_READ;
-               else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
-                        insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
-                        insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
-                        insn->code == (BPF_STX | BPF_MEM | BPF_DW))
+                       ctx_access = true;
+               } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
+                          insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
+                          insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
+                          insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
+                          insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
+                          insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
+                          insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
+                          insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
                        type = BPF_WRITE;
-               else
+                       ctx_access = BPF_CLASS(insn->code) == BPF_STX;
+               } else {
                        continue;
+               }
 
                if (type == BPF_WRITE &&
-                   env->insn_aux_data[i + delta].sanitize_stack_off) {
+                   env->insn_aux_data[i + delta].sanitize_stack_spill) {
                        struct bpf_insn patch[] = {
-                               /* Sanitize suspicious stack slot with zero.
-                                * There are no memory dependencies for this store,
-                                * since it's only using frame pointer and immediate
-                                * constant of zero
-                                */
-                               BPF_ST_MEM(BPF_DW, BPF_REG_FP,
-                                          env->insn_aux_data[i + delta].sanitize_stack_off,
-                                          0),
-                               /* the original STX instruction will immediately
-                                * overwrite the same stack slot with appropriate value
-                                */
                                *insn,
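+				/* speculation barrier right after the store */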
+                               BPF_ST_NOSPEC(),
                        };
 
                        cnt = ARRAY_SIZE(patch);
@@ -11946,6 +11929,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                        continue;
                }
 
+               if (!ctx_access)
+                       continue;
+
                switch (env->insn_aux_data[i + delta].ptr_type) {
                case PTR_TO_CTX:
                        if (!ops->convert_ctx_access)
@@ -12121,33 +12107,19 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                        goto out_free;
                func[i]->is_func = 1;
                func[i]->aux->func_idx = i;
-               /* the btf and func_info will be freed only at prog->aux */
+               /* Below members will be freed only at prog->aux */
                func[i]->aux->btf = prog->aux->btf;
                func[i]->aux->func_info = prog->aux->func_info;
+               func[i]->aux->poke_tab = prog->aux->poke_tab;
+               func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
 
                for (j = 0; j < prog->aux->size_poke_tab; j++) {
-                       u32 insn_idx = prog->aux->poke_tab[j].insn_idx;
-                       int ret;
+                       struct bpf_jit_poke_descriptor *poke;
 
-                       if (!(insn_idx >= subprog_start &&
-                             insn_idx <= subprog_end))
-                               continue;
-
-                       ret = bpf_jit_add_poke_descriptor(func[i],
-                                                         &prog->aux->poke_tab[j]);
-                       if (ret < 0) {
-                               verbose(env, "adding tail call poke descriptor failed\n");
-                               goto out_free;
-                       }
-
-                       func[i]->insnsi[insn_idx - subprog_start].imm = ret + 1;
-
-                       map_ptr = func[i]->aux->poke_tab[ret].tail_call.map;
-                       ret = map_ptr->ops->map_poke_track(map_ptr, func[i]->aux);
-                       if (ret < 0) {
-                               verbose(env, "tracking tail call prog failed\n");
-                               goto out_free;
-                       }
+                       poke = &prog->aux->poke_tab[j];
+                       if (poke->insn_idx < subprog_end &&
+                           poke->insn_idx >= subprog_start)
+                               poke->aux = func[i]->aux;
                }
 
                /* Use bpf_prog_F_tag to indicate functions in stack traces.
@@ -12178,18 +12150,6 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                cond_resched();
        }
 
-       /* Untrack main program's aux structs so that during map_poke_run()
-        * we will not stumble upon the unfilled poke descriptors; each
-        * of the main program's poke descs got distributed across subprogs
-        * and got tracked onto map, so we are sure that none of them will
-        * be missed after the operation below
-        */
-       for (i = 0; i < prog->aux->size_poke_tab; i++) {
-               map_ptr = prog->aux->poke_tab[i].tail_call.map;
-
-               map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
-       }
-
        /* at this point all bpf functions were successfully JITed
         * now populate all bpf_calls with correct addresses and
         * run last pass of JIT
@@ -12267,14 +12227,22 @@ static int jit_subprogs(struct bpf_verifier_env *env)
        bpf_prog_jit_attempt_done(prog);
        return 0;
 out_free:
+       /* We failed JIT'ing, so at this point we need to unregister poke
+        * descriptors from subprogs, so that the kernel is not attempting to
+        * patch them anymore as we're freeing the subprog JIT memory.
+        */
+       for (i = 0; i < prog->aux->size_poke_tab; i++) {
+               map_ptr = prog->aux->poke_tab[i].tail_call.map;
+               map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
+       }
+       /* At this point we're guaranteed that poke descriptors are not
+        * live anymore. We can just unlink the descriptor table, as it's
+        * released with the main prog.
+        */
        for (i = 0; i < env->subprog_cnt; i++) {
                if (!func[i])
                        continue;
-
-               for (j = 0; j < func[i]->aux->size_poke_tab; j++) {
-                       map_ptr = func[i]->aux->poke_tab[j].tail_call.map;
-                       map_ptr->ops->map_poke_untrack(map_ptr, func[i]->aux);
-               }
+               func[i]->aux->poke_tab = NULL;
                bpf_jit_free(func[i]);
        }
        kfree(func);
@@ -12768,37 +12736,6 @@ static void free_states(struct bpf_verifier_env *env)
        }
 }
 
-/* The verifier is using insn_aux_data[] to store temporary data during
- * verification and to store information for passes that run after the
- * verification like dead code sanitization. do_check_common() for subprogram N
- * may analyze many other subprograms. sanitize_insn_aux_data() clears all
- * temporary data after do_check_common() finds that subprogram N cannot be
- * verified independently. pass_cnt counts the number of times
- * do_check_common() was run and insn->aux->seen tells the pass number
- * insn_aux_data was touched. These variables are compared to clear temporary
- * data from failed pass. For testing and experiments do_check_common() can be
- * run multiple times even when prior attempt to verify is unsuccessful.
- *
- * Note that special handling is needed on !env->bypass_spec_v1 if this is
- * ever called outside of error path with subsequent program rejection.
- */
-static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
-{
-       struct bpf_insn *insn = env->prog->insnsi;
-       struct bpf_insn_aux_data *aux;
-       int i, class;
-
-       for (i = 0; i < env->prog->len; i++) {
-               class = BPF_CLASS(insn[i].code);
-               if (class != BPF_LDX && class != BPF_STX)
-                       continue;
-               aux = &env->insn_aux_data[i];
-               if (aux->seen != env->pass_cnt)
-                       continue;
-               memset(aux, 0, offsetof(typeof(*aux), orig_idx));
-       }
-}
-
 static int do_check_common(struct bpf_verifier_env *env, int subprog)
 {
        bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
@@ -12875,9 +12812,6 @@ out:
        if (!ret && pop_log)
                bpf_vlog_reset(&env->log, 0);
        free_states(env);
-       if (ret)
-               /* clean aux data in case subprog was rejected */
-               sanitize_insn_aux_data(env);
        return ret;
 }
 
index ee93b6e..de2c432 100644 (file)
@@ -911,13 +911,11 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
 
        opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
        if (opt == -ENOPARAM) {
-               if (strcmp(param->key, "source") == 0) {
-                       if (fc->source)
-                               return invalf(fc, "Multiple sources not supported");
-                       fc->source = param->string;
-                       param->string = NULL;
-                       return 0;
-               }
+               int ret;
+
+               ret = vfs_parse_fs_param_source(fc, param);
+               if (ret != -ENOPARAM)
+                       return ret;
                for_each_subsys(ss, i) {
                        if (strcmp(param->key, ss->legacy_name))
                                continue;
@@ -1223,9 +1221,7 @@ int cgroup1_get_tree(struct fs_context *fc)
                ret = cgroup_do_get_tree(fc);
 
        if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
-               struct super_block *sb = fc->root->d_sb;
-               dput(fc->root);
-               deactivate_locked_super(sb);
+               fc_drop_locked(fc);
                ret = 1;
        }
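
The removed open-coded "source" branch moved into the generic vfs_parse_fs_param_source() helper; its contract is to either consume the key or return -ENOPARAM so the caller keeps matching other parameters. A self-contained sketch of that contract with hypothetical types (ENOPARAM is a kernel-internal errno, defined here only for illustration):

	#include <errno.h>
	#include <string.h>

	#define ENOPARAM 519	/* kernel-internal errno, not in userspace headers */

	struct fs_ctx   { char *source; };
	struct fs_param { const char *key; char *string; };

	static int parse_param_source(struct fs_ctx *fc, struct fs_param *param)
	{
		if (strcmp(param->key, "source") != 0)
			return -ENOPARAM;	/* not ours; caller keeps matching */
		if (fc->source)
			return -EINVAL;		/* "Multiple sources not supported" */
		fc->source = param->string;
		param->string = NULL;		/* ownership moves to the context */
		return 0;
	}
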
 
index 8372897..b6f28fa 100644 (file)
@@ -1045,8 +1045,8 @@ int gdb_serial_stub(struct kgdb_state *ks)
                                gdb_cmd_detachkill(ks);
                                return DBG_PASS_EVENT;
                        }
-#endif
                        fallthrough;
+#endif
                case 'C': /* Exception passing */
                        tmp = gdb_cmd_exception_pass(ks);
                        if (tmp > 0)
index 910ae69..af4a6ef 100644 (file)
@@ -5,6 +5,13 @@
  */
 #include <linux/dma-map-ops.h>
 
+static struct page *dma_common_vaddr_to_page(void *cpu_addr)
+{
+       if (is_vmalloc_addr(cpu_addr))
+               return vmalloc_to_page(cpu_addr);
+       return virt_to_page(cpu_addr);
+}
+
 /*
  * Create scatter-list for the already allocated DMA buffer.
  */
@@ -12,7 +19,7 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
                 unsigned long attrs)
 {
-       struct page *page = virt_to_page(cpu_addr);
+       struct page *page = dma_common_vaddr_to_page(cpu_addr);
        int ret;
 
        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
@@ -32,6 +39,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;
+       struct page *page = dma_common_vaddr_to_page(cpu_addr);
        int ret = -ENXIO;
 
        vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
@@ -43,7 +51,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                return -ENXIO;
 
        return remap_pfn_range(vma, vma->vm_start,
-                       page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
+                       page_to_pfn(page) + vma->vm_pgoff,
                        user_count << PAGE_SHIFT, vma->vm_page_prot);
 #else
        return -ENXIO;
index 313d454..d998a76 100644 (file)
@@ -487,13 +487,13 @@ ref_scale_reader(void *arg)
        s64 duration;
 
        VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: task started", me);
-       set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
+       WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)));
        set_user_nice(current, MAX_NICE);
        atomic_inc(&n_init);
        if (holdoff)
                schedule_timeout_interruptible(holdoff * HZ);
 repeat:
-       VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, smp_processor_id());
+       VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, raw_smp_processor_id());
 
        // Wait for signal that this reader can start.
        wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||
@@ -503,7 +503,7 @@ repeat:
                goto end;
 
        // Make sure that the CPU is affinitized appropriately during testing.
-       WARN_ON_ONCE(smp_processor_id() != me);
+       WARN_ON_ONCE(raw_smp_processor_id() != me);
 
        WRITE_ONCE(rt->start_reader, 0);
        if (!atomic_dec_return(&n_started))
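
Both changes in this excerpt harden a preemptible path: the affinity call's return value is now checked instead of ignored, and the checked smp_processor_id() becomes raw_smp_processor_id(), whose possibly-stale answer is acceptable for log output. A user-space analogue of the same two points, assuming glibc's sched_setaffinity() and sched_getcpu():

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	static void pin_to(int cpu)
	{
		cpu_set_t set;

		CPU_ZERO(&set);
		CPU_SET(cpu, &set);
		/* complain (but keep going) if pinning fails, mirroring
		 * the WARN_ON_ONCE() above */
		if (sched_setaffinity(0, sizeof(set), &set))
			fprintf(stderr, "pinning to cpu %d failed\n", cpu);
		/* like raw_smp_processor_id(), this may be stale by the time
		 * it is printed; fine for diagnostics, not for indexing
		 * per-CPU data */
		printf("now on cpu %d\n", sched_getcpu());
	}
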
index 03a118d..8536c55 100644 (file)
@@ -953,10 +953,9 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
                in_qs = likely(!t->trc_reader_nesting);
        }
 
-       // Mark as checked.  Because this is called from the grace-period
-       // kthread, also remove the task from the holdout list.
+       // Mark as checked so that the grace-period kthread will
+       // remove it from the holdout list.
        t->trc_reader_checked = true;
-       trc_del_holdout(t);
 
        if (in_qs)
                return true;  // Already in quiescent state, done!!!
@@ -983,7 +982,6 @@ static void trc_wait_for_one_reader(struct task_struct *t,
        // The current task had better be in a quiescent state.
        if (t == current) {
                t->trc_reader_checked = true;
-               trc_del_holdout(t);
                WARN_ON_ONCE(t->trc_reader_nesting);
                return;
        }
index 3f937b2..6c76988 100644 (file)
@@ -795,9 +795,9 @@ void show_rcu_gp_kthreads(void)
        jr = j - data_race(rcu_state.gp_req_activity);
        js = j - data_race(rcu_state.gp_start);
        jw = j - data_race(rcu_state.gp_wake_time);
-       pr_info("%s: wait state: %s(%d) ->state: %#lx ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
+       pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
                rcu_state.name, gp_state_getname(rcu_state.gp_state),
-               rcu_state.gp_state, t ? t->__state : 0x1ffffL, t ? t->rt_priority : 0xffU,
+               rcu_state.gp_state, t ? t->__state : 0x1ffff, t ? t->rt_priority : 0xffU,
                js, ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
                (long)data_race(rcu_state.gp_seq),
                (long)data_race(rcu_get_root()->gp_seq_needed),
index 2377cbb..29e8fc5 100644 (file)
@@ -405,15 +405,15 @@ static int scftorture_invoker(void *arg)
 
        VERBOSE_SCFTORTOUT("scftorture_invoker %d: task started", scfp->cpu);
        cpu = scfp->cpu % nr_cpu_ids;
-       set_cpus_allowed_ptr(current, cpumask_of(cpu));
+       WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(cpu)));
        set_user_nice(current, MAX_NICE);
        if (holdoff)
                schedule_timeout_interruptible(holdoff * HZ);
 
-       VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, smp_processor_id());
+       VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, raw_smp_processor_id());
 
        // Make sure that the CPU is affinitized appropriately during testing.
-       curcpu = smp_processor_id();
+       curcpu = raw_smp_processor_id();
        WARN_ONCE(curcpu != scfp->cpu % nr_cpu_ids,
                  "%s: Wanted CPU %d, running on %d, nr_cpu_ids = %d\n",
                  __func__, scfp->cpu, curcpu, nr_cpu_ids);
index e416304..cf6acab 100644 (file)
@@ -47,7 +47,7 @@ void __init idle_thread_set_boot_cpu(void)
  *
  * Creates the thread if it does not exist.
  */
-static inline void idle_init(unsigned int cpu)
+static __always_inline void idle_init(unsigned int cpu)
 {
        struct task_struct *tsk = per_cpu(idle_threads, cpu);
 
index 29a5e54..517be7f 100644 (file)
@@ -991,6 +991,11 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
        if (!p)
                goto out;
 
+       /* Protect timer list r/w in arm_timer() */
+       sighand = lock_task_sighand(p, &flags);
+       if (unlikely(sighand == NULL))
+               goto out;
+
        /*
         * Fetch the current sample and update the timer's expiry time.
         */
@@ -1001,11 +1006,6 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
 
        bump_cpu_timer(timer, now);
 
-       /* Protect timer list r/w in arm_timer() */
-       sighand = lock_task_sighand(p, &flags);
-       if (unlikely(sighand == NULL))
-               goto out;
-
        /*
         * Now re-arm for the new expiry time.
         */
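
The hunk above moves the sighand lock acquisition ahead of the sample-and-bump step, so reading the timer state and re-arming happen inside one critical section. The generic shape of the fix, as a small pthread sketch with hypothetical names:

	#include <pthread.h>

	static pthread_mutex_t timer_lock = PTHREAD_MUTEX_INITIALIZER;
	static long expiry_ns;

	static void rearm(long now_ns, long period_ns)
	{
		long sample;

		pthread_mutex_lock(&timer_lock);	/* lock before sampling */
		sample = expiry_ns;			/* read under the lock */
		while (sample <= now_ns)
			sample += period_ns;		/* bump past 'now' */
		expiry_ns = sample;			/* re-arm under the same lock */
		pthread_mutex_unlock(&timer_lock);
	}
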
index 3fadb58..9eb11c2 100644 (file)
@@ -207,6 +207,7 @@ struct timer_base {
        unsigned int            cpu;
        bool                    next_expiry_recalc;
        bool                    is_idle;
+       bool                    timers_pending;
        DECLARE_BITMAP(pending_map, WHEEL_SIZE);
        struct hlist_head       vectors[WHEEL_SIZE];
 } ____cacheline_aligned;
@@ -595,6 +596,7 @@ static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
                 * can reevaluate the wheel:
                 */
                base->next_expiry = bucket_expiry;
+               base->timers_pending = true;
                base->next_expiry_recalc = false;
                trigger_dyntick_cpu(base, timer);
        }
@@ -1582,6 +1584,7 @@ static unsigned long __next_timer_interrupt(struct timer_base *base)
        }
 
        base->next_expiry_recalc = false;
+       base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA);
 
        return next;
 }
@@ -1633,7 +1636,6 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
        u64 expires = KTIME_MAX;
        unsigned long nextevt;
-       bool is_max_delta;
 
        /*
         * Pretend that there is no timer pending if the cpu is offline.
@@ -1646,7 +1648,6 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
        if (base->next_expiry_recalc)
                base->next_expiry = __next_timer_interrupt(base);
        nextevt = base->next_expiry;
-       is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
 
        /*
         * We have a fresh next event. Check whether we can forward the
@@ -1664,7 +1665,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
                expires = basem;
                base->is_idle = false;
        } else {
-               if (!is_max_delta)
+               if (base->timers_pending)
                        expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
                /*
                 * If we expect to sleep more than a tick, mark the base idle.
@@ -1947,6 +1948,7 @@ int timers_prepare_cpu(unsigned int cpu)
                base = per_cpu_ptr(&timer_bases[b], cpu);
                base->clk = jiffies;
                base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
+               base->timers_pending = false;
                base->is_idle = false;
        }
        return 0;
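
Since __next_timer_interrupt() is no longer guaranteed to have run by the time get_next_timer_interrupt() looks at next_expiry, the "is anything queued" answer is now cached in base->timers_pending and maintained wherever the wheel is already being touched. The bookkeeping pattern, reduced to its essentials (hypothetical struct, not the kernel's):

	struct wheel {
		unsigned long clk;
		unsigned long next_expiry;
		int timers_pending;	/* maintained at enqueue and rescan time */
	};

	static void wheel_enqueue(struct wheel *b, unsigned long expires)
	{
		if (expires < b->next_expiry)
			b->next_expiry = expires;
		b->timers_pending = 1;	/* cheap: we already hold the base here */
	}

	static unsigned long wheel_next_event(const struct wheel *b,
					      unsigned long basej)
	{
		/* consumers test the cached flag instead of comparing
		 * next_expiry against clk + NEXT_TIMER_MAX_DELTA, which may
		 * not be fresh */
		return b->timers_pending ? b->next_expiry - basej : ~0UL;
	}
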
index e6fb3e6..7b180f6 100644 (file)
@@ -5985,7 +5985,8 @@ ftrace_graph_release(struct inode *inode, struct file *file)
                 * infrastructure to do the synchronization, thus we must do it
                 * ourselves.
                 */
-               synchronize_rcu_tasks_rude();
+               if (old_hash != EMPTY_HASH)
+                       synchronize_rcu_tasks_rude();
 
                free_ftrace_hash(old_hash);
        }
@@ -7544,7 +7545,7 @@ int ftrace_is_dead(void)
  */
 int register_ftrace_function(struct ftrace_ops *ops)
 {
-       int ret = -1;
+       int ret;
 
        ftrace_ops_init(ops);
 
index d1463ea..e592d1d 100644 (file)
@@ -3880,10 +3880,30 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
        if (unlikely(!head))
                return true;
 
-       return reader->read == rb_page_commit(reader) &&
-               (commit == reader ||
-                (commit == head &&
-                 head->read == rb_page_commit(commit)));
+       /* Reader should exhaust content in reader page */
+       if (reader->read != rb_page_commit(reader))
+               return false;
+
+       /*
+        * If writers are committing on the reader page, the check above
+        * guarantees that all committed content has been read, so the
+        * ring buffer is empty.
+        */
+       if (commit == reader)
+               return true;
+
+       /*
+        * If writers are committing on a page other than reader page
+        * and head page, there should always be content to read.
+        */
+       if (commit != head)
+               return false;
+
+       /*
+        * Writers are committing on the head page; we just need to
+        * check whether any data has been committed, since the reader
+        * will swap the reader page with the head page when it needs
+        * to read data.
+        */
+       return rb_page_commit(commit) == 0;
 }
 
 /**
index f8b80b5..c59dd35 100644 (file)
@@ -5609,6 +5609,10 @@ static const char readme_msg[] =
        "\t            [:name=histname1]\n"
        "\t            [:<handler>.<action>]\n"
        "\t            [if <filter>]\n\n"
+       "\t    Note, special fields can be used as well:\n"
+       "\t            common_timestamp - to record current timestamp\n"
+       "\t            common_cpu - to record the CPU the event happened on\n"
+       "\n"
        "\t    When a matching event is hit, an entry is added to a hash\n"
        "\t    table using the key(s) and value(s) named, and the value of a\n"
        "\t    sum called 'hitcount' is incremented.  Keys and values\n"
index 0207aee..34325f4 100644 (file)
@@ -1111,7 +1111,7 @@ static const char *hist_field_name(struct hist_field *field,
                 field->flags & HIST_FIELD_FL_ALIAS)
                field_name = hist_field_name(field->operands[0], ++level);
        else if (field->flags & HIST_FIELD_FL_CPU)
-               field_name = "cpu";
+               field_name = "common_cpu";
        else if (field->flags & HIST_FIELD_FL_EXPR ||
                 field->flags & HIST_FIELD_FL_VAR_REF) {
                if (field->system) {
@@ -1689,7 +1689,9 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
        if (WARN_ON_ONCE(!field))
                goto out;
 
-       if (is_string_field(field)) {
+       /* Pointers to strings are just pointers and dangerous to dereference */
+       if (is_string_field(field) &&
+           (field->filter_type != FILTER_PTR_STRING)) {
                flags |= HIST_FIELD_FL_STRING;
 
                hist_field->size = MAX_FILTER_STR_VAL;
@@ -1989,14 +1991,24 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
                hist_data->enable_timestamps = true;
                if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
                        hist_data->attrs->ts_in_usecs = true;
-       } else if (strcmp(field_name, "cpu") == 0)
+       } else if (strcmp(field_name, "common_cpu") == 0)
                *flags |= HIST_FIELD_FL_CPU;
        else {
                field = trace_find_event_field(file->event_call, field_name);
                if (!field || !field->size) {
-                       hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name));
-                       field = ERR_PTR(-EINVAL);
-                       goto out;
+                       /*
+                        * For backward compatibility, if field_name
+                        * was "cpu", then we treat this the same as
+                        * common_cpu.
+                        */
+                       if (strcmp(field_name, "cpu") == 0) {
+                               *flags |= HIST_FIELD_FL_CPU;
+                       } else {
+                               hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
+                                        errpos(field_name));
+                               field = ERR_PTR(-EINVAL);
+                               goto out;
+                       }
                }
        }
  out:
@@ -4495,8 +4507,6 @@ static inline void add_to_key(char *compound_key, void *key,
                field = key_field->field;
                if (field->filter_type == FILTER_DYN_STRING)
                        size = *(u32 *)(rec + field->offset) >> 16;
-               else if (field->filter_type == FILTER_PTR_STRING)
-                       size = strlen(key);
                else if (field->filter_type == FILTER_STATIC_STRING)
                        size = field->size;
 
@@ -5085,7 +5095,7 @@ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
                seq_printf(m, "%s=", hist_field->var.name);
 
        if (hist_field->flags & HIST_FIELD_FL_CPU)
-               seq_puts(m, "cpu");
+               seq_puts(m, "common_cpu");
        else if (field_name) {
                if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
                    hist_field->flags & HIST_FIELD_FL_ALIAS)
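
Usage-wise, hist triggers should now name the special field common_cpu; plain "cpu" keeps working through the compatibility branch added above, but can be shadowed by an event field of the same name. A hypothetical user-space snippet, assuming tracefs is mounted at /sys/kernel/tracing and hist triggers are enabled:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/tracing/events/sched/sched_switch/trigger", "w");

		if (!f)
			return 1;
		/* per-CPU hitcount histogram keyed on the renamed field */
		fputs("hist:keys=common_cpu:vals=hitcount\n", f);
		return fclose(f) ? 1 : 0;
	}
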
index 2ac75eb..9315fc0 100644 (file)
@@ -893,15 +893,13 @@ static struct synth_event *alloc_synth_event(const char *name, int n_fields,
        dyn_event_init(&event->devent, &synth_event_ops);
 
        for (i = 0, j = 0; i < n_fields; i++) {
+               fields[i]->field_pos = i;
                event->fields[i] = fields[i];
 
-               if (fields[i]->is_dynamic) {
-                       event->dynamic_fields[j] = fields[i];
-                       event->dynamic_fields[j]->field_pos = i;
+               if (fields[i]->is_dynamic)
                        event->dynamic_fields[j++] = fields[i];
-                       event->n_dynamic_fields++;
-               }
        }
+       event->n_dynamic_fields = j;
        event->n_fields = n_fields;
  out:
        return event;
index 6e146b9..4007fe9 100644 (file)
@@ -14,10 +14,10 @@ struct synth_field {
        char *name;
        size_t size;
        unsigned int offset;
+       unsigned int field_pos;
        bool is_signed;
        bool is_string;
        bool is_dynamic;
-       bool field_pos;
 };
 
 struct synth_event {
index 976bf8c..fc32821 100644 (file)
@@ -299,8 +299,8 @@ static int tracepoint_add_func(struct tracepoint *tp,
         * a pointer to it.  This array is referenced by __DO_TRACE from
         * include/linux/tracepoint.h using rcu_dereference_sched().
         */
-       rcu_assign_pointer(tp->funcs, tp_funcs);
        tracepoint_update_call(tp, tp_funcs, false);
+       rcu_assign_pointer(tp->funcs, tp_funcs);
        static_key_enable(&tp->key);
 
        release_probes(old);
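
The two reordered lines are a publish-after-init fix: the call sites must be updated before the funcs pointer that readers dereference is published. The same ordering expressed with C11 release semantics (stand-in names; the kernel uses rcu_assign_pointer() plus a static-call update):

	#include <stdatomic.h>

	struct probe_array { int nr_probes; };

	static _Atomic(struct probe_array *) funcs;

	static void update_call_sites(struct probe_array *a)
	{
		(void)a;	/* stand-in for tracepoint_update_call() */
	}

	static void publish(struct probe_array *newa)
	{
		/* prepare the call sites first, then publish the pointer
		 * readers dereference: a reader must never follow the new
		 * pointer into an unpatched call path */
		update_call_sites(newa);
		atomic_store_explicit(&funcs, newa, memory_order_release);
	}
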
index 50142fc..f148eac 100644 (file)
@@ -3676,15 +3676,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
                                                  unbound_release_work);
        struct workqueue_struct *wq = pwq->wq;
        struct worker_pool *pool = pwq->pool;
-       bool is_last;
+       bool is_last = false;
 
-       if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
-               return;
+       /*
+        * When @pwq is not linked, it doesn't hold any reference to the
+        * @wq, so @wq must not be accessed.
+        */
+       if (!list_empty(&pwq->pwqs_node)) {
+               if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+                       return;
 
-       mutex_lock(&wq->mutex);
-       list_del_rcu(&pwq->pwqs_node);
-       is_last = list_empty(&wq->pwqs);
-       mutex_unlock(&wq->mutex);
+               mutex_lock(&wq->mutex);
+               list_del_rcu(&pwq->pwqs_node);
+               is_last = list_empty(&wq->pwqs);
+               mutex_unlock(&wq->mutex);
+       }
 
        mutex_lock(&wq_pool_mutex);
        put_unbound_pool(pool);
index d241fe4..5c9c068 100644 (file)
@@ -683,9 +683,6 @@ config PARMAN
 config OBJAGG
        tristate "objagg" if COMPILE_TEST
 
-config STRING_SELFTEST
-       tristate "Test string functions"
-
 endmenu
 
 config GENERIC_IOREMAP
index 8312127..5ddd575 100644 (file)
@@ -2180,6 +2180,9 @@ config ASYNC_RAID6_TEST
 config TEST_HEXDUMP
        tristate "Test functions located in the hexdump module at runtime"
 
+config STRING_SELFTEST
+       tristate "Test string functions at runtime"
+
 config TEST_STRING_HELPERS
        tristate "Test functions located in the string_helpers module at runtime"
 
index 8c55c47..c259842 100644 (file)
@@ -628,10 +628,8 @@ static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
 
        for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
                void *entry;
-               struct page *page;
 
                entry = xa_load(&dmirror->pt, pfn);
-               page = xa_untag_pointer(entry);
                if (xa_pointer_tag(entry) == DPT_XA_TAG_ATOMIC)
                        return -EPERM;
        }
index 271f2ca..f5561ea 100644 (file)
@@ -398,12 +398,12 @@ static void cgwb_release_workfn(struct work_struct *work)
        blkcg_unpin_online(blkcg);
 
        fprop_local_destroy_percpu(&wb->memcg_completions);
-       percpu_ref_exit(&wb->refcnt);
 
        spin_lock_irq(&cgwb_lock);
        list_del(&wb->offline_node);
        spin_unlock_irq(&cgwb_lock);
 
+       percpu_ref_exit(&wb->refcnt);
        wb_exit(wb);
        WARN_ON_ONCE(!list_empty(&wb->b_attached));
        kfree_rcu(wb, rcu);
index 924553a..dfc940d 100644 (file)
@@ -5440,8 +5440,9 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        continue;
                }
 
-               refs = min3(pages_per_huge_page(h) - pfn_offset,
-                           (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);
+               /* vaddr may not be aligned to PAGE_SIZE */
+               refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
+                   (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
 
                if (pages || vmas)
                        record_subpages_vmas(mem_map_offset(page, pfn_offset),
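
A worked example of why the vaddr alignment matters in the refs computation above, with 4 KiB pages, a 2 MiB huge page, and made-up addresses; the unaligned form undercounts the span by one page:

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define ALIGN_DOWN(x, a) ((x) & ~((unsigned long)(a) - 1))

	static unsigned long min3(unsigned long a, unsigned long b, unsigned long c)
	{
		unsigned long m = a < b ? a : b;

		return m < c ? m : c;
	}

	int main(void)
	{
		unsigned long vaddr = 0x201234UL;	/* mid huge page, unaligned */
		unsigned long vm_end = 0x400000UL;
		unsigned long left_in_huge = 512 - 1;	/* pfn_offset == 1 */
		unsigned long remainder = 1000;

		unsigned long buggy = min3(left_in_huge, remainder,
					   (vm_end - vaddr) >> PAGE_SHIFT);
		unsigned long fixed = min3(left_in_huge, remainder,
					   (vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);

		printf("buggy=%lu fixed=%lu\n", buggy, fixed);	/* 510 vs 511 */
		return 0;
	}
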
index 98e3059..d739cdd 100644 (file)
@@ -9,6 +9,7 @@
 #ifdef CONFIG_KASAN_HW_TAGS
 
 #include <linux/static_key.h>
+#include "../slab.h"
 
 DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
 extern bool kasan_flag_async __ro_after_init;
@@ -387,6 +388,17 @@ static inline void kasan_unpoison(const void *addr, size_t size, bool init)
 
        if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
                return;
+       /*
+        * Explicitly initialize the memory with the precise object size to
+        * avoid overwriting the SLAB redzone. This disables initialization in
+        * the arch code and may thus lead to a performance penalty. The
+        * penalty is accepted since SLAB redzones aren't enabled in
+        * production builds.
+        */
+       if (__slub_debug_enabled() &&
+           init && ((unsigned long)size & KASAN_GRANULE_MASK)) {
+               init = false;
+               memzero_explicit((void *)addr, size);
+       }
        size = round_up(size, KASAN_GRANULE_SIZE);
 
        hw_set_mem_tag_range((void *)addr, size, tag, init);
index d7666ac..575c685 100644 (file)
@@ -734,6 +734,22 @@ void kfence_shutdown_cache(struct kmem_cache *s)
 void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 {
        /*
+        * Perform size check before switching kfence_allocation_gate, so that
+        * we don't disable KFENCE without making an allocation.
+        */
+       if (size > PAGE_SIZE)
+               return NULL;
+
+       /*
+        * Skip allocations from non-default zones, including DMA. We cannot
+        * guarantee that pages in the KFENCE pool will have the requested
+        * properties (e.g. reside in DMAable memory).
+        */
+       if ((flags & GFP_ZONEMASK) ||
+           (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32)))
+               return NULL;
+
+       /*
         * allocation_gate only needs to become non-zero, so it doesn't make
         * sense to continue writing to it and pay the associated contention
         * cost, in case we have a large number of concurrent allocations.
@@ -757,9 +773,6 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
        if (!READ_ONCE(kfence_enabled))
                return NULL;
 
-       if (size > PAGE_SIZE)
-               return NULL;
-
        return kfence_guarded_alloc(s, size, flags);
 }
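
The point of the reordering above: kfence_allocation_gate is a one-shot sample window, so any request that cannot be served must be rejected before the gate is consumed. A compressed sketch of that ordering (guarded_alloc() is a hypothetical backend, not the KFENCE API):

	#include <stdatomic.h>
	#include <stddef.h>

	static atomic_int gate;		/* one sampled allocation per window */

	extern void *guarded_alloc(size_t size);	/* hypothetical backend */

	static void *sampled_alloc(size_t size, int dma_like)
	{
		/* cheap rejects first: consuming the gate for a request we
		 * cannot serve would waste the whole sample window (the bug
		 * fixed above) */
		if (size > 4096 || dma_like)
			return NULL;
		if (atomic_fetch_add(&gate, 1) > 0)
			return NULL;	/* window already consumed */
		return guarded_alloc(size);
	}
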
 
index 7f24b9b..942cbc1 100644 (file)
@@ -852,7 +852,7 @@ static void kfence_test_exit(void)
        tracepoint_synchronize_unregister();
 }
 
-late_initcall(kfence_test_init);
+late_initcall_sync(kfence_test_init);
 module_exit(kfence_test_exit);
 
 MODULE_LICENSE("GPL v2");
index 0041ff6..de7b553 100644 (file)
@@ -947,7 +947,8 @@ static bool should_skip_region(struct memblock_type *type,
                return true;
 
        /* skip hotpluggable memory regions if needed */
-       if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
+       if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
+           !(flags & MEMBLOCK_HOTPLUG))
                return true;
 
        /* if we want mirror memory skip non-mirror memory regions */
index ae1f5d0..eb8e87c 100644 (file)
@@ -3574,7 +3574,8 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
        unsigned long val;
 
        if (mem_cgroup_is_root(memcg)) {
-               cgroup_rstat_flush(memcg->css.cgroup);
+               /* mem_cgroup_threshold() calls here from irqsafe context */
+               cgroup_rstat_flush_irqsafe(memcg->css.cgroup);
                val = memcg_page_state(memcg, NR_FILE_PAGES) +
                        memcg_page_state(memcg, NR_ANON_MAPPED);
                if (swap)
index 747a01d..25fc46e 100644 (file)
@@ -4026,8 +4026,17 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
                                return ret;
                }
 
-               if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
+               if (vmf->prealloc_pte) {
+                       vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+                       if (likely(pmd_none(*vmf->pmd))) {
+                               mm_inc_nr_ptes(vma->vm_mm);
+                               pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
+                               vmf->prealloc_pte = NULL;
+                       }
+                       spin_unlock(vmf->ptl);
+               } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
                        return VM_FAULT_OOM;
+               }
        }
 
        /* See comment in handle_pte_fault() */
index 23cbd9d..7e24043 100644 (file)
@@ -537,54 +537,6 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 }
 
 /*
- * Gigantic pages are so large that we do not guarantee that page++ pointer
- * arithmetic will work across the entire page.  We need something more
- * specialized.
- */
-static void __copy_gigantic_page(struct page *dst, struct page *src,
-                               int nr_pages)
-{
-       int i;
-       struct page *dst_base = dst;
-       struct page *src_base = src;
-
-       for (i = 0; i < nr_pages; ) {
-               cond_resched();
-               copy_highpage(dst, src);
-
-               i++;
-               dst = mem_map_next(dst, dst_base, i);
-               src = mem_map_next(src, src_base, i);
-       }
-}
-
-void copy_huge_page(struct page *dst, struct page *src)
-{
-       int i;
-       int nr_pages;
-
-       if (PageHuge(src)) {
-               /* hugetlbfs page */
-               struct hstate *h = page_hstate(src);
-               nr_pages = pages_per_huge_page(h);
-
-               if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
-                       __copy_gigantic_page(dst, src, nr_pages);
-                       return;
-               }
-       } else {
-               /* thp page */
-               BUG_ON(!PageTransHuge(src));
-               nr_pages = thp_nr_pages(src);
-       }
-
-       for (i = 0; i < nr_pages; i++) {
-               cond_resched();
-               copy_highpage(dst + i, src + i);
-       }
-}
-
-/*
  * Copy the page to its new location
  */
 void migrate_page_states(struct page *newpage, struct page *page)
@@ -2116,7 +2068,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
        LIST_HEAD(migratepages);
        new_page_t *new;
        bool compound;
-       unsigned int nr_pages = thp_nr_pages(page);
+       int nr_pages = thp_nr_pages(page);
 
        /*
         * PTE mapped THP or HugeTLB page can't reach here so the page could
index f5852a0..1854850 100644 (file)
@@ -156,14 +156,14 @@ static inline void put_memcg_path_buf(void)
 #define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                                   \
        do {                                                                   \
                const char *memcg_path;                                        \
-               preempt_disable();                                             \
+               local_lock(&memcg_paths.lock);                                 \
                memcg_path = get_mm_memcg_path(mm);                            \
                trace_mmap_lock_##type(mm,                                     \
                                       memcg_path != NULL ? memcg_path : "",   \
                                       ##__VA_ARGS__);                         \
                if (likely(memcg_path != NULL))                                \
                        put_memcg_path_buf();                                  \
-               preempt_enable();                                              \
+               local_unlock(&memcg_paths.lock);                               \
        } while (0)
 
 #else /* !CONFIG_MEMCG */
index 3b97e17..856b175 100644 (file)
@@ -840,21 +840,24 @@ void init_mem_debugging_and_hardening(void)
        }
 #endif
 
-       if (_init_on_alloc_enabled_early) {
-               if (page_poisoning_requested)
-                       pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
-                               "will take precedence over init_on_alloc\n");
-               else
-                       static_branch_enable(&init_on_alloc);
-       }
-       if (_init_on_free_enabled_early) {
-               if (page_poisoning_requested)
-                       pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
-                               "will take precedence over init_on_free\n");
-               else
-                       static_branch_enable(&init_on_free);
+       if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
+           page_poisoning_requested) {
+               pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
+                       "will take precedence over init_on_alloc and init_on_free\n");
+               _init_on_alloc_enabled_early = false;
+               _init_on_free_enabled_early = false;
        }
 
+       if (_init_on_alloc_enabled_early)
+               static_branch_enable(&init_on_alloc);
+       else
+               static_branch_disable(&init_on_alloc);
+
+       if (_init_on_free_enabled_early)
+               static_branch_enable(&init_on_free);
+       else
+               static_branch_disable(&init_on_free);
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
        if (!debug_pagealloc_enabled())
                return;
@@ -3820,7 +3823,7 @@ static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 
 #endif /* CONFIG_FAIL_PAGE_ALLOC */
 
-static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 {
        return __should_fail_alloc_page(gfp_mask, order);
 }
@@ -5221,9 +5224,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
        unsigned int alloc_flags = ALLOC_WMARK_LOW;
        int nr_populated = 0, nr_account = 0;
 
-       if (unlikely(nr_pages <= 0))
-               return 0;
-
        /*
         * Skip populated array elements to determine if any pages need
         * to be allocated before disabling IRQs.
@@ -5231,19 +5231,35 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
        while (page_array && nr_populated < nr_pages && page_array[nr_populated])
                nr_populated++;
 
+       /* No pages requested? */
+       if (unlikely(nr_pages <= 0))
+               goto out;
+
        /* Already populated array? */
        if (unlikely(page_array && nr_pages - nr_populated == 0))
-               return nr_populated;
+               goto out;
 
        /* Use the single page allocator for one page. */
        if (nr_pages - nr_populated == 1)
                goto failed;
 
+#ifdef CONFIG_PAGE_OWNER
+       /*
+        * PAGE_OWNER may recurse into the allocator to allocate space to
+        * save the stack with pagesets.lock held. Releasing/reacquiring
+        * removes much of the performance benefit of bulk allocation so
+        * force the caller to allocate one page at a time as it'll have
+        * similar performance to added complexity to the bulk allocator.
+        */
+       if (static_branch_unlikely(&page_owner_inited))
+               goto failed;
+#endif
+
        /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
        gfp &= gfp_allowed_mask;
        alloc_gfp = gfp;
        if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
-               return 0;
+               goto out;
        gfp = alloc_gfp;
 
        /* Find an allowed local zone that meets the low watermark. */
@@ -5311,6 +5327,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
        __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
        zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
 
+out:
        return nr_populated;
 
 failed_irq:
@@ -5326,7 +5343,7 @@ failed:
                nr_populated++;
        }
 
-       return nr_populated;
+       goto out;
 }
 EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
 
index 795f9d5..b9eb5c1 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1440,21 +1440,20 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                /*
                 * If the page is mlock()d, we cannot swap it out.
                 */
-               if (!(flags & TTU_IGNORE_MLOCK)) {
-                       if (vma->vm_flags & VM_LOCKED) {
-                               /* PTE-mapped THP are never marked as mlocked */
-                               if (!PageTransCompound(page) ||
-                                   (PageHead(page) && !PageDoubleMap(page))) {
-                                       /*
-                                        * Holding pte lock, we do *not* need
-                                        * mmap_lock here
-                                        */
-                                       mlock_vma_page(page);
-                               }
-                               ret = false;
-                               page_vma_mapped_walk_done(&pvmw);
-                               break;
-                       }
+               if (!(flags & TTU_IGNORE_MLOCK) &&
+                   (vma->vm_flags & VM_LOCKED)) {
+                       /*
+                        * PTE-mapped THP are never marked as mlocked: so do
+                        * not set it on a DoubleMap THP, nor on an Anon THP
+                        * (which may still be PTE-mapped after DoubleMap was
+                        * cleared).  But stop unmapping even in those cases.
+                        */
+                       if (!PageTransCompound(page) || (PageHead(page) &&
+                            !PageDoubleMap(page) && !PageAnon(page)))
+                               mlock_vma_page(page);
+                       page_vma_mapped_walk_done(&pvmw);
+                       ret = false;
+                       break;
                }
 
                /* Unexpected PMD-mapped THP? */
@@ -1986,8 +1985,10 @@ static bool page_mlock_one(struct page *page, struct vm_area_struct *vma,
                 */
                if (vma->vm_flags & VM_LOCKED) {
                        /*
-                        * PTE-mapped THP are never marked as mlocked, but
-                        * this function is never called when PageDoubleMap().
+                        * PTE-mapped THP are never marked as mlocked; but
+                        * this function is never called on a DoubleMap THP,
+                        * nor on an Anon THP (which may still be PTE-mapped
+                        * after DoubleMap was cleared).
                         */
                        mlock_vma_page(page);
                        /*
@@ -2022,6 +2023,10 @@ void page_mlock(struct page *page)
        VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
        VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
 
+       /* Anon THP are only marked as mlocked when singly mapped */
+       if (PageTransCompound(page) && PageAnon(page))
+               return;
+
        rmap_walk(page, &rwc);
 }
 
index f77d254..030f02d 100644 (file)
@@ -152,6 +152,7 @@ static void secretmem_freepage(struct page *page)
 }
 
 const struct address_space_operations secretmem_aops = {
+       .set_page_dirty = __set_page_dirty_no_writeback,
        .freepage       = secretmem_freepage,
        .migratepage    = secretmem_migratepage,
        .isolate_page   = secretmem_isolate_page,
index 67e0663..58c01a3 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -216,10 +216,18 @@ DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
 #endif
 extern void print_tracking(struct kmem_cache *s, void *object);
 long validate_slab_cache(struct kmem_cache *s);
+static inline bool __slub_debug_enabled(void)
+{
+       return static_branch_unlikely(&slub_debug_enabled);
+}
 #else
 static inline void print_tracking(struct kmem_cache *s, void *object)
 {
 }
+static inline bool __slub_debug_enabled(void)
+{
+       return false;
+}
 #endif
 
 /*
@@ -229,11 +237,10 @@ static inline void print_tracking(struct kmem_cache *s, void *object)
  */
 static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
 {
-#ifdef CONFIG_SLUB_DEBUG
-       VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
-       if (static_branch_unlikely(&slub_debug_enabled))
+       if (IS_ENABLED(CONFIG_SLUB_DEBUG))
+               VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
+       if (__slub_debug_enabled())
                return s->flags & flags;
-#endif
        return false;
 }
 
@@ -339,7 +346,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
                        continue;
 
                page = virt_to_head_page(p[i]);
-               objcgs = page_objcgs(page);
+               objcgs = page_objcgs_check(page);
                if (!objcgs)
                        continue;
 
index dc863c1..af984e4 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -26,7 +26,6 @@
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
-#include <linux/stackdepot.h>
 #include <linux/debugobjects.h>
 #include <linux/kallsyms.h>
 #include <linux/kfence.h>
  */
 
 #ifdef CONFIG_SLUB_DEBUG
-
 #ifdef CONFIG_SLUB_DEBUG_ON
 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
 #else
 DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
 #endif
-
-static inline bool __slub_debug_enabled(void)
-{
-       return static_branch_unlikely(&slub_debug_enabled);
-}
-
-#else          /* CONFIG_SLUB_DEBUG */
-
-static inline bool __slub_debug_enabled(void)
-{
-       return false;
-}
-
 #endif         /* CONFIG_SLUB_DEBUG */
 
 static inline bool kmem_cache_debug(struct kmem_cache *s)
@@ -221,8 +206,8 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 #define TRACK_ADDRS_COUNT 16
 struct track {
        unsigned long addr;     /* Called from address */
-#ifdef CONFIG_STACKDEPOT
-       depot_stack_handle_t handle;
+#ifdef CONFIG_STACKTRACE
+       unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
 #endif
        int cpu;                /* Was running on cpu */
        int pid;                /* Pid context */
@@ -626,27 +611,22 @@ static struct track *get_track(struct kmem_cache *s, void *object,
        return kasan_reset_tag(p + alloc);
 }
 
-#ifdef CONFIG_STACKDEPOT
-static depot_stack_handle_t save_stack_depot_trace(gfp_t flags)
-{
-       unsigned long entries[TRACK_ADDRS_COUNT];
-       depot_stack_handle_t handle;
-       unsigned int nr_entries;
-
-       nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 4);
-       handle = stack_depot_save(entries, nr_entries, flags);
-       return handle;
-}
-#endif
-
 static void set_track(struct kmem_cache *s, void *object,
                        enum track_item alloc, unsigned long addr)
 {
        struct track *p = get_track(s, object, alloc);
 
        if (addr) {
-#ifdef CONFIG_STACKDEPOT
-               p->handle = save_stack_depot_trace(GFP_NOWAIT);
+#ifdef CONFIG_STACKTRACE
+               unsigned int nr_entries;
+
+               metadata_access_enable();
+               nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
+                                             TRACK_ADDRS_COUNT, 3);
+               metadata_access_disable();
+
+               if (nr_entries < TRACK_ADDRS_COUNT)
+                       p->addrs[nr_entries] = 0;
 #endif
                p->addr = addr;
                p->cpu = smp_processor_id();
@@ -673,19 +653,14 @@ static void print_track(const char *s, struct track *t, unsigned long pr_time)
 
        pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
               s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
-#ifdef CONFIG_STACKDEPOT
+#ifdef CONFIG_STACKTRACE
        {
-               depot_stack_handle_t handle;
-               unsigned long *entries;
-               unsigned int nr_entries;
-
-               handle = READ_ONCE(t->handle);
-               if (!handle) {
-                       pr_err("object allocation/free stack trace missing\n");
-               } else {
-                       nr_entries = stack_depot_fetch(handle, &entries);
-                       stack_trace_print(entries, nr_entries, 0);
-               }
+               int i;
+               for (i = 0; i < TRACK_ADDRS_COUNT; i++)
+                       if (t->addrs[i])
+                               pr_err("\t%pS\n", (void *)t->addrs[i]);
+                       else
+                               break;
        }
 #endif
 }
@@ -3261,6 +3236,16 @@ struct detached_freelist {
        struct kmem_cache *s;
 };
 
+static inline void free_nonslab_page(struct page *page)
+{
+       unsigned int order = compound_order(page);
+
+       VM_BUG_ON_PAGE(!PageCompound(page), page);
+       kfree_hook(page_address(page));
+       mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
+       __free_pages(page, order);
+}
+
 /*
  * This function progressively scans the array with free objects (with
  * a limited look ahead) and extract objects belonging to the same
@@ -3297,9 +3282,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
        if (!s) {
                /* Handle kalloc'ed objects */
                if (unlikely(!PageSlab(page))) {
-                       BUG_ON(!PageCompound(page));
-                       kfree_hook(object);
-                       __free_pages(page, compound_order(page));
+                       free_nonslab_page(page);
                        p[size] = NULL; /* mark object processed */
                        return size;
                }
@@ -4059,26 +4042,18 @@ void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
        objp = fixup_red_left(s, objp);
        trackp = get_track(s, objp, TRACK_ALLOC);
        kpp->kp_ret = (void *)trackp->addr;
-#ifdef CONFIG_STACKDEPOT
-       {
-               depot_stack_handle_t handle;
-               unsigned long *entries;
-               unsigned int nr_entries;
-
-               handle = READ_ONCE(trackp->handle);
-               if (handle) {
-                       nr_entries = stack_depot_fetch(handle, &entries);
-                       for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
-                               kpp->kp_stack[i] = (void *)entries[i];
-               }
+#ifdef CONFIG_STACKTRACE
+       for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
+               kpp->kp_stack[i] = (void *)trackp->addrs[i];
+               if (!kpp->kp_stack[i])
+                       break;
+       }
 
-               trackp = get_track(s, objp, TRACK_FREE);
-               handle = READ_ONCE(trackp->handle);
-               if (handle) {
-                       nr_entries = stack_depot_fetch(handle, &entries);
-                       for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
-                               kpp->kp_free_stack[i] = (void *)entries[i];
-               }
+       trackp = get_track(s, objp, TRACK_FREE);
+       for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
+               kpp->kp_free_stack[i] = (void *)trackp->addrs[i];
+               if (!kpp->kp_free_stack[i])
+                       break;
        }
 #endif
 #endif
@@ -4283,13 +4258,7 @@ void kfree(const void *x)
 
        page = virt_to_head_page(x);
        if (unlikely(!PageSlab(page))) {
-               unsigned int order = compound_order(page);
-
-               BUG_ON(!PageCompound(page));
-               kfree_hook(object);
-               mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
-                                     -(PAGE_SIZE << order));
-               __free_pages(page, order);
+               free_nonslab_page(page);
                return;
        }
        slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
index 99c6cc7..9043d03 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -731,6 +731,16 @@ int __page_mapcount(struct page *page)
 }
 EXPORT_SYMBOL_GPL(__page_mapcount);
 
+void copy_huge_page(struct page *dst, struct page *src)
+{
+       unsigned i, nr = compound_nr(src);
+
+       for (i = 0; i < nr; i++) {
+               cond_resched();
+               copy_highpage(nth_page(dst, i), nth_page(src, i));
+       }
+}
+
 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
 int sysctl_overcommit_ratio __read_mostly = 50;
 unsigned long sysctl_overcommit_kbytes __read_mostly;
index 400bd85..f6012f8 100644 (file)
@@ -203,6 +203,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr
        kfree(attr);
 }
 
+static void garp_attr_destroy_all(struct garp_applicant *app)
+{
+       struct rb_node *node, *next;
+       struct garp_attr *attr;
+
+       for (node = rb_first(&app->gid);
+            next = node ? rb_next(node) : NULL, node != NULL;
+            node = next) {
+               attr = rb_entry(node, struct garp_attr, node);
+               garp_attr_destroy(app, attr);
+       }
+}
+
 static int garp_pdu_init(struct garp_applicant *app)
 {
        struct sk_buff *skb;
@@ -609,6 +622,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
 
        spin_lock_bh(&app->lock);
        garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
+       garp_attr_destroy_all(app);
        garp_pdu_queue(app);
        spin_unlock_bh(&app->lock);
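
garp_attr_destroy_all() above (and its twin mrp_attr_destroy_all() in the next file) uses the delete-safe traversal idiom: fetch the successor before destroying the current node, since rb_next() must not be called on freed memory. The same discipline on a singly linked list, as a standalone sketch:

	#include <stdlib.h>

	struct node { struct node *next; };

	static void destroy_all(struct node *head)
	{
		struct node *n, *next;

		for (n = head; n; n = next) {
			next = n->next;		/* read the link first */
			free(n);		/* now the node may go away */
		}
	}
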
 
index bea6e43..35e04cc 100644 (file)
@@ -292,6 +292,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
        kfree(attr);
 }
 
+static void mrp_attr_destroy_all(struct mrp_applicant *app)
+{
+       struct rb_node *node, *next;
+       struct mrp_attr *attr;
+
+       for (node = rb_first(&app->mad);
+            next = node ? rb_next(node) : NULL, node != NULL;
+            node = next) {
+               attr = rb_entry(node, struct mrp_attr, node);
+               mrp_attr_destroy(app, attr);
+       }
+}
+
 static int mrp_pdu_init(struct mrp_applicant *app)
 {
        struct sk_buff *skb;
@@ -895,6 +908,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
 
        spin_lock_bh(&app->lock);
        mrp_mad_event(app, MRP_EVENT_TX);
+       mrp_attr_destroy_all(app);
        mrp_pdu_queue(app);
        spin_unlock_bh(&app->lock);
 
index aa47af3..1cc75c8 100644 (file)
@@ -701,6 +701,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
        void *data;
        int ret;
 
+       if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
+           prog->expected_attach_type == BPF_XDP_CPUMAP)
+               return -EINVAL;
        if (kattr->test.ctx_in || kattr->test.ctx_out)
                return -EINVAL;
 
index 2b862cf..a16191d 100644 (file)
@@ -780,7 +780,7 @@ int br_fdb_replay(const struct net_device *br_dev, const struct net_device *dev,
                struct net_device *dst_dev;
 
                dst_dev = dst ? dst->dev : br->dev;
-               if (dst_dev != br_dev && dst_dev != dev)
+               if (dst_dev && dst_dev != dev)
                        continue;
 
                err = br_fdb_replay_one(nb, fdb, dst_dev, action, ctx);
index f7d2f47..6e4a323 100644 (file)
@@ -562,7 +562,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
        struct net_bridge_port *p;
        int err = 0;
        unsigned br_hr, dev_hr;
-       bool changed_addr;
+       bool changed_addr, fdb_synced = false;
 
        /* Don't allow bridging non-ethernet like devices. */
        if ((dev->flags & IFF_LOOPBACK) ||
@@ -652,6 +652,19 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
        list_add_rcu(&p->list, &br->port_list);
 
        nbp_update_port_count(br);
+       if (!br_promisc_port(p) && (p->dev->priv_flags & IFF_UNICAST_FLT)) {
+               /* When updating the port count we also update all ports'
+                * promiscuous mode.
+                * A port leaving promiscuous mode normally gets the bridge's
+                * fdb synced to the unicast filter (if supported), however,
+                * `br_port_clear_promisc` does not distinguish between
+                * non-promiscuous ports and *new* ports, so we need to
+                * sync explicitly here.
+                */
+               fdb_synced = br_fdb_sync_static(br, p) == 0;
+               if (!fdb_synced)
+                       netdev_err(dev, "failed to sync bridge static fdb addresses to this port\n");
+       }
 
        netdev_update_features(br->dev);
 
@@ -701,6 +714,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
        return 0;
 
 err7:
+       if (fdb_synced)
+               br_fdb_unsync_static(br, p);
        list_del_rcu(&p->list);
        br_fdb_delete_by_port(br, p, 0, 1);
        nbp_update_port_count(br);
index 53c3a9d..d0434dc 100644 (file)
@@ -3264,7 +3264,9 @@ static void br_multicast_pim(struct net_bridge *br,
            pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
                return;
 
+       spin_lock(&br->multicast_lock);
        br_ip4_multicast_mark_router(br, port);
+       spin_unlock(&br->multicast_lock);
 }
 
 static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
@@ -3275,7 +3277,9 @@ static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
            igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
                return -ENOMSG;
 
+       spin_lock(&br->multicast_lock);
        br_ip4_multicast_mark_router(br, port);
+       spin_unlock(&br->multicast_lock);
 
        return 0;
 }
@@ -3343,7 +3347,9 @@ static void br_ip6_multicast_mrd_rcv(struct net_bridge *br,
        if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
                return;
 
+       spin_lock(&br->multicast_lock);
        br_ip6_multicast_mark_router(br, port);
+       spin_unlock(&br->multicast_lock);
 }
 
 static int br_multicast_ipv6_rcv(struct net_bridge *br,
index 647554c..e12fd3c 100644 (file)
@@ -539,7 +539,8 @@ static int caif_seqpkt_sendmsg(struct socket *sock, struct msghdr *msg,
                goto err;
 
        ret = -EINVAL;
-       if (unlikely(msg->msg_iter.iov->iov_base == NULL))
+       if (unlikely(msg->msg_iter.nr_segs == 0) ||
+           unlikely(msg->msg_iter.iov->iov_base == NULL))
                goto err;
        noblock = msg->msg_flags & MSG_DONTWAIT;
 
index c3946c3..bdc95bd 100644 (file)
@@ -1075,11 +1075,16 @@ static bool j1939_session_deactivate_locked(struct j1939_session *session)
 
 static bool j1939_session_deactivate(struct j1939_session *session)
 {
+       struct j1939_priv *priv = session->priv;
        bool active;
 
-       j1939_session_list_lock(session->priv);
+       j1939_session_list_lock(priv);
+       /* This function should be called with a session ref-count of at
+        * least 2.
+        */
+       WARN_ON_ONCE(kref_read(&session->kref) < 2);
        active = j1939_session_deactivate_locked(session);
-       j1939_session_list_unlock(session->priv);
+       j1939_session_list_unlock(priv);
 
        return active;
 }
@@ -1869,7 +1874,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
                if (!session->transmission)
                        j1939_tp_schedule_txtimer(session, 0);
        } else {
-               j1939_tp_set_rxtimeout(session, 250);
+               j1939_tp_set_rxtimeout(session, 750);
        }
        session->last_cmd = 0xff;
        consume_skb(se_skb);
index ed4fcb7..cd5a493 100644 (file)
@@ -546,10 +546,18 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                                return -EFAULT;
                }
 
+               rtnl_lock();
                lock_sock(sk);
 
-               if (ro->bound && ro->ifindex)
+               if (ro->bound && ro->ifindex) {
                        dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+                       if (!dev) {
+                               if (count > 1)
+                                       kfree(filter);
+                               err = -ENODEV;
+                               goto out_fil;
+                       }
+               }
 
                if (ro->bound) {
                        /* (try to) register the new filters */
@@ -588,6 +596,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                        dev_put(dev);
 
                release_sock(sk);
+               rtnl_unlock();
 
                break;
 
@@ -600,10 +609,16 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 
                err_mask &= CAN_ERR_MASK;
 
+               rtnl_lock();
                lock_sock(sk);
 
-               if (ro->bound && ro->ifindex)
+               if (ro->bound && ro->ifindex) {
                        dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+                       if (!dev) {
+                               err = -ENODEV;
+                               goto out_err;
+                       }
+               }
 
                /* remove current error mask */
                if (ro->bound) {
@@ -627,6 +642,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                        dev_put(dev);
 
                release_sock(sk);
+               rtnl_unlock();
 
                break;
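
Taking rtnl_lock before lock_sock pins the device table, so the dev_get_by_index() result cannot race with interface teardown, and a vanished device now surfaces to userspace as -ENODEV. For reference, the userspace call that exercises this path (standard CAN raw API; the helper name is illustrative):

    #include <linux/can.h>
    #include <linux/can/raw.h>
    #include <sys/socket.h>

    /* Illustrative helper; fd is a bound CAN_RAW socket. If the bound
     * interface has meanwhile disappeared, this setsockopt() now fails
     * cleanly with ENODEV rather than racing with device teardown. */
    static int set_can_filter(int fd, canid_t id, canid_t mask)
    {
            struct can_filter f = { .can_id = id, .can_mask = mask };

            return setsockopt(fd, SOL_CAN_RAW, CAN_RAW_FILTER,
                              &f, sizeof(f));
    }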
 
index c253c2a..8f1a47a 100644 (file)
 #include <trace/events/napi.h>
 #include <trace/events/net.h>
 #include <trace/events/skb.h>
+#include <trace/events/qdisc.h>
 #include <linux/inetdevice.h>
 #include <linux/cpu_rmap.h>
 #include <linux/static_key.h>
@@ -3844,6 +3845,18 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
        }
 }
 
+static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
+                            struct sk_buff **to_free,
+                            struct netdev_queue *txq)
+{
+       int rc;
+
+       rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
+       if (rc == NET_XMIT_SUCCESS)
+               trace_qdisc_enqueue(q, txq, skb);
+       return rc;
+}
+
 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                                 struct net_device *dev,
                                 struct netdev_queue *txq)
@@ -3862,8 +3875,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                         * of q->seqlock to protect from racing with requeuing.
                         */
                        if (unlikely(!nolock_qdisc_is_empty(q))) {
-                               rc = q->enqueue(skb, q, &to_free) &
-                                       NET_XMIT_MASK;
+                               rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
                                __qdisc_run(q);
                                qdisc_run_end(q);
 
@@ -3879,7 +3891,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                        return NET_XMIT_SUCCESS;
                }
 
-               rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+               rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
                qdisc_run(q);
 
 no_lock_out:
@@ -3923,7 +3935,7 @@ no_lock_out:
                qdisc_run_end(q);
                rc = NET_XMIT_SUCCESS;
        } else {
-               rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+               rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
                if (qdisc_run_begin(q)) {
                        if (unlikely(contended)) {
                                spin_unlock(&q->busylock);
@@ -6008,6 +6020,19 @@ static void gro_list_prepare(const struct list_head *head,
                        diffs = memcmp(skb_mac_header(p),
                                       skb_mac_header(skb),
                                       maclen);
+
+               diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);
+#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+               if (!diffs) {
+                       struct tc_skb_ext *skb_ext = skb_ext_find(skb, TC_SKB_EXT);
+                       struct tc_skb_ext *p_ext = skb_ext_find(p, TC_SKB_EXT);
+
+                       diffs |= (!!p_ext) ^ (!!skb_ext);
+                       if (!diffs && unlikely(skb_ext))
+                               diffs |= p_ext->chain ^ skb_ext->chain;
+               }
+#endif
+
                NAPI_GRO_CB(p)->same_flow = !diffs;
        }
 }
@@ -6221,6 +6246,8 @@ static gro_result_t napi_skb_finish(struct napi_struct *napi,
        case GRO_MERGED_FREE:
                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
                        napi_skb_free_stolen_head(skb);
+               else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
+                       __kfree_skb(skb);
                else
                        __kfree_skb_defer(skb);
                break;
@@ -6270,6 +6297,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
        skb_shinfo(skb)->gso_type = 0;
        skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
        skb_ext_reset(skb);
+       nf_reset_ct(skb);
 
        napi->skb = skb;
 }
@@ -9684,14 +9712,17 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
        struct net_device *dev;
        int err, fd;
 
+       rtnl_lock();
        dev = dev_get_by_index(net, attr->link_create.target_ifindex);
-       if (!dev)
+       if (!dev) {
+               rtnl_unlock();
                return -EINVAL;
+       }
 
        link = kzalloc(sizeof(*link), GFP_USER);
        if (!link) {
                err = -ENOMEM;
-               goto out_put_dev;
+               goto unlock;
        }
 
        bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
@@ -9701,14 +9732,14 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
        err = bpf_link_prime(&link->link, &link_primer);
        if (err) {
                kfree(link);
-               goto out_put_dev;
+               goto unlock;
        }
 
-       rtnl_lock();
        err = dev_xdp_attach_link(dev, NULL, link);
        rtnl_unlock();
 
        if (err) {
+               link->dev = NULL;
                bpf_link_cleanup(&link_primer);
                goto out_put_dev;
        }
@@ -9718,6 +9749,9 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
        dev_put(dev);
        return fd;
 
+unlock:
+       rtnl_unlock();
+
 out_put_dev:
        dev_put(dev);
        return err;
index 8fdd04f..8503262 100644 (file)
@@ -9328,18 +9328,10 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
 
        switch (attrs->flavour) {
        case DEVLINK_PORT_FLAVOUR_PHYSICAL:
-       case DEVLINK_PORT_FLAVOUR_VIRTUAL:
                n = snprintf(name, len, "p%u", attrs->phys.port_number);
                if (n < len && attrs->split)
                        n += snprintf(name + n, len - n, "s%u",
                                      attrs->phys.split_subport_number);
-               if (!attrs->split)
-                       n = snprintf(name, len, "p%u", attrs->phys.port_number);
-               else
-                       n = snprintf(name, len, "p%us%u",
-                                    attrs->phys.port_number,
-                                    attrs->phys.split_subport_number);
-
                break;
        case DEVLINK_PORT_FLAVOUR_CPU:
        case DEVLINK_PORT_FLAVOUR_DSA:
@@ -9381,6 +9373,8 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
                n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
                             attrs->pci_sf.sf);
                break;
+       case DEVLINK_PORT_FLAVOUR_VIRTUAL:
+               return -EOPNOTSUPP;
        }
 
        if (n >= len)
index 2aadbfc..4b2415d 100644 (file)
@@ -1504,7 +1504,7 @@ __be32 flow_get_u32_dst(const struct flow_keys *flow)
 }
 EXPORT_SYMBOL(flow_get_u32_dst);
 
-/* Sort the source and destination IP (and the ports if the IP are the same),
+/* Sort the source and destination IP and the ports,
  * to have consistent hash within the two directions
  */
 static inline void __flow_hash_consistentify(struct flow_keys *keys)
@@ -1515,11 +1515,11 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
        case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
                addr_diff = (__force u32)keys->addrs.v4addrs.dst -
                            (__force u32)keys->addrs.v4addrs.src;
-               if ((addr_diff < 0) ||
-                   (addr_diff == 0 &&
-                    ((__force u16)keys->ports.dst <
-                     (__force u16)keys->ports.src))) {
+               if (addr_diff < 0)
                        swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
+
+               if ((__force u16)keys->ports.dst <
+                   (__force u16)keys->ports.src) {
                        swap(keys->ports.src, keys->ports.dst);
                }
                break;
@@ -1527,13 +1527,13 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
                addr_diff = memcmp(&keys->addrs.v6addrs.dst,
                                   &keys->addrs.v6addrs.src,
                                   sizeof(keys->addrs.v6addrs.dst));
-               if ((addr_diff < 0) ||
-                   (addr_diff == 0 &&
-                    ((__force u16)keys->ports.dst <
-                     (__force u16)keys->ports.src))) {
+               if (addr_diff < 0) {
                        for (i = 0; i < 4; i++)
                                swap(keys->addrs.v6addrs.src.s6_addr32[i],
                                     keys->addrs.v6addrs.dst.s6_addr32[i]);
+               }
+               if ((__force u16)keys->ports.dst <
+                   (__force u16)keys->ports.src) {
                        swap(keys->ports.src, keys->ports.dst);
                }
                break;
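
The behavioural change worth noting: ports are now canonicalized independently of the addresses, so both directions of a flow hash identically even when the addresses differ, not only on an address tie. A standalone userspace model of the new logic (all names local to this sketch):

    #include <stdint.h>
    #include <stdio.h>

    struct flow_key { uint32_t saddr, daddr; uint16_t sport, dport; };

    /* Mirror of the new canonicalization: addresses and ports are
     * swapped independently rather than ports only on an address tie. */
    static void consistentify(struct flow_key *k)
    {
            if ((int32_t)(k->daddr - k->saddr) < 0) {
                    uint32_t t = k->saddr; k->saddr = k->daddr; k->daddr = t;
            }
            if (k->dport < k->sport) {
                    uint16_t t = k->sport; k->sport = k->dport; k->dport = t;
            }
    }

    int main(void)
    {
            struct flow_key ab = { 0x0a000001, 0x0a000002, 443, 50000 };
            struct flow_key ba = { 0x0a000002, 0x0a000001, 50000, 443 };

            consistentify(&ab);
            consistentify(&ba);
            /* prints 1: both directions now collapse to one key */
            printf("%d\n", ab.saddr == ba.saddr && ab.daddr == ba.daddr &&
                           ab.sport == ba.sport && ab.dport == ba.dport);
            return 0;
    }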
index 12aabcd..fc7942c 100644 (file)
@@ -663,7 +663,7 @@ static void skb_release_data(struct sk_buff *skb)
        if (skb->cloned &&
            atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
                              &shinfo->dataref))
-               return;
+               goto exit;
 
        skb_zcopy_clear(skb, true);
 
@@ -674,6 +674,17 @@ static void skb_release_data(struct sk_buff *skb)
                kfree_skb_list(shinfo->frag_list);
 
        skb_free_head(skb);
+exit:
+       /* When we clone an SKB we copy the recycling bit. The pp_recycle
+        * bit is only set on the head though, so in order to avoid races
+        * while trying to recycle fragments on __skb_frag_unref() we need
+        * to make one SKB responsible for triggering the recycle path.
+        * So disable the recycling bit if an SKB is cloned and we have
+        * additional references to the fragmented part of the SKB.
+        * Eventually the last SKB will have the recycling bit set and its
+        * dataref set to 0, which will trigger the recycling.
+        */
+       skb->pp_recycle = 0;
 }
 
 /*
@@ -943,6 +954,7 @@ void __kfree_skb_defer(struct sk_buff *skb)
 
 void napi_skb_free_stolen_head(struct sk_buff *skb)
 {
+       nf_reset_ct(skb);
        skb_dst_drop(skb);
        skb_ext_put(skb);
        napi_skb_cache_put(skb);
@@ -3010,8 +3022,11 @@ skb_zerocopy_headlen(const struct sk_buff *from)
 
        if (!from->head_frag ||
            skb_headlen(from) < L1_CACHE_BYTES ||
-           skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
+           skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) {
                hlen = skb_headlen(from);
+               if (!hlen)
+                       hlen = from->len;
+       }
 
        if (skb_has_frag_list(from))
                hlen = from->len;
index 9b6160a..2d6249b 100644 (file)
@@ -508,10 +508,8 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
        if (skb_linearize(skb))
                return -EAGAIN;
        num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
-       if (unlikely(num_sge < 0)) {
-               kfree(msg);
+       if (unlikely(num_sge < 0))
                return num_sge;
-       }
 
        copied = skb->len;
        msg->sg.start = 0;
@@ -530,6 +528,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
 {
        struct sock *sk = psock->sk;
        struct sk_msg *msg;
+       int err;
 
        /* If we are receiving on the same sock skb->sk is already assigned,
         * skip memory accounting and owner transition seeing it already set
@@ -548,7 +547,10 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
         * into user buffers.
         */
        skb_set_owner_r(skb, sk);
-       return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+       err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+       if (err < 0)
+               kfree(msg);
+       return err;
 }
 
 /* Puts an skb on the ingress queue of the socket already assigned to the
@@ -559,12 +561,16 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
 {
        struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
        struct sock *sk = psock->sk;
+       int err;
 
        if (unlikely(!msg))
                return -EAGAIN;
        sk_msg_init(msg);
        skb_set_owner_r(skb, sk);
-       return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+       err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+       if (err < 0)
+               kfree(msg);
+       return err;
 }
 
 static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
@@ -578,29 +584,42 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
        return sk_psock_skb_ingress(psock, skb);
 }
 
-static void sock_drop(struct sock *sk, struct sk_buff *skb)
+static void sk_psock_skb_state(struct sk_psock *psock,
+                              struct sk_psock_work_state *state,
+                              struct sk_buff *skb,
+                              int len, int off)
 {
-       sk_drops_add(sk, skb);
-       kfree_skb(skb);
+       spin_lock_bh(&psock->ingress_lock);
+       if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
+               state->skb = skb;
+               state->len = len;
+               state->off = off;
+       } else {
+               sock_drop(psock->sk, skb);
+       }
+       spin_unlock_bh(&psock->ingress_lock);
 }
 
 static void sk_psock_backlog(struct work_struct *work)
 {
        struct sk_psock *psock = container_of(work, struct sk_psock, work);
        struct sk_psock_work_state *state = &psock->work_state;
-       struct sk_buff *skb;
+       struct sk_buff *skb = NULL;
        bool ingress;
        u32 len, off;
        int ret;
 
        mutex_lock(&psock->work_mutex);
-       if (state->skb) {
+       if (unlikely(state->skb)) {
+               spin_lock_bh(&psock->ingress_lock);
                skb = state->skb;
                len = state->len;
                off = state->off;
                state->skb = NULL;
-               goto start;
+               spin_unlock_bh(&psock->ingress_lock);
        }
+       if (skb)
+               goto start;
 
        while ((skb = skb_dequeue(&psock->ingress_skb))) {
                len = skb->len;
@@ -615,9 +634,8 @@ start:
                                                          len, ingress);
                        if (ret <= 0) {
                                if (ret == -EAGAIN) {
-                                       state->skb = skb;
-                                       state->len = len;
-                                       state->off = off;
+                                       sk_psock_skb_state(psock, state, skb,
+                                                          len, off);
                                        goto end;
                                }
                                /* Hard errors break pipe and stop xmit. */
@@ -716,6 +734,11 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock)
                skb_bpf_redirect_clear(skb);
                sock_drop(psock->sk, skb);
        }
+       kfree_skb(psock->work_state.skb);
+       /* We null the skb here to ensure that calls to sk_psock_backlog
+        * do not pick up the free'd skb.
+        */
+       psock->work_state.skb = NULL;
        __sk_psock_purge_ingress_msg(psock);
 }
 
@@ -767,8 +790,6 @@ static void sk_psock_destroy(struct work_struct *work)
 
 void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 {
-       sk_psock_stop(psock, false);
-
        write_lock_bh(&sk->sk_callback_lock);
        sk_psock_restore_proto(sk, psock);
        rcu_assign_sk_user_data(sk, NULL);
@@ -778,6 +799,8 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
                sk_psock_stop_verdict(sk, psock);
        write_unlock_bh(&sk->sk_callback_lock);
 
+       sk_psock_stop(psock, false);
+
        INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
        queue_rcu_work(system_wq, &psock->rwork);
 }
index ba1c0f7..a3eea6e 100644 (file)
 #include <net/tcp.h>
 #include <net/busy_poll.h>
 
+#include <linux/ethtool.h>
+
 static DEFINE_MUTEX(proto_list_mutex);
 static LIST_HEAD(proto_list);
 
@@ -810,8 +812,47 @@ void sock_set_timestamp(struct sock *sk, int optname, bool valbool)
        }
 }
 
-int sock_set_timestamping(struct sock *sk, int optname, int val)
+static int sock_timestamping_bind_phc(struct sock *sk, int phc_index)
+{
+       struct net *net = sock_net(sk);
+       struct net_device *dev = NULL;
+       bool match = false;
+       int *vclock_index;
+       int i, num;
+
+       if (sk->sk_bound_dev_if)
+               dev = dev_get_by_index(net, sk->sk_bound_dev_if);
+
+       if (!dev) {
+               pr_err("%s: socket not bound to a device\n", __func__);
+               return -EOPNOTSUPP;
+       }
+
+       num = ethtool_get_phc_vclocks(dev, &vclock_index);
+       for (i = 0; i < num; i++) {
+               if (*(vclock_index + i) == phc_index) {
+                       match = true;
+                       break;
+               }
+       }
+
+       if (num > 0)
+               kfree(vclock_index);
+
+       if (!match)
+               return -EINVAL;
+
+       sk->sk_bind_phc = phc_index;
+
+       return 0;
+}
+
+int sock_set_timestamping(struct sock *sk, int optname,
+                         struct so_timestamping timestamping)
 {
+       int val = timestamping.flags;
+       int ret;
+
        if (val & ~SOF_TIMESTAMPING_MASK)
                return -EINVAL;
 
@@ -832,6 +873,12 @@ int sock_set_timestamping(struct sock *sk, int optname, int val)
            !(val & SOF_TIMESTAMPING_OPT_TSONLY))
                return -EINVAL;
 
+       if (val & SOF_TIMESTAMPING_BIND_PHC) {
+               ret = sock_timestamping_bind_phc(sk, timestamping.bind_phc);
+               if (ret)
+                       return ret;
+       }
+
        sk->sk_tsflags = val;
        sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
 
@@ -907,6 +954,7 @@ EXPORT_SYMBOL(sock_set_mark);
 int sock_setsockopt(struct socket *sock, int level, int optname,
                    sockptr_t optval, unsigned int optlen)
 {
+       struct so_timestamping timestamping;
        struct sock_txtime sk_txtime;
        struct sock *sk = sock->sk;
        int val;
@@ -1068,12 +1116,22 @@ set_sndbuf:
        case SO_TIMESTAMP_NEW:
        case SO_TIMESTAMPNS_OLD:
        case SO_TIMESTAMPNS_NEW:
-               sock_set_timestamp(sk, valbool, optname);
+               sock_set_timestamp(sk, optname, valbool);
                break;
 
        case SO_TIMESTAMPING_NEW:
        case SO_TIMESTAMPING_OLD:
-               ret = sock_set_timestamping(sk, optname, val);
+               if (optlen == sizeof(timestamping)) {
+                       if (copy_from_sockptr(&timestamping, optval,
+                                             sizeof(timestamping))) {
+                               ret = -EFAULT;
+                               break;
+                       }
+               } else {
+                       memset(&timestamping, 0, sizeof(timestamping));
+                       timestamping.flags = val;
+               }
+               ret = sock_set_timestamping(sk, optname, timestamping);
                break;
 
        case SO_RCVLOWAT:
@@ -1201,7 +1259,7 @@ set_sndbuf:
                        if (val < 0)
                                ret = -EINVAL;
                        else
-                               sk->sk_ll_usec = val;
+                               WRITE_ONCE(sk->sk_ll_usec, val);
                }
                break;
        case SO_PREFER_BUSY_POLL:
@@ -1348,6 +1406,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                struct __kernel_old_timeval tm;
                struct  __kernel_sock_timeval stm;
                struct sock_txtime txtime;
+               struct so_timestamping timestamping;
        } v;
 
        int lv = sizeof(int);
@@ -1451,7 +1510,9 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                break;
 
        case SO_TIMESTAMPING_OLD:
-               v.val = sk->sk_tsflags;
+               lv = sizeof(v.timestamping);
+               v.timestamping.flags = sk->sk_tsflags;
+               v.timestamping.bind_phc = sk->sk_bind_phc;
                break;
 
        case SO_RCVTIMEO_OLD:
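
Userspace opts into PHC binding by passing the extended structure; the optlen check above keeps the legacy bare-int form working. A hedged sketch against the UAPI added in this series (helper name illustrative; the socket must already be bound with SO_BINDTODEVICE, otherwise the kernel returns EOPNOTSUPP, and phc_index must be one of the device's vclocks or it returns EINVAL):

    #include <linux/net_tstamp.h>
    #include <sys/socket.h>

    static int bind_phc_vclock(int fd, int phc_index)
    {
            struct so_timestamping ts = {
                    .flags = SOF_TIMESTAMPING_TX_HARDWARE |
                             SOF_TIMESTAMPING_RAW_HARDWARE |
                             SOF_TIMESTAMPING_BIND_PHC,
                    .bind_phc = phc_index,
            };

            return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
                              &ts, sizeof(ts));
    }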
index 5dbd45d..dc92a67 100644 (file)
@@ -816,7 +816,7 @@ static int dn_auto_bind(struct socket *sock)
 static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
 {
        struct dn_scp *scp = DN_SK(sk);
-       DEFINE_WAIT(wait);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
        int err;
 
        if (scp->state != DN_CR)
@@ -826,11 +826,11 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
        scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk));
        dn_send_conn_conf(sk, allocation);
 
-       prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+       add_wait_queue(sk_sleep(sk), &wait);
        for(;;) {
                release_sock(sk);
                if (scp->state == DN_CC)
-                       *timeo = schedule_timeout(*timeo);
+                       *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
                lock_sock(sk);
                err = 0;
                if (scp->state == DN_RUN)
@@ -844,9 +844,8 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
                err = -EAGAIN;
                if (!*timeo)
                        break;
-               prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        }
-       finish_wait(sk_sleep(sk), &wait);
+       remove_wait_queue(sk_sleep(sk), &wait);
        if (err == 0) {
                sk->sk_socket->state = SS_CONNECTED;
        } else if (scp->state != DN_CC) {
@@ -858,7 +857,7 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
 static int dn_wait_run(struct sock *sk, long *timeo)
 {
        struct dn_scp *scp = DN_SK(sk);
-       DEFINE_WAIT(wait);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
        int err = 0;
 
        if (scp->state == DN_RUN)
@@ -867,11 +866,11 @@ static int dn_wait_run(struct sock *sk, long *timeo)
        if (!*timeo)
                return -EALREADY;
 
-       prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+       add_wait_queue(sk_sleep(sk), &wait);
        for(;;) {
                release_sock(sk);
                if (scp->state == DN_CI || scp->state == DN_CC)
-                       *timeo = schedule_timeout(*timeo);
+                       *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
                lock_sock(sk);
                err = 0;
                if (scp->state == DN_RUN)
@@ -885,9 +884,8 @@ static int dn_wait_run(struct sock *sk, long *timeo)
                err = -ETIMEDOUT;
                if (!*timeo)
                        break;
-               prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        }
-       finish_wait(sk_sleep(sk), &wait);
+       remove_wait_queue(sk_sleep(sk), &wait);
 out:
        if (err == 0) {
                sk->sk_socket->state = SS_CONNECTED;
@@ -1032,16 +1030,16 @@ static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
 
 static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
 {
-       DEFINE_WAIT(wait);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct sk_buff *skb = NULL;
        int err = 0;
 
-       prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+       add_wait_queue(sk_sleep(sk), &wait);
        for(;;) {
                release_sock(sk);
                skb = skb_dequeue(&sk->sk_receive_queue);
                if (skb == NULL) {
-                       *timeo = schedule_timeout(*timeo);
+                       *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
                        skb = skb_dequeue(&sk->sk_receive_queue);
                }
                lock_sock(sk);
@@ -1056,9 +1054,8 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
                err = -EAGAIN;
                if (!*timeo)
                        break;
-               prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        }
-       finish_wait(sk_sleep(sk), &wait);
+       remove_wait_queue(sk_sleep(sk), &wait);
 
        return skb == NULL ? ERR_PTR(err) : skb;
 }
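
All three converted call sites share the same shape. The point of the conversion is that wait_woken() records a wakeup in the wait entry itself, so one arriving between release_sock() and the sleep is not lost the way it could be with the old prepare_to_wait()/schedule_timeout() sequence. A condensed sketch (condition_met() is a placeholder for each call site's state check, not a real function):

    DEFINE_WAIT_FUNC(wait, woken_wake_function);

    add_wait_queue(sk_sleep(sk), &wait);
    while (!condition_met(sk) && *timeo)
            *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
    remove_wait_queue(sk_sleep(sk), &wait);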
index ffbba1e..532085d 100644 (file)
@@ -1808,6 +1808,7 @@ void dsa_slave_setup_tagger(struct net_device *slave)
        struct dsa_slave_priv *p = netdev_priv(slave);
        const struct dsa_port *cpu_dp = dp->cpu_dp;
        struct net_device *master = cpu_dp->master;
+       const struct dsa_switch *ds = dp->ds;
 
        slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
        slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
@@ -1819,6 +1820,14 @@ void dsa_slave_setup_tagger(struct net_device *slave)
        slave->needed_tailroom += master->needed_tailroom;
 
        p->xmit = cpu_dp->tag_ops->xmit;
+
+       slave->features = master->vlan_features | NETIF_F_HW_TC;
+       if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
+               slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+       slave->hw_features |= NETIF_F_HW_TC;
+       slave->features |= NETIF_F_LLTX;
+       if (slave->needed_tailroom)
+               slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
 }
 
 static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
@@ -1881,11 +1890,6 @@ int dsa_slave_create(struct dsa_port *port)
        if (slave_dev == NULL)
                return -ENOMEM;
 
-       slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
-       if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
-               slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-       slave_dev->hw_features |= NETIF_F_HW_TC;
-       slave_dev->features |= NETIF_F_LLTX;
        slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
        if (!is_zero_ether_addr(port->mac))
                ether_addr_copy(slave_dev->dev_addr, port->mac);
index af71b86..5ece05d 100644 (file)
@@ -113,11 +113,11 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
        int err, port;
 
        if (dst->index == info->tree_index && ds->index == info->sw_index &&
-           ds->ops->port_bridge_join)
+           ds->ops->port_bridge_leave)
                ds->ops->port_bridge_leave(ds, info->port, info->br);
 
        if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
-           ds->ops->crosschip_bridge_join)
+           ds->ops->crosschip_bridge_leave)
                ds->ops->crosschip_bridge_leave(ds, info->tree_index,
                                                info->sw_index, info->port,
                                                info->br);
@@ -427,7 +427,7 @@ static int dsa_switch_lag_join(struct dsa_switch *ds,
                                                   info->port, info->lag,
                                                   info->info);
 
-       return 0;
+       return -EOPNOTSUPP;
 }
 
 static int dsa_switch_lag_leave(struct dsa_switch *ds,
@@ -440,7 +440,7 @@ static int dsa_switch_lag_leave(struct dsa_switch *ds,
                return ds->ops->crosschip_lag_leave(ds, info->sw_index,
                                                    info->port, info->lag);
 
-       return 0;
+       return -EOPNOTSUPP;
 }
 
 static int dsa_switch_mdb_add(struct dsa_switch *ds,
index 53565f4..a201ccf 100644 (file)
@@ -53,6 +53,9 @@ static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev)
        u8 *tag;
        u8 *addr;
 
+       if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
+               return NULL;
+
        /* Tag encoding */
        tag = skb_put(skb, KSZ_INGRESS_TAG_LEN);
        addr = skb_mac_header(skb);
@@ -114,6 +117,9 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
        u8 *addr;
        u16 val;
 
+       if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
+               return NULL;
+
        /* Tag encoding */
        tag = skb_put(skb, KSZ9477_INGRESS_TAG_LEN);
        addr = skb_mac_header(skb);
@@ -164,6 +170,9 @@ static struct sk_buff *ksz9893_xmit(struct sk_buff *skb,
        u8 *addr;
        u8 *tag;
 
+       if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
+               return NULL;
+
        /* Tag encoding */
        tag = skb_put(skb, KSZ_INGRESS_TAG_LEN);
        addr = skb_mac_header(skb);
index 723c9a8..0a19470 100644 (file)
@@ -7,4 +7,4 @@ obj-$(CONFIG_ETHTOOL_NETLINK)   += ethtool_nl.o
 ethtool_nl-y   := netlink.o bitset.o strset.o linkinfo.o linkmodes.o \
                   linkstate.o debug.o wol.o features.o privflags.o rings.o \
                   channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \
-                  tunnels.o fec.o eeprom.o stats.o
+                  tunnels.o fec.o eeprom.o stats.o phc_vclocks.o
index f9dcbad..c63e073 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/net_tstamp.h>
 #include <linux/phy.h>
 #include <linux/rtnetlink.h>
+#include <linux/ptp_clock_kernel.h>
 
 #include "common.h"
 
@@ -397,6 +398,7 @@ const char sof_timestamping_names[][ETH_GSTRING_LEN] = {
        [const_ilog2(SOF_TIMESTAMPING_OPT_STATS)]    = "option-stats",
        [const_ilog2(SOF_TIMESTAMPING_OPT_PKTINFO)]  = "option-pktinfo",
        [const_ilog2(SOF_TIMESTAMPING_OPT_TX_SWHW)]  = "option-tx-swhw",
+       [const_ilog2(SOF_TIMESTAMPING_BIND_PHC)]     = "bind-phc",
 };
 static_assert(ARRAY_SIZE(sof_timestamping_names) == __SOF_TIMESTAMPING_CNT);
 
@@ -554,6 +556,18 @@ int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
        return 0;
 }
 
+int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index)
+{
+       struct ethtool_ts_info info = { };
+       int num = 0;
+
+       if (!__ethtool_get_ts_info(dev, &info))
+               num = ptp_get_vclocks_index(info.phc_index, vclock_index);
+
+       return num;
+}
+EXPORT_SYMBOL(ethtool_get_phc_vclocks);
+
 const struct ethtool_phy_ops *ethtool_phy_ops;
 
 void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops)
index a734634..73e0f5b 100644 (file)
@@ -248,6 +248,7 @@ ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = {
        [ETHTOOL_MSG_TSINFO_GET]        = &ethnl_tsinfo_request_ops,
        [ETHTOOL_MSG_MODULE_EEPROM_GET] = &ethnl_module_eeprom_request_ops,
        [ETHTOOL_MSG_STATS_GET]         = &ethnl_stats_request_ops,
+       [ETHTOOL_MSG_PHC_VCLOCKS_GET]   = &ethnl_phc_vclocks_request_ops,
 };
 
 static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb)
@@ -958,6 +959,15 @@ static const struct genl_ops ethtool_genl_ops[] = {
                .policy = ethnl_stats_get_policy,
                .maxattr = ARRAY_SIZE(ethnl_stats_get_policy) - 1,
        },
+       {
+               .cmd    = ETHTOOL_MSG_PHC_VCLOCKS_GET,
+               .doit   = ethnl_default_doit,
+               .start  = ethnl_default_start,
+               .dumpit = ethnl_default_dumpit,
+               .done   = ethnl_default_done,
+               .policy = ethnl_phc_vclocks_get_policy,
+               .maxattr = ARRAY_SIZE(ethnl_phc_vclocks_get_policy) - 1,
+       },
 };
 
 static const struct genl_multicast_group ethtool_nl_mcgrps[] = {
index 3e25a47..3fc395c 100644 (file)
@@ -347,6 +347,7 @@ extern const struct ethnl_request_ops ethnl_tsinfo_request_ops;
 extern const struct ethnl_request_ops ethnl_fec_request_ops;
 extern const struct ethnl_request_ops ethnl_module_eeprom_request_ops;
 extern const struct ethnl_request_ops ethnl_stats_request_ops;
+extern const struct ethnl_request_ops ethnl_phc_vclocks_request_ops;
 
 extern const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_FLAGS + 1];
 extern const struct nla_policy ethnl_header_policy_stats[ETHTOOL_A_HEADER_FLAGS + 1];
@@ -382,6 +383,7 @@ extern const struct nla_policy ethnl_fec_get_policy[ETHTOOL_A_FEC_HEADER + 1];
 extern const struct nla_policy ethnl_fec_set_policy[ETHTOOL_A_FEC_AUTO + 1];
 extern const struct nla_policy ethnl_module_eeprom_get_policy[ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS + 1];
 extern const struct nla_policy ethnl_stats_get_policy[ETHTOOL_A_STATS_GROUPS + 1];
+extern const struct nla_policy ethnl_phc_vclocks_get_policy[ETHTOOL_A_PHC_VCLOCKS_HEADER + 1];
 
 int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info);
 int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/ethtool/phc_vclocks.c b/net/ethtool/phc_vclocks.c
new file mode 100644 (file)
index 0000000..637b2f5
--- /dev/null
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 NXP
+ */
+#include "netlink.h"
+#include "common.h"
+
+struct phc_vclocks_req_info {
+       struct ethnl_req_info           base;
+};
+
+struct phc_vclocks_reply_data {
+       struct ethnl_reply_data         base;
+       int                             num;
+       int                             *index;
+};
+
+#define PHC_VCLOCKS_REPDATA(__reply_base) \
+       container_of(__reply_base, struct phc_vclocks_reply_data, base)
+
+const struct nla_policy ethnl_phc_vclocks_get_policy[] = {
+       [ETHTOOL_A_PHC_VCLOCKS_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+};
+
+static int phc_vclocks_prepare_data(const struct ethnl_req_info *req_base,
+                                   struct ethnl_reply_data *reply_base,
+                                   struct genl_info *info)
+{
+       struct phc_vclocks_reply_data *data = PHC_VCLOCKS_REPDATA(reply_base);
+       struct net_device *dev = reply_base->dev;
+       int ret;
+
+       ret = ethnl_ops_begin(dev);
+       if (ret < 0)
+               return ret;
+       data->num = ethtool_get_phc_vclocks(dev, &data->index);
+       ethnl_ops_complete(dev);
+
+       return ret;
+}
+
+static int phc_vclocks_reply_size(const struct ethnl_req_info *req_base,
+                                 const struct ethnl_reply_data *reply_base)
+{
+       const struct phc_vclocks_reply_data *data =
+               PHC_VCLOCKS_REPDATA(reply_base);
+       int len = 0;
+
+       if (data->num > 0) {
+               len += nla_total_size(sizeof(u32));
+               len += nla_total_size(sizeof(s32) * data->num);
+       }
+
+       return len;
+}
+
+static int phc_vclocks_fill_reply(struct sk_buff *skb,
+                                 const struct ethnl_req_info *req_base,
+                                 const struct ethnl_reply_data *reply_base)
+{
+       const struct phc_vclocks_reply_data *data =
+               PHC_VCLOCKS_REPDATA(reply_base);
+
+       if (data->num <= 0)
+               return 0;
+
+       if (nla_put_u32(skb, ETHTOOL_A_PHC_VCLOCKS_NUM, data->num) ||
+           nla_put(skb, ETHTOOL_A_PHC_VCLOCKS_INDEX,
+                   sizeof(s32) * data->num, data->index))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
+static void phc_vclocks_cleanup_data(struct ethnl_reply_data *reply_base)
+{
+       const struct phc_vclocks_reply_data *data =
+               PHC_VCLOCKS_REPDATA(reply_base);
+
+       kfree(data->index);
+}
+
+const struct ethnl_request_ops ethnl_phc_vclocks_request_ops = {
+       .request_cmd            = ETHTOOL_MSG_PHC_VCLOCKS_GET,
+       .reply_cmd              = ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY,
+       .hdr_attr               = ETHTOOL_A_PHC_VCLOCKS_HEADER,
+       .req_info_size          = sizeof(struct phc_vclocks_req_info),
+       .reply_data_size        = sizeof(struct phc_vclocks_reply_data),
+
+       .prepare_data           = phc_vclocks_prepare_data,
+       .reply_size             = phc_vclocks_reply_size,
+       .fill_reply             = phc_vclocks_fill_reply,
+       .cleanup_data           = phc_vclocks_cleanup_data,
+};
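
ethtool_get_phc_vclocks() allocates the index array on the caller's behalf, which is why phc_vclocks_cleanup_data() above ends in kfree(). A minimal kernel-side caller, mirroring the contract already used by sock_timestamping_bind_phc() earlier in this series (helper name illustrative):

    static void dump_phc_vclocks(struct net_device *dev)
    {
            int *vclock_index;
            int i, num;

            num = ethtool_get_phc_vclocks(dev, &vclock_index);
            for (i = 0; i < num; i++)
                    pr_info("phc vclock %d -> index %d\n", i, vclock_index[i]);
            if (num > 0)
                    kfree(vclock_index);    /* caller owns the array */
    }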
index a933bd6..9fe13e4 100644 (file)
@@ -1376,7 +1376,7 @@ static void nl_fib_input(struct sk_buff *skb)
        portid = NETLINK_CB(skb).portid;      /* netlink portid */
        NETLINK_CB(skb).portid = 0;        /* from kernel */
        NETLINK_CB(skb).dst_group = 0;  /* unicast */
-       netlink_unicast(net->ipv4.fibnl, skb, portid, MSG_DONTWAIT);
+       nlmsg_unicast(net->ipv4.fibnl, skb, portid);
 }
 
 static int __net_init nl_fib_lookup_init(struct net *net)
index e65f4ef..ef78972 100644 (file)
@@ -580,10 +580,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
                nlmsg_free(rep);
                goto out;
        }
-       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
-                             MSG_DONTWAIT);
-       if (err > 0)
-               err = 0;
+       err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
 
 out:
        if (sk)
index f6cc26d..be75b40 100644 (file)
@@ -317,7 +317,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
        }
 
        dev->needed_headroom = t_hlen + hlen;
-       mtu -= t_hlen;
+       mtu -= t_hlen + (dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0);
 
        if (mtu < IPV4_MIN_MTU)
                mtu = IPV4_MIN_MTU;
@@ -348,6 +348,9 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
        t_hlen = nt->hlen + sizeof(struct iphdr);
        dev->min_mtu = ETH_MIN_MTU;
        dev->max_mtu = IP_MAX_MTU - t_hlen;
+       if (dev->type == ARPHRD_ETHER)
+               dev->max_mtu -= dev->hard_header_len;
+
        ip_tunnel_add(itn, nt);
        return nt;
 
@@ -387,7 +390,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
                tunnel->i_seqno = ntohl(tpi->seq) + 1;
        }
 
-       skb_reset_network_header(skb);
+       skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);
 
        err = IP_ECN_decapsulate(iph, skb);
        if (unlikely(err)) {
@@ -489,11 +492,14 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
 
        tunnel_hlen = md ? tunnel_hlen : tunnel->hlen;
        pkt_size = skb->len - tunnel_hlen;
+       pkt_size -= dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0;
 
-       if (df)
+       if (df) {
                mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel_hlen);
-       else
+               mtu -= dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0;
+       } else {
                mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
+       }
 
        if (skb_valid_dst(skb))
                skb_dst_update_pmtu_no_confirm(skb, mtu);
@@ -972,6 +978,9 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);
        int max_mtu = IP_MAX_MTU - t_hlen;
 
+       if (dev->type == ARPHRD_ETHER)
+               max_mtu -= dev->hard_header_len;
+
        if (new_mtu < ETH_MIN_MTU)
                return -EINVAL;
 
@@ -1149,6 +1158,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
        if (tb[IFLA_MTU]) {
                unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr));
 
+               if (dev->type == ARPHRD_ETHER)
+                       max -= dev->hard_header_len;
+
                mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max);
        }
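
All of the ARPHRD_ETHER adjustments above reserve room for the Ethernet header the device itself prepends, which the old math left out. With illustrative numbers for a gretap-style device (4-byte GRE header plus a 4-byte key, so t_hlen = 8 + 20 = 28, and hard_header_len = ETH_HLEN = 14):

    max_mtu = IP_MAX_MTU - t_hlen - hard_header_len
            = 65535 - 28 - 14
            = 65493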
 
index 7b12a40..2dda856 100644 (file)
@@ -2119,7 +2119,7 @@ int ip_mr_input(struct sk_buff *skb)
                                raw_rcv(mroute_sk, skb);
                                return 0;
                        }
-                   }
+               }
        }
 
        /* already under rcu_read_lock() */
index 1b5b8af..ccacbde 100644 (file)
@@ -119,11 +119,8 @@ static int raw_diag_dump_one(struct netlink_callback *cb,
                return err;
        }
 
-       err = netlink_unicast(net->diag_nlsk, rep,
-                             NETLINK_CB(in_skb).portid,
-                             MSG_DONTWAIT);
-       if (err > 0)
-               err = 0;
+       err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+
        return err;
 }
 
index d5ab5f2..8cb4404 100644 (file)
@@ -1375,6 +1375,9 @@ new_segment:
                        }
                        pfrag->offset += copy;
                } else {
+                       if (!sk_wmem_schedule(sk, copy))
+                               goto wait_for_space;
+
                        err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
                        if (err == -EMSGSIZE || err == -EEXIST) {
                                tcp_mark_push(tp, skb);
index f26916a..d3e9386 100644 (file)
@@ -503,7 +503,7 @@ static int __init tcp_bpf_v4_build_proto(void)
        tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
        return 0;
 }
-core_initcall(tcp_bpf_v4_build_proto);
+late_initcall(tcp_bpf_v4_build_proto);
 
 static int tcp_bpf_assert_proto_ops(struct proto *ops)
 {
index 47c3260..25fa4c0 100644 (file)
@@ -507,8 +507,18 @@ void tcp_fastopen_active_disable(struct sock *sk)
 {
        struct net *net = sock_net(sk);
 
+       if (!sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout)
+               return;
+
+       /* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
+       WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);
+
+       /* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
+        * We want net->ipv4.tfo_active_disable_stamp to be updated first.
+        */
+       smp_mb__before_atomic();
        atomic_inc(&net->ipv4.tfo_active_disable_times);
-       net->ipv4.tfo_active_disable_stamp = jiffies;
+
        NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
 }
 
@@ -519,17 +529,27 @@ void tcp_fastopen_active_disable(struct sock *sk)
 bool tcp_fastopen_active_should_disable(struct sock *sk)
 {
        unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
-       int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
        unsigned long timeout;
+       int tfo_da_times;
        int multiplier;
 
+       if (!tfo_bh_timeout)
+               return false;
+
+       tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
        if (!tfo_da_times)
                return false;
 
+       /* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
+       smp_rmb();
+
        /* Limit timeout to max: 2^6 * initial timeout */
        multiplier = 1 << min(tfo_da_times - 1, 6);
-       timeout = multiplier * tfo_bh_timeout * HZ;
-       if (time_before(jiffies, sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout))
+
+       /* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
+       timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
+                 multiplier * tfo_bh_timeout * HZ;
+       if (time_before(jiffies, timeout))
                return true;
 
        /* Mark check bit so we can check for successful active TFO
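
The two barriers pair in the classic publish/subscribe pattern: the writer must make the stamp visible before bumping the counter, and the reader must observe the counter before trusting the stamp. Condensed (illustrative only; backoff stands in for the multiplier computation above):

    /* writer (tcp_fastopen_active_disable) */
    WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);
    smp_mb__before_atomic();        /* stamp visible before the count */
    atomic_inc(&net->ipv4.tfo_active_disable_times);

    /* reader (tcp_fastopen_active_should_disable) */
    if (!atomic_read(&net->ipv4.tfo_active_disable_times))
            return false;
    smp_rmb();                      /* count observed before the stamp */
    timeout = READ_ONCE(net->ipv4.tfo_active_disable_stamp) + backoff;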
index e6ca5a1..149ceb5 100644 (file)
@@ -4247,6 +4247,9 @@ void tcp_reset(struct sock *sk, struct sk_buff *skb)
 {
        trace_tcp_receive_reset(sk);
 
+       /* mptcp can't tell us to ignore reset pkts,
+        * so just ignore the return value of mptcp_incoming_options().
+        */
        if (sk_is_mptcp(sk))
                mptcp_incoming_options(sk, skb);
 
@@ -4941,8 +4944,13 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
        bool fragstolen;
        int eaten;
 
-       if (sk_is_mptcp(sk))
-               mptcp_incoming_options(sk, skb);
+       /* If a subflow has been reset, the packet should not continue
+        * to be processed, drop the packet.
+        */
+       if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb)) {
+               __kfree_skb(skb);
+               return;
+       }
 
        if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
                __kfree_skb(skb);
@@ -5922,8 +5930,8 @@ void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb)
                tp->snd_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
        tp->snd_cwnd_stamp = tcp_jiffies32;
 
-       icsk->icsk_ca_initialized = 0;
        bpf_skops_established(sk, bpf_op, skb);
+       /* Initialize congestion control unless BPF initialized it already: */
        if (!icsk->icsk_ca_initialized)
                tcp_init_congestion_control(sk);
        tcp_init_buffer_space(sk);
@@ -6523,8 +6531,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
        case TCP_CLOSING:
        case TCP_LAST_ACK:
                if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-                       if (sk_is_mptcp(sk))
-                               mptcp_incoming_options(sk, skb);
+                       /* If a subflow has been reset, the packet should not
+                        * continue to be processed, drop the packet.
+                        */
+                       if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb))
+                               goto discard;
                        break;
                }
                fallthrough;
index e66ad6b..a692626 100644 (file)
@@ -342,7 +342,7 @@ void tcp_v4_mtu_reduced(struct sock *sk)
 
        if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
                return;
-       mtu = tcp_sk(sk)->mtu_info;
+       mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
        dst = inet_csk_update_pmtu(sk, mtu);
        if (!dst)
                return;
@@ -546,7 +546,7 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
                        if (sk->sk_state == TCP_LISTEN)
                                goto out;
 
-                       tp->mtu_info = info;
+                       WRITE_ONCE(tp->mtu_info, info);
                        if (!sock_owned_by_user(sk)) {
                                tcp_v4_mtu_reduced(sk);
                        } else {
@@ -2965,7 +2965,7 @@ static int __net_init tcp_sk_init(struct net *net)
        net->ipv4.sysctl_tcp_comp_sack_nr = 44;
        net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
        spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
-       net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
+       net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0;
        atomic_set(&net->ipv4.tfo_active_disable_times, 0);
 
        /* Reno is always built in */
index bde781f..29553fc 100644 (file)
@@ -1732,6 +1732,7 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu)
        return __tcp_mtu_to_mss(sk, pmtu) -
               (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
 }
+EXPORT_SYMBOL(tcp_mtu_to_mss);
 
 /* Inverse of above */
 int tcp_mss_to_mtu(struct sock *sk, int mss)
index 6268280..1a742b7 100644 (file)
@@ -645,10 +645,12 @@ static struct sock *__udp4_lib_err_encap(struct net *net,
                                         const struct iphdr *iph,
                                         struct udphdr *uh,
                                         struct udp_table *udptable,
+                                        struct sock *sk,
                                         struct sk_buff *skb, u32 info)
 {
+       int (*lookup)(struct sock *sk, struct sk_buff *skb);
        int network_offset, transport_offset;
-       struct sock *sk;
+       struct udp_sock *up;
 
        network_offset = skb_network_offset(skb);
        transport_offset = skb_transport_offset(skb);
@@ -659,18 +661,28 @@ static struct sock *__udp4_lib_err_encap(struct net *net,
        /* Transport header needs to point to the UDP header */
        skb_set_transport_header(skb, iph->ihl << 2);
 
+       if (sk) {
+               up = udp_sk(sk);
+
+               lookup = READ_ONCE(up->encap_err_lookup);
+               if (lookup && lookup(sk, skb))
+                       sk = NULL;
+
+               goto out;
+       }
+
        sk = __udp4_lib_lookup(net, iph->daddr, uh->source,
                               iph->saddr, uh->dest, skb->dev->ifindex, 0,
                               udptable, NULL);
        if (sk) {
-               int (*lookup)(struct sock *sk, struct sk_buff *skb);
-               struct udp_sock *up = udp_sk(sk);
+               up = udp_sk(sk);
 
                lookup = READ_ONCE(up->encap_err_lookup);
                if (!lookup || lookup(sk, skb))
                        sk = NULL;
        }
 
+out:
        if (!sk)
                sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));
 
@@ -707,15 +719,16 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
        sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
                               iph->saddr, uh->source, skb->dev->ifindex,
                               inet_sdif(skb), udptable, NULL);
+
        if (!sk || udp_sk(sk)->encap_type) {
                /* No socket for error: try tunnels before discarding */
-               sk = ERR_PTR(-ENOENT);
                if (static_branch_unlikely(&udp_encap_needed_key)) {
-                       sk = __udp4_lib_err_encap(net, iph, uh, udptable, skb,
+                       sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb,
                                                  info);
                        if (!sk)
                                return 0;
-               }
+               } else
+                       sk = ERR_PTR(-ENOENT);
 
                if (IS_ERR(sk)) {
                        __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
@@ -1102,7 +1115,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        }
 
        ipcm_init_sk(&ipc, inet);
-       ipc.gso_size = up->gso_size;
+       ipc.gso_size = READ_ONCE(up->gso_size);
 
        if (msg->msg_controllen) {
                err = udp_cmsg_send(sk, msg, &ipc.gso_size);
@@ -2695,7 +2708,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
        case UDP_SEGMENT:
                if (val < 0 || val > USHRT_MAX)
                        return -EINVAL;
-               up->gso_size = val;
+               WRITE_ONCE(up->gso_size, val);
                break;
 
        case UDP_GRO:
@@ -2790,7 +2803,7 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
                break;
 
        case UDP_SEGMENT:
-               val = up->gso_size;
+               val = READ_ONCE(up->gso_size);
                break;
 
        case UDP_GRO:
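
The annotations matter because up->gso_size is read and written locklessly across threads. For reference, the racing userspace setter (standard UDP GSO API; the helper name is illustrative):

    #include <linux/udp.h>          /* UDP_SEGMENT */
    #include <netinet/in.h>         /* IPPROTO_UDP */
    #include <sys/socket.h>

    /* Illustrative setter: one thread can change UDP_SEGMENT while
     * another is inside udp_sendmsg(), which is why up->gso_size is
     * now accessed with WRITE_ONCE()/READ_ONCE() instead of a lock. */
    static int set_udp_gso(int fd, int gso_size)
    {
            return setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
                              &gso_size, sizeof(gso_size));
    }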
index 45b8782..9f5a5cd 100644 (file)
@@ -134,7 +134,7 @@ static int __init udp_bpf_v4_build_proto(void)
        udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV4], &udp_prot);
        return 0;
 }
-core_initcall(udp_bpf_v4_build_proto);
+late_initcall(udp_bpf_v4_build_proto);
 
 int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
 {
index b2cee9a..1ed8c4d 100644 (file)
@@ -77,10 +77,8 @@ static int udp_dump_one(struct udp_table *tbl,
                kfree_skb(rep);
                goto out;
        }
-       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
-                             MSG_DONTWAIT);
-       if (err > 0)
-               err = 0;
+       err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+
 out:
        if (sk)
                sock_put(sk);
index 54e06b8..9dde1e5 100644 (file)
@@ -525,8 +525,10 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
 
                if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
                    (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist)
-                       pp = call_gro_receive(udp_gro_receive_segment, head, skb);
-               return pp;
+                       return call_gro_receive(udp_gro_receive_segment, head, skb);
+
+               /* no GRO, be sure to flush the current packet */
+               goto out;
        }
 
        if (NAPI_GRO_CB(skb)->encap_mark ||
index 984050f..8e6ca9a 100644 (file)
@@ -60,10 +60,38 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
 {
        struct dst_entry *dst = skb_dst(skb);
        struct net_device *dev = dst->dev;
+       unsigned int hh_len = LL_RESERVED_SPACE(dev);
+       int delta = hh_len - skb_headroom(skb);
        const struct in6_addr *nexthop;
        struct neighbour *neigh;
        int ret;
 
+       /* Be paranoid, rather than too clever. */
+       if (unlikely(delta > 0) && dev->header_ops) {
+               /* pskb_expand_head() might crash, if skb is shared */
+               if (skb_shared(skb)) {
+                       struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+                       if (likely(nskb)) {
+                               if (skb->sk)
+                                       skb_set_owner_w(nskb, skb->sk);
+                               consume_skb(skb);
+                       } else {
+                               kfree_skb(skb);
+                       }
+                       skb = nskb;
+               }
+               if (skb &&
+                   pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
+                       kfree_skb(skb);
+                       skb = NULL;
+               }
+               if (!skb) {
+                       IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
+                       return -ENOMEM;
+               }
+       }
+
        if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
                struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 
@@ -479,7 +507,9 @@ int ip6_forward(struct sk_buff *skb)
        if (skb_warn_if_lro(skb))
                goto drop;
 
-       if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
+       if (!net->ipv6.devconf_all->disable_policy &&
+           !idev->cnf.disable_policy &&
+           !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
                __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
                goto drop;
        }
@@ -519,9 +549,10 @@ int ip6_forward(struct sk_buff *skb)
        if (net->ipv6.devconf_all->proxy_ndp &&
            pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
                int proxied = ip6_forward_proxy_check(skb);
-               if (proxied > 0)
+               if (proxied > 0) {
+                       hdr->hop_limit--;
                        return ip6_input(skb);
-               else if (proxied < 0) {
+               } else if (proxied < 0) {
                        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
                        goto drop;
                }
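
The first ip6_output.c hunk grows the skb headroom when LL_RESERVED_SPACE() of the egress device exceeds what the packet carries, cloning first because pskb_expand_head() must not run on a shared skb. A userspace model of the underlying "ensure headroom before prepending a header" pattern, with plain heap buffers standing in for skbs (all names below are illustrative):

    #include <stdlib.h>
    #include <string.h>

    struct buf {
        unsigned char *head;   /* start of allocation */
        unsigned char *data;   /* start of payload */
        size_t len;            /* payload length */
    };

    /* Grow headroom to at least 'need' bytes, reallocating if required;
     * mirrors the delta = hh_len - skb_headroom(skb) check above. */
    static int ensure_headroom(struct buf *b, size_t need)
    {
        size_t headroom = (size_t)(b->data - b->head);
        unsigned char *nhead;

        if (headroom >= need)
            return 0;
        nhead = malloc(need + b->len);
        if (!nhead)
            return -1;                 /* caller drops the packet */
        memcpy(nhead + need, b->data, b->len);
        free(b->head);
        b->head = nhead;
        b->data = nhead + need;
        return 0;
    }

    int main(void)
    {
        struct buf b;

        b.head = malloc(8);
        b.data = b.head;               /* no headroom yet */
        b.len = 5;
        memcpy(b.data, "hello", 5);

        if (ensure_headroom(&b, 16) == 0)
            memcpy(b.data - 16, "<16-byte header>", 16);  /* safe prepend */
        free(b.head);
        return 0;
    }
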
index 7b756a7..b6ddf23 100644 (file)
@@ -3769,7 +3769,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
                err = PTR_ERR(rt->fib6_metrics);
                /* Do not leave garbage there. */
                rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
-               goto out;
+               goto out_free;
        }
 
        if (cfg->fc_flags & RTF_ADDRCONF)
index 578ab63..0ce52d4 100644 (file)
@@ -348,11 +348,20 @@ failure:
 static void tcp_v6_mtu_reduced(struct sock *sk)
 {
        struct dst_entry *dst;
+       u32 mtu;
 
        if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
                return;
 
-       dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
+       mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
+
+       /* Drop requests trying to increase our current mss.
+        * Check done in __ip6_rt_update_pmtu() is too late.
+        */
+       if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
+               return;
+
+       dst = inet6_csk_update_pmtu(sk, mtu);
        if (!dst)
                return;
 
@@ -433,6 +442,8 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        }
 
        if (type == ICMPV6_PKT_TOOBIG) {
+               u32 mtu = ntohl(info);
+
                /* We are not interested in TCP_LISTEN and open_requests
                 * (SYN-ACKs send out by Linux are always <576bytes so
                 * they should go through unfragmented).
@@ -443,7 +454,11 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                if (!ip6_sk_accept_pmtu(sk))
                        goto out;
 
-               tp->mtu_info = ntohl(info);
+               if (mtu < IPV6_MIN_MTU)
+                       goto out;
+
+               WRITE_ONCE(tp->mtu_info, mtu);
+
                if (!sock_owned_by_user(sk))
                        tcp_v6_mtu_reduced(sk);
                else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
@@ -540,7 +555,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                opt = ireq->ipv6_opt;
                if (!opt)
                        opt = rcu_dereference(np->opt);
-               err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt,
+               err = ip6_xmit(sk, skb, fl6, skb->mark ? : sk->sk_mark, opt,
                               tclass, sk->sk_priority);
                rcu_read_unlock();
                err = net_xmit_eval(err);
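
The tcp_ipv6.c changes validate ICMPV6_PKT_TOOBIG reports twice: an advertised MTU below IPV6_MIN_MTU (1280) is ignored outright, and tcp_v6_mtu_reduced() now drops updates that would not actually shrink the cached MSS. A standalone model of that filter; tcp_mtu_to_mss() is reduced here to a fixed 60-byte header overhead, which is a simplification of the real accounting:

    #include <stdbool.h>
    #include <stdio.h>

    #define IPV6_MIN_MTU 1280
    #define HDR_OVERHEAD (40 + 20)   /* simplified: IPv6 + TCP, no options */

    static int mtu_to_mss(unsigned int mtu) { return (int)mtu - HDR_OVERHEAD; }

    /* Should a Packet Too Big report with 'mtu' shrink a connection
     * whose current MSS is 'mss_cache'? */
    static bool pmtu_update_wanted(unsigned int mtu, int mss_cache)
    {
        if (mtu < IPV6_MIN_MTU)           /* bogus/abusive report: ignore */
            return false;
        if (mtu_to_mss(mtu) >= mss_cache) /* would not reduce MSS: ignore */
            return false;
        return true;
    }

    int main(void)
    {
        printf("%d\n", pmtu_update_wanted(1000, 1440));  /* 0: below minimum */
        printf("%d\n", pmtu_update_wanted(9000, 1440));  /* 0: no shrink */
        printf("%d\n", pmtu_update_wanted(1280, 1440));  /* 1: clamp to 1220 */
        return 0;
    }
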
index 368972d..c5e15e9 100644 (file)
@@ -502,12 +502,14 @@ static struct sock *__udp6_lib_err_encap(struct net *net,
                                         const struct ipv6hdr *hdr, int offset,
                                         struct udphdr *uh,
                                         struct udp_table *udptable,
+                                        struct sock *sk,
                                         struct sk_buff *skb,
                                         struct inet6_skb_parm *opt,
                                         u8 type, u8 code, __be32 info)
 {
+       int (*lookup)(struct sock *sk, struct sk_buff *skb);
        int network_offset, transport_offset;
-       struct sock *sk;
+       struct udp_sock *up;
 
        network_offset = skb_network_offset(skb);
        transport_offset = skb_transport_offset(skb);
@@ -518,18 +520,28 @@ static struct sock *__udp6_lib_err_encap(struct net *net,
        /* Transport header needs to point to the UDP header */
        skb_set_transport_header(skb, offset);
 
+       if (sk) {
+               up = udp_sk(sk);
+
+               lookup = READ_ONCE(up->encap_err_lookup);
+               if (lookup && lookup(sk, skb))
+                       sk = NULL;
+
+               goto out;
+       }
+
        sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
                               &hdr->saddr, uh->dest,
                               inet6_iif(skb), 0, udptable, skb);
        if (sk) {
-               int (*lookup)(struct sock *sk, struct sk_buff *skb);
-               struct udp_sock *up = udp_sk(sk);
+               up = udp_sk(sk);
 
                lookup = READ_ONCE(up->encap_err_lookup);
                if (!lookup || lookup(sk, skb))
                        sk = NULL;
        }
 
+out:
        if (!sk) {
                sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
                                                        offset, info));
@@ -558,16 +570,17 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
                               inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
+
        if (!sk || udp_sk(sk)->encap_type) {
                /* No socket for error: try tunnels before discarding */
-               sk = ERR_PTR(-ENOENT);
                if (static_branch_unlikely(&udpv6_encap_needed_key)) {
                        sk = __udp6_lib_err_encap(net, hdr, offset, uh,
-                                                 udptable, skb,
+                                                 udptable, sk, skb,
                                                  opt, type, code, info);
                        if (!sk)
                                return 0;
-               }
+               } else
+                       sk = ERR_PTR(-ENOENT);
 
                if (IS_ERR(sk)) {
                        __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
@@ -1296,7 +1309,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
 
        ipcm6_init(&ipc6);
-       ipc6.gso_size = up->gso_size;
+       ipc6.gso_size = READ_ONCE(up->gso_size);
        ipc6.sockc.tsflags = sk->sk_tsflags;
        ipc6.sockc.mark = sk->sk_mark;
 
index 57fa27c..d0d2800 100644 (file)
@@ -49,7 +49,7 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_state *x = dst->xfrm;
-       int mtu;
+       unsigned int mtu;
        bool toobig;
 
 #ifdef CONFIG_NETFILTER
index 349c6ac..e6795d5 100644 (file)
@@ -1635,14 +1635,16 @@ struct iucv_message_pending {
        u8  iptype;
        u32 ipmsgid;
        u32 iptrgcls;
-       union {
-               u32 iprmmsg1_u32;
-               u8  iprmmsg1[4];
-       } ln1msg1;
-       union {
-               u32 ipbfln1f;
-               u8  iprmmsg2[4];
-       } ln1msg2;
+       struct {
+               union {
+                       u32 iprmmsg1_u32;
+                       u8  iprmmsg1[4];
+               } ln1msg1;
+               union {
+                       u32 ipbfln1f;
+                       u8  iprmmsg2[4];
+               } ln1msg2;
+       } rmmsg;
        u32 res1[3];
        u32 ipbfln2f;
        u8  ippollfg;
@@ -1660,10 +1662,10 @@ static void iucv_message_pending(struct iucv_irq_data *data)
                msg.id = imp->ipmsgid;
                msg.class = imp->iptrgcls;
                if (imp->ipflags1 & IUCV_IPRMDATA) {
-                       memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8);
+                       memcpy(msg.rmmsg, &imp->rmmsg, 8);
                        msg.length = 8;
                } else
-                       msg.length = imp->ln1msg2.ipbfln1f;
+                       msg.length = imp->rmmsg.ln1msg2.ipbfln1f;
                msg.reply_size = imp->ipbfln2f;
                path->handler->message_pending(path, &msg);
        }
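
The iucv change groups the two adjacent unions into one named struct so the existing 8-byte memcpy() draws from a single object; with FORTIFY_SOURCE and -Warray-bounds, copying across sibling members is flagged as an out-of-bounds read. A compact illustration of the before/after layouts (member names shortened):

    #include <stdio.h>
    #include <string.h>

    struct before {   /* memcpy(out, b.msg1.b, 8) runs past msg1's 4 bytes */
        union { unsigned int u32; unsigned char b[4]; } msg1;
        union { unsigned int u32; unsigned char b[4]; } msg2;
    };

    struct after {    /* one 8-byte object, safe to copy as a whole */
        struct {
            union { unsigned int u32; unsigned char b[4]; } msg1;
            union { unsigned int u32; unsigned char b[4]; } msg2;
        } rmmsg;
    };

    int main(void)
    {
        struct after a = { .rmmsg = { { .u32 = 1 }, { .u32 = 2 } } };
        unsigned char out[8];

        /* the source is now the full 8-byte rmmsg object */
        memcpy(out, &a.rmmsg, sizeof(a.rmmsg));
        printf("%u %u\n", a.rmmsg.msg1.u32, a.rmmsg.msg2.u32);
        return 0;
    }
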
index 7180979..ac5cadd 100644 (file)
@@ -98,8 +98,16 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
 {
        u8 rc = LLC_PDU_LEN_U;
 
-       if (addr->sllc_test || addr->sllc_xid)
+       if (addr->sllc_test)
                rc = LLC_PDU_LEN_U;
+       else if (addr->sllc_xid)
+               /* We need to expand the header to sizeof(struct llc_xid_info)
+                * since llc_pdu_init_as_xid_cmd() sets bytes 4, 5 and 6 of the
+                * LLC header as the XID PDU. In llc_ui_sendmsg() we reserved the
+                * header size and filled all other space with user data. If we
+                * don't reserve these bytes, llc_pdu_init_as_xid_cmd() will
+                * overwrite the user data */
+               rc = LLC_PDU_LEN_U_XID;
        else if (sk->sk_type == SOCK_STREAM)
                rc = LLC_PDU_LEN_I;
        return rc;
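
A small demo of the arithmetic behind the af_llc.c fix: LLC_PDU_LEN_U reserves only the 3-byte unnumbered header, but llc_pdu_init_as_xid_cmd() later writes the 3-byte XID information field at bytes 4-6, so user data copied right after a 3-byte reservation gets trampled. The header lengths match include/net/llc_pdu.h; the XID field values below are illustrative, not the real encoding:

    #include <stdio.h>
    #include <string.h>

    #define LLC_PDU_LEN_U      3   /* DSAP, SSAP, control */
    #define LLC_PDU_LEN_U_XID  6   /* + 3-byte XID information field */

    /* models llc_pdu_init_as_xid_cmd() writing bytes 4..6 of the header */
    static void xid_init(unsigned char *hdr)
    {
        hdr[3] = 0x81;   /* XID format identifier (illustrative) */
        hdr[4] = 0x02;   /* class */
        hdr[5] = 0x00;   /* receive window */
    }

    static int survives(size_t reserved)
    {
        unsigned char frame[32] = { 0 };

        memcpy(frame + reserved, "user-data", 9);  /* llc_ui_sendmsg() copy */
        xid_init(frame);                           /* header filled later */
        return memcmp(frame + reserved, "user-data", 9) == 0;
    }

    int main(void)
    {
        printf("reserve %d bytes: user data %s\n", LLC_PDU_LEN_U,
               survives(LLC_PDU_LEN_U) ? "intact" : "corrupted");
        printf("reserve %d bytes: user data %s\n", LLC_PDU_LEN_U_XID,
               survives(LLC_PDU_LEN_U_XID) ? "intact" : "corrupted");
        return 0;
    }
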
index b554f26..79d1cef 100644 (file)
@@ -79,7 +79,7 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
        struct llc_sap_state_ev *ev = llc_sap_ev(skb);
        int rc;
 
-       llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
+       llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap,
                            ev->daddr.lsap, LLC_PDU_CMD);
        llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
        rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
index 84cc773..4e6f11e 100644 (file)
@@ -152,6 +152,8 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
                                  struct vif_params *params)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
+       struct sta_info *sta;
        int ret;
 
        ret = ieee80211_if_change_type(sdata, type);
@@ -162,7 +164,24 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
                RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
                ieee80211_check_fast_rx_iface(sdata);
        } else if (type == NL80211_IFTYPE_STATION && params->use_4addr >= 0) {
+               struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+
+               if (params->use_4addr == ifmgd->use_4addr)
+                       return 0;
+
                sdata->u.mgd.use_4addr = params->use_4addr;
+               if (!ifmgd->associated)
+                       return 0;
+
+               mutex_lock(&local->sta_mtx);
+               sta = sta_info_get(sdata, ifmgd->bssid);
+               if (sta)
+                       drv_sta_set_4addr(local, sdata, &sta->sta,
+                                         params->use_4addr);
+               mutex_unlock(&local->sta_mtx);
+
+               if (params->use_4addr)
+                       ieee80211_send_4addr_nullfunc(local, sdata);
        }
 
        if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
index 22549b9..30ce6d2 100644 (file)
@@ -2201,6 +2201,8 @@ void ieee80211_dynamic_ps_timer(struct timer_list *t);
 void ieee80211_send_nullfunc(struct ieee80211_local *local,
                             struct ieee80211_sub_if_data *sdata,
                             bool powersave);
+void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
+                                  struct ieee80211_sub_if_data *sdata);
 void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
                             struct ieee80211_hdr *hdr, bool ack, u16 tx_time);
 
index a00f11a..c0ea3b1 100644 (file)
@@ -1095,8 +1095,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
        ieee80211_tx_skb(sdata, skb);
 }
 
-static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
-                                         struct ieee80211_sub_if_data *sdata)
+void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
+                                  struct ieee80211_sub_if_data *sdata)
 {
        struct sk_buff *skb;
        struct ieee80211_hdr *nullfunc;
index 771921c..2563473 100644 (file)
@@ -730,7 +730,8 @@ ieee80211_make_monitor_skb(struct ieee80211_local *local,
                 * Need to make a copy and possibly remove radiotap header
                 * and FCS from the original.
                 */
-               skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC);
+               skb = skb_copy_expand(*origskb, needed_headroom + NET_SKB_PAD,
+                                     0, GFP_ATOMIC);
 
                if (!skb)
                        return NULL;
index e969811..8509778 100644 (file)
@@ -1147,6 +1147,29 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
        return queued;
 }
 
+static void
+ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
+                    struct sta_info *sta,
+                    struct sk_buff *skb)
+{
+       struct rate_control_ref *ref = sdata->local->rate_ctrl;
+       u16 tid;
+
+       if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER))
+               return;
+
+       if (!sta || !sta->sta.ht_cap.ht_supported ||
+           !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
+           skb->protocol == sdata->control_port_protocol)
+               return;
+
+       tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+       if (likely(sta->ampdu_mlme.tid_tx[tid]))
+               return;
+
+       ieee80211_start_tx_ba_session(&sta->sta, tid, 0);
+}
+
 /*
  * initialises @tx
  * pass %NULL for the station if unknown, a valid pointer if known
@@ -1160,6 +1183,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       bool aggr_check = false;
        int tid;
 
        memset(tx, 0, sizeof(*tx));
@@ -1188,8 +1212,10 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
                } else if (tx->sdata->control_port_protocol == tx->skb->protocol) {
                        tx->sta = sta_info_get_bss(sdata, hdr->addr1);
                }
-               if (!tx->sta && !is_multicast_ether_addr(hdr->addr1))
+               if (!tx->sta && !is_multicast_ether_addr(hdr->addr1)) {
                        tx->sta = sta_info_get(sdata, hdr->addr1);
+                       aggr_check = true;
+               }
        }
 
        if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
@@ -1199,8 +1225,12 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
                struct tid_ampdu_tx *tid_tx;
 
                tid = ieee80211_get_tid(hdr);
-
                tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
+               if (!tid_tx && aggr_check) {
+                       ieee80211_aggr_check(sdata, tx->sta, skb);
+                       tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
+               }
+
                if (tid_tx) {
                        bool queued;
 
@@ -4120,29 +4150,6 @@ void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
 }
 EXPORT_SYMBOL(ieee80211_txq_schedule_start);
 
-static void
-ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
-                    struct sta_info *sta,
-                    struct sk_buff *skb)
-{
-       struct rate_control_ref *ref = sdata->local->rate_ctrl;
-       u16 tid;
-
-       if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER))
-               return;
-
-       if (!sta || !sta->sta.ht_cap.ht_supported ||
-           !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
-           skb->protocol == sdata->control_port_protocol)
-               return;
-
-       tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
-       if (likely(sta->ampdu_mlme.tid_tx[tid]))
-               return;
-
-       ieee80211_start_tx_ba_session(&sta->sta, tid, 0);
-}
-
 void __ieee80211_subif_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev,
                                  u32 info_flags,
index 52ea251..ff2cc0e 100644 (file)
@@ -44,6 +44,7 @@ static const struct snmp_mib mptcp_snmp_list[] = {
        SNMP_MIB_ITEM("RmSubflow", MPTCP_MIB_RMSUBFLOW),
        SNMP_MIB_ITEM("MPPrioTx", MPTCP_MIB_MPPRIOTX),
        SNMP_MIB_ITEM("MPPrioRx", MPTCP_MIB_MPPRIORX),
+       SNMP_MIB_ITEM("RcvPruned", MPTCP_MIB_RCVPRUNED),
        SNMP_MIB_SENTINEL
 };
 
index 193466c..0663cb1 100644 (file)
@@ -37,6 +37,7 @@ enum linux_mptcp_mib_field {
        MPTCP_MIB_RMSUBFLOW,            /* Remove a subflow */
        MPTCP_MIB_MPPRIOTX,             /* Transmit a MP_PRIO */
        MPTCP_MIB_MPPRIORX,             /* Received a MP_PRIO */
+       MPTCP_MIB_RCVPRUNED,            /* Incoming packet dropped due to memory limit */
        __MPTCP_MIB_MAX
 };
 
index 8f88dde..f48eb63 100644 (file)
@@ -57,10 +57,8 @@ static int mptcp_diag_dump_one(struct netlink_callback *cb,
                kfree_skb(rep);
                goto out;
        }
-       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
-                             MSG_DONTWAIT);
-       if (err > 0)
-               err = 0;
+       err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+
 out:
        sock_put(sk);
 
index b5850af..4452455 100644 (file)
@@ -1035,7 +1035,8 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk,
        return hmac == mp_opt->ahmac;
 }
 
-void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
+/* Return false if a subflow has been reset, else return true */
+bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
 {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct mptcp_sock *msk = mptcp_sk(subflow->conn);
@@ -1053,12 +1054,16 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
                        __mptcp_check_push(subflow->conn, sk);
                __mptcp_data_acked(subflow->conn);
                mptcp_data_unlock(subflow->conn);
-               return;
+               return true;
        }
 
        mptcp_get_options(sk, skb, &mp_opt);
+
+       /* The subflow can be in close state only if check_fully_established()
+        * just sent a reset. If so, tell the caller to ignore the current packet.
+        */
        if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
-               return;
+               return sk->sk_state != TCP_CLOSE;
 
        if (mp_opt.fastclose &&
            msk->local_key == mp_opt.rcvr_key) {
@@ -1100,7 +1105,7 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
        }
 
        if (!mp_opt.dss)
-               return;
+               return true;
 
        /* we can't wait for recvmsg() to update the ack_seq, otherwise
         * monodirectional flows will stuck
@@ -1119,12 +1124,12 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
                    schedule_work(&msk->work))
                        sock_hold(subflow->conn);
 
-               return;
+               return true;
        }
 
        mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
        if (!mpext)
-               return;
+               return true;
 
        memset(mpext, 0, sizeof(*mpext));
 
@@ -1153,6 +1158,8 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
                if (mpext->csum_reqd)
                        mpext->csum = mp_opt.csum;
        }
+
+       return true;
 }
 
 static void mptcp_set_rwin(const struct tcp_sock *tp)
index 7a5afa8..a889249 100644 (file)
@@ -474,7 +474,7 @@ static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
        bool cleanup, rx_empty;
 
        cleanup = (space > 0) && (space >= (old_space << 1));
-       rx_empty = !atomic_read(&sk->sk_rmem_alloc);
+       rx_empty = !__mptcp_rmem(sk);
 
        mptcp_for_each_subflow(msk, subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
@@ -720,8 +720,10 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
                sk_rbuf = ssk_rbuf;
 
        /* over limit? can't append more skbs to msk, Also, no need to wake-up*/
-       if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf)
+       if (__mptcp_rmem(sk) > sk_rbuf) {
+               MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
                return;
+       }
 
        /* Wake-up the reader only for in-sequence data */
        mptcp_data_lock(sk);
@@ -1754,7 +1756,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
                if (!(flags & MSG_PEEK)) {
                        /* we will bulk release the skb memory later */
                        skb->destructor = NULL;
-                       msk->rmem_released += skb->truesize;
+                       WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize);
                        __skb_unlink(skb, &msk->receive_queue);
                        __kfree_skb(skb);
                }
@@ -1873,7 +1875,7 @@ static void __mptcp_update_rmem(struct sock *sk)
 
        atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc);
        sk_mem_uncharge(sk, msk->rmem_released);
-       msk->rmem_released = 0;
+       WRITE_ONCE(msk->rmem_released, 0);
 }
 
 static void __mptcp_splice_receive_queue(struct sock *sk)
@@ -2380,7 +2382,7 @@ static int __mptcp_init_sock(struct sock *sk)
        msk->out_of_order_queue = RB_ROOT;
        msk->first_pending = NULL;
        msk->wmem_reserved = 0;
-       msk->rmem_released = 0;
+       WRITE_ONCE(msk->rmem_released, 0);
        msk->tx_pending_data = 0;
 
        msk->first = NULL;
index 426ed80..0f0c026 100644 (file)
@@ -296,9 +296,17 @@ static inline struct mptcp_sock *mptcp_sk(const struct sock *sk)
        return (struct mptcp_sock *)sk;
 }
 
+/* the msk socket doesn't use the backlog; also account for the
+ * memory pending bulk release
+ */
+static inline int __mptcp_rmem(const struct sock *sk)
+{
+       return atomic_read(&sk->sk_rmem_alloc) - READ_ONCE(mptcp_sk(sk)->rmem_released);
+}
+
 static inline int __mptcp_space(const struct sock *sk)
 {
-       return tcp_space(sk) + READ_ONCE(mptcp_sk(sk)->rmem_released);
+       return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) - __mptcp_rmem(sk));
 }
 
 static inline struct mptcp_data_frag *mptcp_send_head(const struct sock *sk)
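
__mptcp_rmem() nets the memory already slated for bulk release out of sk_rmem_alloc, and __mptcp_space() now derives free receive space straight from sk_rcvbuf. The arithmetic with made-up numbers; tcp_win_from_space() is modeled as the identity function, whereas the kernel scales it by tcp_adv_win_scale:

    #include <stdio.h>

    /* simplified: the kernel's tcp_win_from_space() applies scaling */
    static int win_from_space(int space) { return space; }

    static int mptcp_rmem(int sk_rmem_alloc, int rmem_released)
    {
        return sk_rmem_alloc - rmem_released;   /* __mptcp_rmem() */
    }

    int main(void)
    {
        int sk_rcvbuf = 262144;      /* example receive buffer */
        int sk_rmem_alloc = 200000;  /* charged to the socket */
        int rmem_released = 150000;  /* consumed, pending bulk release */

        int rmem = mptcp_rmem(sk_rmem_alloc, rmem_released);  /* 50000 */
        int space = win_from_space(sk_rcvbuf - rmem);         /* 212144 */

        printf("effective rmem %d, space %d, rx_empty=%d\n",
               rmem, space, rmem == 0);
        return 0;
    }
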
index 092d1f6..8c03afa 100644 (file)
@@ -157,19 +157,7 @@ static int mptcp_setsockopt_sol_socket_tstamp(struct mptcp_sock *msk, int optnam
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
                bool slow = lock_sock_fast(ssk);
 
-               switch (optname) {
-               case SO_TIMESTAMP_OLD:
-               case SO_TIMESTAMP_NEW:
-               case SO_TIMESTAMPNS_OLD:
-               case SO_TIMESTAMPNS_NEW:
-                       sock_set_timestamp(sk, optname, !!val);
-                       break;
-               case SO_TIMESTAMPING_NEW:
-               case SO_TIMESTAMPING_OLD:
-                       sock_set_timestamping(sk, optname, val);
-                       break;
-               }
-
+               sock_set_timestamp(sk, optname, !!val);
                unlock_sock_fast(ssk, slow);
        }
 
@@ -178,7 +166,8 @@ static int mptcp_setsockopt_sol_socket_tstamp(struct mptcp_sock *msk, int optnam
 }
 
 static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname,
-                                          sockptr_t optval, unsigned int optlen)
+                                          sockptr_t optval,
+                                          unsigned int optlen)
 {
        int val, ret;
 
@@ -205,14 +194,56 @@ static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname,
        case SO_TIMESTAMP_NEW:
        case SO_TIMESTAMPNS_OLD:
        case SO_TIMESTAMPNS_NEW:
-       case SO_TIMESTAMPING_OLD:
-       case SO_TIMESTAMPING_NEW:
                return mptcp_setsockopt_sol_socket_tstamp(msk, optname, val);
        }
 
        return -ENOPROTOOPT;
 }
 
+static int mptcp_setsockopt_sol_socket_timestamping(struct mptcp_sock *msk,
+                                                   int optname,
+                                                   sockptr_t optval,
+                                                   unsigned int optlen)
+{
+       struct mptcp_subflow_context *subflow;
+       struct sock *sk = (struct sock *)msk;
+       struct so_timestamping timestamping;
+       int ret;
+
+       if (optlen == sizeof(timestamping)) {
+               if (copy_from_sockptr(&timestamping, optval,
+                                     sizeof(timestamping)))
+                       return -EFAULT;
+       } else if (optlen == sizeof(int)) {
+               memset(&timestamping, 0, sizeof(timestamping));
+
+               if (copy_from_sockptr(&timestamping.flags, optval, sizeof(int)))
+                       return -EFAULT;
+       } else {
+               return -EINVAL;
+       }
+
+       ret = sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname,
+                             KERNEL_SOCKPTR(&timestamping),
+                             sizeof(timestamping));
+       if (ret)
+               return ret;
+
+       lock_sock(sk);
+
+       mptcp_for_each_subflow(msk, subflow) {
+               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+               bool slow = lock_sock_fast(ssk);
+
+               sock_set_timestamping(sk, optname, timestamping);
+               unlock_sock_fast(ssk, slow);
+       }
+
+       release_sock(sk);
+
+       return 0;
+}
+
 static int mptcp_setsockopt_sol_socket_linger(struct mptcp_sock *msk, sockptr_t optval,
                                              unsigned int optlen)
 {
@@ -299,9 +330,12 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
        case SO_TIMESTAMP_NEW:
        case SO_TIMESTAMPNS_OLD:
        case SO_TIMESTAMPNS_NEW:
+               return mptcp_setsockopt_sol_socket_int(msk, optname, optval,
+                                                      optlen);
        case SO_TIMESTAMPING_OLD:
        case SO_TIMESTAMPING_NEW:
-               return mptcp_setsockopt_sol_socket_int(msk, optname, optval, optlen);
+               return mptcp_setsockopt_sol_socket_timestamping(msk, optname,
+                                                               optval, optlen);
        case SO_LINGER:
                return mptcp_setsockopt_sol_socket_linger(msk, optval, optlen);
        case SO_RCVLOWAT:
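
SO_TIMESTAMPING gets its own handler because, since PHC binding was added, userspace may pass either a bare int of SOF_TIMESTAMPING_* flags or a full struct so_timestamping; the new function accepts both optlens, matching a caller like the following (the constants and the struct come from linux/net_tstamp.h; the TCP socket is just a placeholder):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/net_tstamp.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0) {
            perror("socket");
            return 1;
        }
        /* legacy form: optlen == sizeof(int), flags only */
        int flags = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;
        if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)))
            perror("SO_TIMESTAMPING (int)");

        /* extended form: optlen == sizeof(struct so_timestamping) */
        struct so_timestamping ts = {
            .flags = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE,
            .bind_phc = -1,   /* no PHC binding; a device PHC index otherwise */
        };
        if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &ts, sizeof(ts)))
            perror("SO_TIMESTAMPING (struct)");
        return 0;
    }
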
index 66d0b18..966f777 100644 (file)
@@ -214,11 +214,6 @@ again:
                                 ntohs(inet_sk(sk_listener)->inet_sport),
                                 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
                        if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
-                               sock_put((struct sock *)subflow_req->msk);
-                               mptcp_token_destroy_request(req);
-                               tcp_request_sock_ops.destructor(req);
-                               subflow_req->msk = NULL;
-                               subflow_req->mp_join = 0;
                                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
                                return -EPERM;
                        }
@@ -230,6 +225,8 @@ again:
                if (unlikely(req->syncookie)) {
                        if (mptcp_can_accept_new_subflow(subflow_req->msk))
                                subflow_init_req_cookie_join_save(subflow_req, skb);
+                       else
+                               return -EPERM;
                }
 
                pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
@@ -269,9 +266,7 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req,
                if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
                        return -EINVAL;
 
-               if (mptcp_can_accept_new_subflow(subflow_req->msk))
-                       subflow_req->mp_join = 1;
-
+               subflow_req->mp_join = 1;
                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
        }
 
index abe0fd0..3712778 100644 (file)
@@ -37,7 +37,21 @@ static spinlock_t join_entry_locks[COOKIE_JOIN_SLOTS] __cacheline_aligned_in_smp
 
 static u32 mptcp_join_entry_hash(struct sk_buff *skb, struct net *net)
 {
-       u32 i = skb_get_hash(skb) ^ net_hash_mix(net);
+       static u32 mptcp_join_hash_secret __read_mostly;
+       struct tcphdr *th = tcp_hdr(skb);
+       u32 seq, i;
+
+       net_get_random_once(&mptcp_join_hash_secret,
+                           sizeof(mptcp_join_hash_secret));
+
+       if (th->syn)
+               seq = TCP_SKB_CB(skb)->seq;
+       else
+               seq = TCP_SKB_CB(skb)->seq - 1;
+
+       i = jhash_3words(seq, net_hash_mix(net),
+                        (__force __u32)th->source << 16 | (__force __u32)th->dest,
+                        mptcp_join_hash_secret);
 
        return i % ARRAY_SIZE(join_entries);
 }
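
The join-entry bucket is now keyed: TCP sequence number, ports, and a boot-time random secret feed jhash_3words(), so a remote sender can no longer steer entries into a single slot. A userspace sketch of the same idea; the mixer below is an illustrative stand-in for jhash_3words(), and SLOTS stands in for ARRAY_SIZE(join_entries):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define SLOTS 1024   /* stand-in for ARRAY_SIZE(join_entries) */

    /* illustrative 32-bit mixer; the kernel uses jhash_3words() */
    static uint32_t mix(uint32_t a, uint32_t b, uint32_t c, uint32_t secret)
    {
        uint32_t h = a ^ secret;

        h = (h ^ b) * 0x9e3779b1u;
        h = (h ^ (h >> 16) ^ c) * 0x85ebca6bu;
        return h ^ (h >> 13);
    }

    static uint32_t bucket(uint32_t seq, uint16_t sport, uint16_t dport,
                           uint32_t secret)
    {
        return mix(seq, ((uint32_t)sport << 16) | dport, 0, secret) % SLOTS;
    }

    int main(void)
    {
        /* net_get_random_once() analogue: pick the secret once at startup */
        srand((unsigned)time(NULL));
        uint32_t secret = ((uint32_t)rand() << 16) ^ (uint32_t)rand();

        printf("bucket = %u\n", bucket(0x12345678u, 443, 50000, secret));
        return 0;
    }
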
index 9330908..ea1dd32 100644 (file)
@@ -17,3 +17,9 @@ config NCSI_OEM_CMD_GET_MAC
        help
          This allows to get MAC address from NCSI firmware and set them back to
                controller.
+config NCSI_OEM_CMD_KEEP_PHY
+       bool "Keep PHY Link up"
+       depends on NET_NCSI
+       help
+         This allows the PHY link to be kept up, preventing channel resets
+         during host load.
index cbbb0de..0b6cfd3 100644 (file)
@@ -78,6 +78,9 @@ enum {
 /* OEM Vendor Manufacture ID */
 #define NCSI_OEM_MFR_MLX_ID             0x8119
 #define NCSI_OEM_MFR_BCM_ID             0x113d
+#define NCSI_OEM_MFR_INTEL_ID           0x157
+/* Intel specific OEM command */
+#define NCSI_OEM_INTEL_CMD_KEEP_PHY     0x20   /* CMD ID for Keep PHY up */
 /* Broadcom specific OEM Command */
 #define NCSI_OEM_BCM_CMD_GMA            0x01   /* CMD ID for Get MAC */
 /* Mellanox specific OEM Command */
@@ -86,6 +89,7 @@ enum {
 #define NCSI_OEM_MLX_CMD_SMAF           0x01   /* CMD ID for Set MC Affinity */
 #define NCSI_OEM_MLX_CMD_SMAF_PARAM     0x07   /* Parameter for SMAF         */
 /* OEM Command payload lengths*/
+#define NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN 7
 #define NCSI_OEM_BCM_CMD_GMA_LEN        12
 #define NCSI_OEM_MLX_CMD_GMA_LEN        8
 #define NCSI_OEM_MLX_CMD_SMAF_LEN        60
@@ -271,6 +275,7 @@ enum {
        ncsi_dev_state_probe_mlx_gma,
        ncsi_dev_state_probe_mlx_smaf,
        ncsi_dev_state_probe_cis,
+       ncsi_dev_state_probe_keep_phy,
        ncsi_dev_state_probe_gvi,
        ncsi_dev_state_probe_gc,
        ncsi_dev_state_probe_gls,
index ca04b6d..89c7742 100644 (file)
@@ -689,6 +689,35 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
        return 0;
 }
 
+#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)
+
+static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
+{
+       unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
+       int ret = 0;
+
+       nca->payload = NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN;
+
+       memset(data, 0, NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN);
+       *(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
+
+       data[4] = NCSI_OEM_INTEL_CMD_KEEP_PHY;
+
+       /* PHY Link up attribute */
+       data[6] = 0x1;
+
+       nca->data = data;
+
+       ret = ncsi_xmit_cmd(nca);
+       if (ret)
+               netdev_err(nca->ndp->ndev.dev,
+                          "NCSI: Failed to transmit cmd 0x%x during configure\n",
+                          nca->type);
+       return ret;
+}
+
+#endif
+
 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
 
 /* NCSI OEM Command APIs */
@@ -700,7 +729,7 @@ static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
        nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
 
        memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
-       *(unsigned int *)data = ntohl(NCSI_OEM_MFR_BCM_ID);
+       *(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_BCM_ID);
        data[5] = NCSI_OEM_BCM_CMD_GMA;
 
        nca->data = data;
@@ -724,7 +753,7 @@ static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
        nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
 
        memset(&u, 0, sizeof(u));
-       u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
+       u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
        u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
        u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
 
@@ -747,7 +776,7 @@ static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca)
        int ret = 0;
 
        memset(&u, 0, sizeof(u));
-       u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
+       u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
        u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF;
        u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM;
        memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET],
@@ -1392,7 +1421,23 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
                }
 
                nd->state = ncsi_dev_state_probe_gvi;
+               if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
+                       nd->state = ncsi_dev_state_probe_keep_phy;
+               break;
+#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)
+       case ncsi_dev_state_probe_keep_phy:
+               ndp->pending_req_num = 1;
+
+               nca.type = NCSI_PKT_CMD_OEM;
+               nca.package = ndp->active_package->id;
+               nca.channel = 0;
+               ret = ncsi_oem_keep_phy_intel(&nca);
+               if (ret)
+                       goto error;
+
+               nd->state = ncsi_dev_state_probe_gvi;
                break;
+#endif /* CONFIG_NCSI_OEM_CMD_KEEP_PHY */
        case ncsi_dev_state_probe_gvi:
        case ncsi_dev_state_probe_gc:
        case ncsi_dev_state_probe_gls:
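
ncsi_oem_keep_phy_intel() builds a 7-byte OEM payload: the Intel manufacturer ID in network byte order in bytes 0-3, the command ID at byte 4, and the "PHY link up" attribute at byte 6. Reconstructing just that layout in userspace, with the constants copied from the hunks above:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NCSI_OEM_MFR_INTEL_ID           0x157
    #define NCSI_OEM_INTEL_CMD_KEEP_PHY     0x20
    #define NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN 7

    int main(void)
    {
        unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
        uint32_t mfr = htonl(NCSI_OEM_MFR_INTEL_ID);

        memset(data, 0, sizeof(data));
        memcpy(data, &mfr, sizeof(mfr));         /* bytes 0-3: vendor ID */
        data[4] = NCSI_OEM_INTEL_CMD_KEEP_PHY;   /* byte 4: command ID   */
        data[6] = 0x1;                           /* byte 6: PHY link up  */

        for (size_t i = 0; i < sizeof(data); i++)
            printf("%02x ", data[i]);
        printf("\n");                            /* 00 00 01 57 20 00 01 */
        return 0;
    }
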
index 888ccc2..d483748 100644 (file)
@@ -403,7 +403,7 @@ static int ncsi_rsp_handler_ev(struct ncsi_request *nr)
        /* Update to VLAN mode */
        cmd = (struct ncsi_cmd_ev_pkt *)skb_network_header(nr->cmd);
        ncm->enable = 1;
-       ncm->data[0] = ntohl(cmd->mode);
+       ncm->data[0] = ntohl((__force __be32)cmd->mode);
 
        return 0;
 }
@@ -699,12 +699,19 @@ static int ncsi_rsp_handler_oem_bcm(struct ncsi_request *nr)
        return 0;
 }
 
+/* Response handler for Intel card */
+static int ncsi_rsp_handler_oem_intel(struct ncsi_request *nr)
+{
+       return 0;
+}
+
 static struct ncsi_rsp_oem_handler {
        unsigned int    mfr_id;
        int             (*handler)(struct ncsi_request *nr);
 } ncsi_rsp_oem_handlers[] = {
        { NCSI_OEM_MFR_MLX_ID, ncsi_rsp_handler_oem_mlx },
-       { NCSI_OEM_MFR_BCM_ID, ncsi_rsp_handler_oem_bcm }
+       { NCSI_OEM_MFR_BCM_ID, ncsi_rsp_handler_oem_bcm },
+       { NCSI_OEM_MFR_INTEL_ID, ncsi_rsp_handler_oem_intel }
 };
 
 /* Response handler for OEM command */
index 96ba19f..5c03e51 100644 (file)
@@ -149,7 +149,15 @@ static void nf_conntrack_all_lock(void)
 
        spin_lock(&nf_conntrack_locks_all_lock);
 
-       nf_conntrack_locks_all = true;
+       /* For nf_conntrack_locks_all, only the latest time when another
+        * CPU will see an update is controlled by the "release" of the
+        * spin_lock below.
+        * The earliest time is not controlled, and thus KCSAN could detect
+        * a race when nf_conntrack_lock() reads the variable.
+        * WRITE_ONCE() is used to ensure the compiler will not
+        * optimize the write.
+        */
+       WRITE_ONCE(nf_conntrack_locks_all, true);
 
        for (i = 0; i < CONNTRACK_LOCKS; i++) {
                spin_lock(&nf_conntrack_locks[i]);
@@ -662,8 +670,13 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
                return false;
 
        tstamp = nf_conn_tstamp_find(ct);
-       if (tstamp && tstamp->stop == 0)
+       if (tstamp) {
+               s32 timeout = ct->timeout - nfct_time_stamp;
+
                tstamp->stop = ktime_get_real_ns();
+               if (timeout < 0)
+                       tstamp->stop -= jiffies_to_nsecs(-timeout);
+       }
 
        if (nf_conntrack_event_report(IPCT_DESTROY, ct,
                                    portid, report) < 0) {
@@ -2457,7 +2470,6 @@ i_see_dead_people:
        }
 
        list_for_each_entry(net, net_exit_list, exit_list) {
-               nf_conntrack_proto_pernet_fini(net);
                nf_conntrack_ecache_pernet_fini(net);
                nf_conntrack_expect_pernet_fini(net);
                free_percpu(net->ct.stat);
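
WRITE_ONCE() in nf_conntrack_all_lock() adds no ordering (as the comment notes, that still comes from the spin_lock release); it only keeps the compiler from tearing or eliding the store, so the deliberately racy read in nf_conntrack_lock() stays well defined for KCSAN. A userspace approximation of the two macros, assuming GNU C __typeof__; the kernel versions in rwonce.h are more elaborate:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    static bool locks_all;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void all_lock(void)
    {
        pthread_mutex_lock(&lock);
        /* racy by design: readers may observe this at any time; the
         * mutex release below bounds how late they can observe it */
        WRITE_ONCE(locks_all, true);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        all_lock();
        printf("locks_all=%d\n", READ_ONCE(locks_all));
        return 0;
    }
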
index 4e1a9db..e81af33 100644 (file)
@@ -218,6 +218,7 @@ static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
        if (!help)
                return 0;
 
+       rcu_read_lock();
        helper = rcu_dereference(help->helper);
        if (!helper)
                goto out;
@@ -233,9 +234,11 @@ static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
 
        nla_nest_end(skb, nest_helper);
 out:
+       rcu_read_unlock();
        return 0;
 
 nla_put_failure:
+       rcu_read_unlock();
        return -1;
 }
 
index 5564740..8f7a983 100644 (file)
@@ -697,13 +697,6 @@ void nf_conntrack_proto_pernet_init(struct net *net)
 #endif
 }
 
-void nf_conntrack_proto_pernet_fini(struct net *net)
-{
-#ifdef CONFIG_NF_CT_PROTO_GRE
-       nf_ct_gre_keymap_flush(net);
-#endif
-}
-
 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
                  &nf_conntrack_htable_size, 0600);
 
index db11e40..728eeb0 100644 (file)
@@ -55,19 +55,6 @@ static inline struct nf_gre_net *gre_pernet(struct net *net)
        return &net->ct.nf_ct_proto.gre;
 }
 
-void nf_ct_gre_keymap_flush(struct net *net)
-{
-       struct nf_gre_net *net_gre = gre_pernet(net);
-       struct nf_ct_gre_keymap *km, *tmp;
-
-       spin_lock_bh(&keymap_lock);
-       list_for_each_entry_safe(km, tmp, &net_gre->keymap_list, list) {
-               list_del_rcu(&km->list);
-               kfree_rcu(km, rcu);
-       }
-       spin_unlock_bh(&keymap_lock);
-}
-
 static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km,
                                const struct nf_conntrack_tuple *t)
 {
index f7e8baf..3259416 100644 (file)
@@ -823,6 +823,22 @@ static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
        return true;
 }
 
+static bool tcp_can_early_drop(const struct nf_conn *ct)
+{
+       switch (ct->proto.tcp.state) {
+       case TCP_CONNTRACK_FIN_WAIT:
+       case TCP_CONNTRACK_LAST_ACK:
+       case TCP_CONNTRACK_TIME_WAIT:
+       case TCP_CONNTRACK_CLOSE:
+       case TCP_CONNTRACK_CLOSE_WAIT:
+               return true;
+       default:
+               break;
+       }
+
+       return false;
+}
+
 /* Returns verdict for packet, or -1 for invalid. */
 int nf_conntrack_tcp_packet(struct nf_conn *ct,
                            struct sk_buff *skb,
@@ -1030,10 +1046,30 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
                if (index != TCP_RST_SET)
                        break;
 
-               if (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) {
+               /* If we are closing, tuple might have been re-used already.
+                * last_index, last_ack, and all other ct fields used for
+                * sequence/window validation are outdated in that case.
+                *
+                * As the conntrack can already be expired by GC under pressure,
+                * just skip validation checks.
+                */
+               if (tcp_can_early_drop(ct))
+                       goto in_window;
+
+               /* td_maxack might be outdated if we let a SYN through earlier */
+               if ((ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) &&
+                   ct->proto.tcp.last_index != TCP_SYN_SET) {
                        u32 seq = ntohl(th->seq);
 
-                       if (before(seq, ct->proto.tcp.seen[!dir].td_maxack)) {
+                       /* If we are not in established state and SEQ=0 this is most
+                        * likely an answer to a SYN we let go through above (last_index
+                        * can be updated due to out-of-order ACKs).
+                        */
+                       if (seq == 0 && !nf_conntrack_tcp_established(ct))
+                               break;
+
+                       if (before(seq, ct->proto.tcp.seen[!dir].td_maxack) &&
+                           !tn->tcp_ignore_invalid_rst) {
                                /* Invalid RST  */
                                spin_unlock_bh(&ct->lock);
                                nf_ct_l4proto_log_invalid(skb, ct, state, "invalid rst");
@@ -1134,6 +1170,16 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
                        nf_ct_kill_acct(ct, ctinfo, skb);
                        return NF_ACCEPT;
                }
+
+               if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT) {
+                       /* do not renew timeout on SYN retransmit.
+                        *
+                        * Else port reuse by client or NAT middlebox can keep
+                        * entry alive indefinitely (including nat info).
+                        */
+                       return NF_ACCEPT;
+               }
+
                /* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
                 * pickup with loose=1. Avoid large ESTABLISHED timeout.
                 */
@@ -1155,22 +1201,6 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
        return NF_ACCEPT;
 }
 
-static bool tcp_can_early_drop(const struct nf_conn *ct)
-{
-       switch (ct->proto.tcp.state) {
-       case TCP_CONNTRACK_FIN_WAIT:
-       case TCP_CONNTRACK_LAST_ACK:
-       case TCP_CONNTRACK_TIME_WAIT:
-       case TCP_CONNTRACK_CLOSE:
-       case TCP_CONNTRACK_CLOSE_WAIT:
-               return true;
-       default:
-               break;
-       }
-
-       return false;
-}
-
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
@@ -1437,6 +1467,9 @@ void nf_conntrack_tcp_init_net(struct net *net)
         */
        tn->tcp_be_liberal = 0;
 
+       /* If it's non-zero, we turn off RST sequence number check */
+       tn->tcp_ignore_invalid_rst = 0;
+
        /* Max number of the retransmitted packets without receiving an (acceptable)
         * ACK from the destination. If this number is reached, a shorter timer
         * will be started.
index f57a951..214d9f9 100644 (file)
@@ -579,6 +579,7 @@ enum nf_ct_sysctl_index {
 #endif
        NF_SYSCTL_CT_PROTO_TCP_LOOSE,
        NF_SYSCTL_CT_PROTO_TCP_LIBERAL,
+       NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST,
        NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS,
        NF_SYSCTL_CT_PROTO_TIMEOUT_UDP,
        NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM,
@@ -798,6 +799,14 @@ static struct ctl_table nf_ct_sysctl_table[] = {
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
+       [NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST] = {
+               .procname       = "nf_conntrack_tcp_ignore_invalid_rst",
+               .maxlen         = sizeof(u8),
+               .mode           = 0644,
+               .proc_handler   = proc_dou8vec_minmax,
+               .extra1         = SYSCTL_ZERO,
+               .extra2         = SYSCTL_ONE,
+       },
        [NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS] = {
                .procname       = "nf_conntrack_tcp_max_retrans",
                .maxlen         = sizeof(u8),
@@ -1004,6 +1013,7 @@ static void nf_conntrack_standalone_init_tcp_sysctl(struct net *net,
        XASSIGN(LOOSE, &tn->tcp_loose);
        XASSIGN(LIBERAL, &tn->tcp_be_liberal);
        XASSIGN(MAX_RETRANS, &tn->tcp_max_retrans);
+       XASSIGN(IGNORE_INVALID_RST, &tn->tcp_ignore_invalid_rst);
 #undef XASSIGN
 
 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
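
The new knob lands as net.netfilter.nf_conntrack_tcp_ignore_invalid_rst, a u8 clamped to 0 or 1. Flipping it from C, assuming the usual procfs mount and sufficient privileges:

    #include <stdio.h>

    int main(void)
    {
        const char *path =
            "/proc/sys/net/netfilter/nf_conntrack_tcp_ignore_invalid_rst";
        FILE *f = fopen(path, "w");

        if (!f) {
            perror(path);   /* needs conntrack loaded and CAP_NET_ADMIN */
            return 1;
        }
        fputs("1\n", f);    /* disable the RST sequence-number check */
        fclose(f);
        return 0;
    }
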
index 1e50908..551976e 100644 (file)
@@ -331,7 +331,11 @@ EXPORT_SYMBOL_GPL(flow_offload_add);
 void flow_offload_refresh(struct nf_flowtable *flow_table,
                          struct flow_offload *flow)
 {
-       flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
+       u32 timeout;
+
+       timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
+       if (READ_ONCE(flow->timeout) != timeout)
+               WRITE_ONCE(flow->timeout, timeout);
 
        if (likely(!nf_flowtable_hw_offload(flow_table)))
                return;
index 390d446..081437d 100644 (file)
@@ -3446,7 +3446,8 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
        return 0;
 
 err_destroy_flow_rule:
-       nft_flow_rule_destroy(flow);
+       if (flow)
+               nft_flow_rule_destroy(flow);
 err_release_rule:
        nf_tables_rule_release(&ctx, rule);
 err_release_expr:
@@ -8444,6 +8445,16 @@ static int nf_tables_commit_audit_alloc(struct list_head *adl,
        return 0;
 }
 
+static void nf_tables_commit_audit_free(struct list_head *adl)
+{
+       struct nft_audit_data *adp, *adn;
+
+       list_for_each_entry_safe(adp, adn, adl, list) {
+               list_del(&adp->list);
+               kfree(adp);
+       }
+}
+
 static void nf_tables_commit_audit_collect(struct list_head *adl,
                                           struct nft_table *table, u32 op)
 {
@@ -8508,6 +8519,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                ret = nf_tables_commit_audit_alloc(&adl, trans->ctx.table);
                if (ret) {
                        nf_tables_commit_chain_prepare_cancel(net);
+                       nf_tables_commit_audit_free(&adl);
                        return ret;
                }
                if (trans->msg_type == NFT_MSG_NEWRULE ||
@@ -8517,6 +8529,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                        ret = nf_tables_commit_chain_prepare(net, chain);
                        if (ret < 0) {
                                nf_tables_commit_chain_prepare_cancel(net);
+                               nf_tables_commit_audit_free(&adl);
                                return ret;
                        }
                }
index 50b4e3c..202f57d 100644 (file)
@@ -174,7 +174,9 @@ static const struct nf_hook_entries *
 nfnl_hook_entries_head(u8 pf, unsigned int hook, struct net *net, const char *dev)
 {
        const struct nf_hook_entries *hook_head = NULL;
+#ifdef CONFIG_NETFILTER_INGRESS
        struct net_device *netdev;
+#endif
 
        switch (pf) {
        case NFPROTO_IPV4:
index 913ac45..304e33c 100644 (file)
@@ -23,15 +23,21 @@ static int nft_last_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 {
        struct nft_last_priv *priv = nft_expr_priv(expr);
        u64 last_jiffies;
+       u32 last_set = 0;
        int err;
 
-       if (tb[NFTA_LAST_MSECS]) {
+       if (tb[NFTA_LAST_SET]) {
+               last_set = ntohl(nla_get_be32(tb[NFTA_LAST_SET]));
+               if (last_set == 1)
+                       priv->last_set = 1;
+       }
+
+       if (last_set && tb[NFTA_LAST_MSECS]) {
                err = nf_msecs_to_jiffies64(tb[NFTA_LAST_MSECS], &last_jiffies);
                if (err < 0)
                        return err;
 
-               priv->last_jiffies = jiffies + (unsigned long)last_jiffies;
-               priv->last_set = 1;
+               priv->last_jiffies = jiffies - (unsigned long)last_jiffies;
        }
 
        return 0;
@@ -42,24 +48,30 @@ static void nft_last_eval(const struct nft_expr *expr,
 {
        struct nft_last_priv *priv = nft_expr_priv(expr);
 
-       priv->last_jiffies = jiffies;
-       priv->last_set = 1;
+       if (READ_ONCE(priv->last_jiffies) != jiffies)
+               WRITE_ONCE(priv->last_jiffies, jiffies);
+       if (READ_ONCE(priv->last_set) == 0)
+               WRITE_ONCE(priv->last_set, 1);
 }
 
 static int nft_last_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
        struct nft_last_priv *priv = nft_expr_priv(expr);
+       unsigned long last_jiffies = READ_ONCE(priv->last_jiffies);
+       u32 last_set = READ_ONCE(priv->last_set);
        __be64 msecs;
 
-       if (time_before(jiffies, priv->last_jiffies))
-               priv->last_set = 0;
+       if (time_before(jiffies, last_jiffies)) {
+               WRITE_ONCE(priv->last_set, 0);
+               last_set = 0;
+       }
 
-       if (priv->last_set)
-               msecs = nf_jiffies64_to_msecs(jiffies - priv->last_jiffies);
+       if (last_set)
+               msecs = nf_jiffies64_to_msecs(jiffies - last_jiffies);
        else
                msecs = 0;
 
-       if (nla_put_be32(skb, NFTA_LAST_SET, htonl(priv->last_set)) ||
+       if (nla_put_be32(skb, NFTA_LAST_SET, htonl(last_set)) ||
            nla_put_be64(skb, NFTA_LAST_MSECS, msecs, NFTA_LAST_PAD))
                goto nla_put_failure;
 
index 0840c63..be1595d 100644 (file)
@@ -201,7 +201,9 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                alen = sizeof_field(struct nf_nat_range, min_addr.ip6);
                break;
        default:
-               return -EAFNOSUPPORT;
+               if (tb[NFTA_NAT_REG_ADDR_MIN])
+                       return -EAFNOSUPPORT;
+               break;
        }
        priv->family = family;
 
index d233ac4..380f95a 100644 (file)
@@ -2471,7 +2471,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
 
        nlmsg_end(skb, rep);
 
-       netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
+       nlmsg_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid);
 }
 EXPORT_SYMBOL(netlink_ack);
 
index 9115f8a..a8da88d 100644 (file)
@@ -121,11 +121,9 @@ static void nr_heartbeat_expiry(struct timer_list *t)
                   is accepted() it isn't 'dead' so doesn't get removed. */
                if (sock_flag(sk, SOCK_DESTROY) ||
                    (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
-                       sock_hold(sk);
                        bh_unlock_sock(sk);
                        nr_destroy_socket(sk);
-                       sock_put(sk);
-                       return;
+                       goto out;
                }
                break;
 
@@ -146,6 +144,8 @@ static void nr_heartbeat_expiry(struct timer_list *t)
 
        nr_start_heartbeat(sk);
        bh_unlock_sock(sk);
+out:
+       sock_put(sk);
 }
 
 static void nr_t2timer_expiry(struct timer_list *t)
@@ -159,6 +159,7 @@ static void nr_t2timer_expiry(struct timer_list *t)
                nr_enquiry_response(sk);
        }
        bh_unlock_sock(sk);
+       sock_put(sk);
 }
 
 static void nr_t4timer_expiry(struct timer_list *t)
@@ -169,6 +170,7 @@ static void nr_t4timer_expiry(struct timer_list *t)
        bh_lock_sock(sk);
        nr_sk(sk)->condition &= ~NR_COND_PEER_RX_BUSY;
        bh_unlock_sock(sk);
+       sock_put(sk);
 }
 
 static void nr_idletimer_expiry(struct timer_list *t)
@@ -197,6 +199,7 @@ static void nr_idletimer_expiry(struct timer_list *t)
                sock_set_flag(sk, SOCK_DEAD);
        }
        bh_unlock_sock(sk);
+       sock_put(sk);
 }
 
 static void nr_t1timer_expiry(struct timer_list *t)
@@ -209,8 +212,7 @@ static void nr_t1timer_expiry(struct timer_list *t)
        case NR_STATE_1:
                if (nr->n2count == nr->n2) {
                        nr_disconnect(sk, ETIMEDOUT);
-                       bh_unlock_sock(sk);
-                       return;
+                       goto out;
                } else {
                        nr->n2count++;
                        nr_write_internal(sk, NR_CONNREQ);
@@ -220,8 +222,7 @@ static void nr_t1timer_expiry(struct timer_list *t)
        case NR_STATE_2:
                if (nr->n2count == nr->n2) {
                        nr_disconnect(sk, ETIMEDOUT);
-                       bh_unlock_sock(sk);
-                       return;
+                       goto out;
                } else {
                        nr->n2count++;
                        nr_write_internal(sk, NR_DISCREQ);
@@ -231,8 +232,7 @@ static void nr_t1timer_expiry(struct timer_list *t)
        case NR_STATE_3:
                if (nr->n2count == nr->n2) {
                        nr_disconnect(sk, ETIMEDOUT);
-                       bh_unlock_sock(sk);
-                       return;
+                       goto out;
                } else {
                        nr->n2count++;
                        nr_requeue_frames(sk);
@@ -241,5 +241,7 @@ static void nr_t1timer_expiry(struct timer_list *t)
        }
 
        nr_start_t1timer(sk);
+out:
        bh_unlock_sock(sk);
+       sock_put(sk);
 }
index c89c8da..d4a2db0 100644 (file)
@@ -670,13 +670,13 @@ static bool cmp_key(const struct sw_flow_key *key1,
 {
        const long *cp1 = (const long *)((const u8 *)key1 + key_start);
        const long *cp2 = (const long *)((const u8 *)key2 + key_start);
-       long diffs = 0;
        int i;
 
        for (i = key_start; i < key_end; i += sizeof(long))
-               diffs |= *cp1++ ^ *cp2++;
+               if (*cp1++ ^ *cp2++)
+                       return false;
 
-       return diffs == 0;
+       return true;
 }
 
 static bool flow_cmp_masked_key(const struct sw_flow *flow,
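
cmp_key() previously OR-accumulated the XOR of every long in the masked range and tested the result once; it now returns on the first differing word, which ends the common mismatch case early during a bucket walk. The loop in isolation, with the key reduced to a flat buffer and offsets assumed long-aligned as in the flow-table code:

    #include <stdbool.h>
    #include <stdio.h>

    static bool cmp_key_range(const void *key1, const void *key2,
                              size_t key_start, size_t key_end)
    {
        const long *cp1 = (const long *)((const unsigned char *)key1 + key_start);
        const long *cp2 = (const long *)((const unsigned char *)key2 + key_start);

        for (size_t i = key_start; i < key_end; i += sizeof(long))
            if (*cp1++ ^ *cp2++)    /* first mismatch ends the walk */
                return false;
        return true;
    }

    int main(void)
    {
        long a[4] = { 1, 2, 3, 4 }, b[4] = { 1, 2, 9, 4 };

        printf("%d\n", cmp_key_range(a, a, 0, sizeof(a)));  /* 1 */
        printf("%d\n", cmp_key_range(a, b, 0, sizeof(b)));  /* 0 */
        return 0;
    }
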
index e6f4a62..171b7f3 100644 (file)
@@ -518,8 +518,10 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
                if (!ipc)
                        goto err;
 
-               if (sock_queue_rcv_skb(&ipc->sk, skb))
+               if (sock_queue_rcv_skb(&ipc->sk, skb)) {
+                       qrtr_port_put(ipc);
                        goto err;
+               }
 
                qrtr_port_put(ipc);
        }
@@ -839,6 +841,8 @@ static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
 
        ipc = qrtr_port_lookup(to->sq_port);
        if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
+               if (ipc)
+                       qrtr_port_put(ipc);
                kfree_skb(skb);
                return -ENODEV;
        }
index a656baa..1b4b351 100644 (file)
@@ -322,11 +322,22 @@ err_alloc:
 
 static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
 {
+       struct flow_block_cb *block_cb, *tmp_cb;
        struct tcf_ct_flow_table *ct_ft;
+       struct flow_block *block;
 
        ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
                             rwork);
        nf_flow_table_free(&ct_ft->nf_ft);
+
+       /* Remove any remaining callbacks before cleanup */
+       block = &ct_ft->nf_ft.flow_block;
+       down_write(&ct_ft->nf_ft.flow_block_lock);
+       list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) {
+               list_del(&block_cb->list);
+               flow_block_cb_free(block_cb);
+       }
+       up_write(&ct_ft->nf_ft.flow_block_lock);
        kfree(ct_ft);
 
        module_put(THIS_MODULE);
@@ -1026,7 +1037,8 @@ do_nat:
                /* This will take care of sending queued events
                 * even if the connection is already confirmed.
                 */
-               nf_conntrack_confirm(skb);
+               if (nf_conntrack_confirm(skb) != NF_ACCEPT)
+                       goto drop;
        }
 
        if (!skip_add)
index 81a1c67..8d17a54 100644 (file)
@@ -6,6 +6,7 @@
 */
 
 #include <linux/module.h>
+#include <linux/if_arp.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
@@ -33,6 +34,13 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a,
        tcf_lastuse_update(&d->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
 
+       action = READ_ONCE(d->tcf_action);
+       if (unlikely(action == TC_ACT_SHOT))
+               goto drop;
+
+       if (!skb->dev || skb->dev->type != ARPHRD_ETHER)
+               return action;
+
        /* XXX: if you are going to edit more fields beyond ethernet header
         * (example when you add IP header replacement or vlan swap)
         * then MAX_EDIT_LEN needs to change appropriately
@@ -41,10 +49,6 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a,
        if (unlikely(err)) /* best policy is to drop on the floor */
                goto drop;
 
-       action = READ_ONCE(d->tcf_action);
-       if (unlikely(action == TC_ACT_SHOT))
-               goto drop;
-
        p = rcu_dereference_bh(d->skbmod_p);
        flags = p->flags;
        if (flags & SKBMOD_F_DMAC)
index d73b5c5..e3e79e9 100644 (file)
@@ -2904,7 +2904,7 @@ replay:
                break;
        case RTM_GETCHAIN:
                err = tc_chain_notify(chain, skb, n->nlmsg_seq,
-                                     n->nlmsg_seq, n->nlmsg_type, true);
+                                     n->nlmsg_flags, n->nlmsg_type, true);
                if (err < 0)
                        NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
                break;
index 5b27453..e9a8a2c 100644 (file)
@@ -278,6 +278,8 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r,
                             TCA_TCINDEX_POLICE);
 }
 
+static void tcindex_free_perfect_hash(struct tcindex_data *cp);
+
 static void tcindex_partial_destroy_work(struct work_struct *work)
 {
        struct tcindex_data *p = container_of(to_rcu_work(work),
@@ -285,7 +287,8 @@ static void tcindex_partial_destroy_work(struct work_struct *work)
                                              rwork);
 
        rtnl_lock();
-       kfree(p->perfect);
+       if (p->perfect)
+               tcindex_free_perfect_hash(p);
        kfree(p);
        rtnl_unlock();
 }
index 66fe2b8..07b30d0 100644 (file)
@@ -564,7 +564,7 @@ static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
        /* if there's no entry, it means that the schedule didn't
         * start yet, so force all gates to be open, this is in
         * accordance to IEEE 802.1Qbv-2015 Section 8.6.9.4.5
-        * "AdminGateSates"
+        * "AdminGateStates"
         */
        gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
 
index 6f8319b..fe74c5f 100644 (file)
@@ -860,6 +860,8 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
        if (replace) {
                list_del_init(&shkey->key_list);
                sctp_auth_shkey_release(shkey);
+               if (asoc && asoc->active_key_id == auth_key->sca_keynumber)
+                       sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
        }
        list_add(&cur_key->key_list, sh_keys);
 
index 493fc01..760b367 100644 (file)
@@ -284,10 +284,8 @@ static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
                goto out;
        }
 
-       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
-                             MSG_DONTWAIT);
-       if (err > 0)
-               err = 0;
+       err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+
 out:
        return err;
 }
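
The rewrite works because nlmsg_unicast() already folds netlink_unicast()'s positive byte count into 0, making the open-coded "if (err > 0) err = 0" fixup redundant. A toy model of that wrapper shape (deliberately simplified; not the kernel's actual implementation):

    #include <stdio.h>

    /* Stand-in for netlink_unicast(): bytes sent (> 0) on success,
     * negative error code on failure. */
    static int send_reply(void)
    {
        return 128;
    }

    /* What the callers rely on: success is normalized to 0. */
    static int send_reply_unicast(void)
    {
        int err = send_reply();

        return err > 0 ? 0 : err;
    }

    int main(void)
    {
        printf("%d\n", send_reply_unicast());   /* prints 0 */
        return 0;
    }
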
index eb3c2a3..5ef86fd 100644 (file)
@@ -1203,7 +1203,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
        if (unlikely(!af))
                return NULL;
 
-       if (af->from_addr_param(&paddr, param, peer_port, 0))
+       if (!af->from_addr_param(&paddr, param, peer_port, 0))
                return NULL;
 
        return __sctp_lookup_association(net, laddr, &paddr, transportp);
index e48dd90..470dbdc 100644 (file)
@@ -100,8 +100,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
                list_for_each_entry_safe(addr, temp,
                                        &net->sctp.local_addr_list, list) {
                        if (addr->a.sa.sa_family == AF_INET6 &&
-                                       ipv6_addr_equal(&addr->a.v6.sin6_addr,
-                                               &ifa->addr)) {
+                           ipv6_addr_equal(&addr->a.v6.sin6_addr,
+                                           &ifa->addr) &&
+                           addr->a.v6.sin6_scope_id == ifa->idev->dev->ifindex) {
                                sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
                                found = 1;
                                addr->valid = 0;
index 9032ce6..4dfb5ea 100644 (file)
@@ -104,8 +104,8 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
                if (asoc->param_flags & SPP_PMTUD_ENABLE)
                        sctp_assoc_sync_pmtu(asoc);
        } else if (!sctp_transport_pl_enabled(tp) &&
-                  !sctp_transport_pmtu_check(tp)) {
-               if (asoc->param_flags & SPP_PMTUD_ENABLE)
+                  asoc->param_flags & SPP_PMTUD_ENABLE) {
+               if (!sctp_transport_pmtu_check(tp))
                        sctp_assoc_sync_pmtu(asoc);
        }
 
index 3c1fbf3..ec0f525 100644 (file)
@@ -398,7 +398,8 @@ static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
                retval = SCTP_SCOPE_LINK;
        } else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
                   ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
-                  ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
+                  ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
+                  ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
                retval = SCTP_SCOPE_PRIVATE;
        } else {
                retval = SCTP_SCOPE_GLOBAL;
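
The added check widens SCTP's private-scope classification to the RFC 2544 benchmarking range 198.18.0.0/15. A small standalone equivalent of the ipv4_is_test_198() test (the helper is re-derived by hand here, so treat the constants as illustrative):

    #include <arpa/inet.h>
    #include <stdio.h>

    /* True for addresses in 198.18.0.0/15. */
    static int is_test_198(in_addr_t addr)
    {
        return (ntohl(addr) & 0xfffe0000u) == 0xc6120000u;
    }

    int main(void)
    {
        struct in_addr a;

        inet_pton(AF_INET, "198.19.255.1", &a);
        printf("%d\n", is_test_198(a.s_addr));  /* prints 1 */
        return 0;
    }
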
index 6c08e50..b8fa8f1 100644 (file)
@@ -1163,7 +1163,7 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
                                       const struct sctp_transport *transport,
                                       __u32 probe_size)
 {
-       struct sctp_sender_hb_info hbinfo;
+       struct sctp_sender_hb_info hbinfo = {};
        struct sctp_chunk *retval;
 
        retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0,
index 09a8f23..32df65f 100644 (file)
@@ -1109,12 +1109,12 @@ enum sctp_disposition sctp_sf_send_probe(struct net *net,
        if (!sctp_transport_pl_enabled(transport))
                return SCTP_DISPOSITION_CONSUME;
 
-       sctp_transport_pl_send(transport);
-
-       reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
-       if (!reply)
-               return SCTP_DISPOSITION_NOMEM;
-       sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+       if (sctp_transport_pl_send(transport)) {
+               reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
+               if (!reply)
+                       return SCTP_DISPOSITION_NOMEM;
+               sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+       }
        sctp_add_cmd_sf(commands, SCTP_CMD_PROBE_TIMER_UPDATE,
                        SCTP_TRANSPORT(transport));
 
@@ -1274,8 +1274,7 @@ enum sctp_disposition sctp_sf_backbeat_8_3(struct net *net,
                    !sctp_transport_pl_enabled(link))
                        return SCTP_DISPOSITION_DISCARD;
 
-               sctp_transport_pl_recv(link);
-               if (link->pl.state == SCTP_PL_COMPLETE)
+               if (sctp_transport_pl_recv(link))
                        return SCTP_DISPOSITION_CONSUME;
 
                return sctp_sf_send_probe(net, ep, asoc, type, link, commands);
index e64e01f..6b937bf 100644 (file)
@@ -4577,6 +4577,10 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
        }
 
        if (optlen > 0) {
+               /* Trim it to the biggest size sctp sockopt may need if necessary */
+               optlen = min_t(unsigned int, optlen,
+                              PAGE_ALIGN(USHRT_MAX +
+                                         sizeof(__u16) * sizeof(struct sctp_reset_streams)));
                kopt = memdup_sockptr(optval, optlen);
                if (IS_ERR(kopt))
                        return PTR_ERR(kopt);
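
The new min_t() bounds the user-controlled optlen before memdup_sockptr() sizes an allocation from it. A userspace sketch of the clamp-before-copy pattern (the cap below is illustrative only, not SCTP's exact bound):

    #include <stdlib.h>
    #include <string.h>

    #define KOPT_MAX (64 * 1024)    /* assumed cap for this sketch */

    static void *copy_opt(const void *buf, size_t optlen)
    {
        void *kopt;

        /* Clamp the untrusted length before it drives malloc(). */
        if (optlen > KOPT_MAX)
            optlen = KOPT_MAX;

        kopt = malloc(optlen);
        if (kopt)
            memcpy(kopt, buf, optlen);
        return kopt;
    }

    int main(void)
    {
        char opt[16] = "setting";
        void *k = copy_opt(opt, sizeof(opt));

        free(k);
        return !k;
    }
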
index 5f23804..a3d3ca6 100644 (file)
@@ -258,16 +258,13 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
        sctp_transport_pl_update(transport);
 }
 
-void sctp_transport_pl_send(struct sctp_transport *t)
+bool sctp_transport_pl_send(struct sctp_transport *t)
 {
-       pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
-                __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
-
-       if (t->pl.probe_count < SCTP_MAX_PROBES) {
-               t->pl.probe_count++;
-               return;
-       }
+       if (t->pl.probe_count < SCTP_MAX_PROBES)
+               goto out;
 
+       t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks;
+       t->pl.probe_count = 0;
        if (t->pl.state == SCTP_PL_BASE) {
                if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */
                        t->pl.state = SCTP_PL_ERROR; /* Base -> Error */
@@ -299,14 +296,27 @@ void sctp_transport_pl_send(struct sctp_transport *t)
                        sctp_assoc_sync_pmtu(t->asoc);
                }
        }
-       t->pl.probe_count = 1;
+
+out:
+       if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count < 30 &&
+           !t->pl.probe_count && t->pl.last_rtx_chunks == t->asoc->rtx_data_chunks) {
+               t->pl.raise_count++;
+               return false;
+       }
+
+       pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
+                __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
+
+       t->pl.probe_count++;
+       return true;
 }
 
-void sctp_transport_pl_recv(struct sctp_transport *t)
+bool sctp_transport_pl_recv(struct sctp_transport *t)
 {
        pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
                 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
 
+       t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks;
        t->pl.pmtu = t->pl.probe_size;
        t->pl.probe_count = 0;
        if (t->pl.state == SCTP_PL_BASE) {
@@ -323,7 +333,7 @@ void sctp_transport_pl_recv(struct sctp_transport *t)
                if (!t->pl.probe_high) {
                        t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
                                               SCTP_MAX_PLPMTU);
-                       return;
+                       return false;
                }
                t->pl.probe_size += SCTP_PL_MIN_STEP;
                if (t->pl.probe_size >= t->pl.probe_high) {
@@ -335,11 +345,13 @@ void sctp_transport_pl_recv(struct sctp_transport *t)
                        t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
                        sctp_assoc_sync_pmtu(t->asoc);
                }
-       } else if (t->pl.state == SCTP_PL_COMPLETE && ++t->pl.raise_count == 30) {
+       } else if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count == 30) {
                /* Raise probe_size again after 30 * interval in Search Complete */
                t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
                t->pl.probe_size += SCTP_PL_MIN_STEP;
        }
+
+       return t->pl.state == SCTP_PL_COMPLETE;
 }
 
 static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu)
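
With the bool return, sctp_transport_pl_send() can tell its caller to skip building a heartbeat entirely while a completed search is only being re-validated. A heavily simplified model of that throttling decision (just the raise_count logic; the state machine, retransmission accounting and probe sizes are all omitted):

    #include <stdbool.h>
    #include <stdio.h>

    struct pl {
        bool complete;      /* search finished, PMTU considered stable */
        int raise_count;    /* probe intervals skipped since then */
    };

    /* True when a probe should actually be sent this interval. */
    static bool pl_should_send(struct pl *pl)
    {
        if (pl->complete && pl->raise_count < 30) {
            pl->raise_count++;
            return false;
        }
        return true;
    }

    int main(void)
    {
        struct pl pl = { .complete = true, .raise_count = 0 };
        int sent = 0;

        for (int i = 0; i < 31; i++)
            sent += pl_should_send(&pl);
        printf("%d\n", sent);   /* 1: only the 31st interval probes */
        return 0;
    }
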
index bd9233d..0b2dad3 100644 (file)
 #include <linux/sockios.h>
 #include <net/busy_poll.h>
 #include <linux/errqueue.h>
+#include <linux/ptp_clock_kernel.h>
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
 unsigned int sysctl_net_busy_read __read_mostly;
@@ -873,12 +874,18 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
                empty = 0;
        if (shhwtstamps &&
            (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
-           !skb_is_swtx_tstamp(skb, false_tstamp) &&
-           ktime_to_timespec64_cond(shhwtstamps->hwtstamp, tss.ts + 2)) {
-               empty = 0;
-               if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
-                   !skb_is_err_queue(skb))
-                       put_ts_pktinfo(msg, skb);
+           !skb_is_swtx_tstamp(skb, false_tstamp)) {
+               if (sk->sk_tsflags & SOF_TIMESTAMPING_BIND_PHC)
+                       ptp_convert_timestamp(shhwtstamps, sk->sk_bind_phc);
+
+               if (ktime_to_timespec64_cond(shhwtstamps->hwtstamp,
+                                            tss.ts + 2)) {
+                       empty = 0;
+
+                       if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
+                           !skb_is_err_queue(skb))
+                               put_ts_pktinfo(msg, skb);
+               }
        }
        if (!empty) {
                if (sock_flag(sk, SOCK_TSTAMP_NEW))
index e5c43d4..c9391d3 100644 (file)
@@ -898,16 +898,10 @@ static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
        if (unlikely(!aead))
                return -ENOKEY;
 
-       /* Cow skb data if needed */
-       if (likely(!skb_cloned(skb) &&
-                  (!skb_is_nonlinear(skb) || !skb_has_frag_list(skb)))) {
-               nsg = 1 + skb_shinfo(skb)->nr_frags;
-       } else {
-               nsg = skb_cow_data(skb, 0, &unused);
-               if (unlikely(nsg < 0)) {
-                       pr_err("RX: skb_cow_data() returned %d\n", nsg);
-                       return nsg;
-               }
+       nsg = skb_cow_data(skb, 0, &unused);
+       if (unlikely(nsg < 0)) {
+               pr_err("RX: skb_cow_data() returned %d\n", nsg);
+               return nsg;
        }
 
        /* Allocate memory for the AEAD operation */
index 34a97ea..75b99b7 100644 (file)
@@ -158,6 +158,7 @@ static void tipc_sk_remove(struct tipc_sock *tsk);
 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
 static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
+static int tipc_wait_for_connect(struct socket *sock, long *timeo_p);
 
 static const struct proto_ops packet_ops;
 static const struct proto_ops stream_ops;
@@ -1515,8 +1516,13 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
                rc = 0;
        }
 
-       if (unlikely(syn && !rc))
+       if (unlikely(syn && !rc)) {
                tipc_set_sk_state(sk, TIPC_CONNECTING);
+               if (timeout) {
+                       timeout = msecs_to_jiffies(timeout);
+                       tipc_wait_for_connect(sock, &timeout);
+               }
+       }
 
        return rc ? rc : dlen;
 }
@@ -1564,7 +1570,7 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
                return -EMSGSIZE;
 
        /* Handle implicit connection setup */
-       if (unlikely(dest)) {
+       if (unlikely(dest && sk->sk_state == TIPC_OPEN)) {
                rc = __tipc_sendmsg(sock, m, dlen);
                if (dlen && dlen == rc) {
                        tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
@@ -2646,7 +2652,7 @@ static int tipc_listen(struct socket *sock, int len)
 static int tipc_wait_for_accept(struct socket *sock, long timeo)
 {
        struct sock *sk = sock->sk;
-       DEFINE_WAIT(wait);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
        int err;
 
        /* True wake-one mechanism for incoming connections: only
@@ -2655,12 +2661,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
         * anymore, the common case will execute the loop only once.
        */
        for (;;) {
-               prepare_to_wait_exclusive(sk_sleep(sk), &wait,
-                                         TASK_INTERRUPTIBLE);
                if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
+                       add_wait_queue(sk_sleep(sk), &wait);
                        release_sock(sk);
-                       timeo = schedule_timeout(timeo);
+                       timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
                        lock_sock(sk);
+                       remove_wait_queue(sk_sleep(sk), &wait);
                }
                err = 0;
                if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -2672,7 +2678,6 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
                if (signal_pending(current))
                        break;
        }
-       finish_wait(sk_sleep(sk), &wait);
        return err;
 }
 
@@ -2689,9 +2694,10 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
                       bool kern)
 {
        struct sock *new_sk, *sk = sock->sk;
-       struct sk_buff *buf;
        struct tipc_sock *new_tsock;
+       struct msghdr m = {NULL,};
        struct tipc_msg *msg;
+       struct sk_buff *buf;
        long timeo;
        int res;
 
@@ -2737,19 +2743,17 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
        }
 
        /*
-        * Respond to 'SYN-' by discarding it & returning 'ACK'-.
-        * Respond to 'SYN+' by queuing it on new socket.
+        * Respond to 'SYN-' by discarding it & returning 'ACK'.
+        * Respond to 'SYN+' by queuing it on new socket & returning 'ACK'.
         */
        if (!msg_data_sz(msg)) {
-               struct msghdr m = {NULL,};
-
                tsk_advance_rx_queue(sk);
-               __tipc_sendstream(new_sock, &m, 0);
        } else {
                __skb_dequeue(&sk->sk_receive_queue);
                __skb_queue_head(&new_sk->sk_receive_queue, buf);
                skb_set_owner_r(buf, new_sk);
        }
+       __tipc_sendstream(new_sock, &m, 0);
        release_sock(new_sk);
 exit:
        release_sock(sk);
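
Moving tipc_wait_for_accept() to DEFINE_WAIT_FUNC()/wait_woken() closes the window in which a wakeup arriving between the queue check and the sleep could be lost. The userspace analogue of that lost-wakeup-safe shape is the standard re-check-the-predicate loop around a condition variable (a different primitive from the kernel's, but the same discipline):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool queue_nonempty;

    /* The predicate is re-tested after every wakeup, so a signal sent
     * just before the wait cannot be missed. */
    static void wait_for_data(void)
    {
        pthread_mutex_lock(&lock);
        while (!queue_nonempty)
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
    }

    static void *producer(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        queue_nonempty = true;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, producer, NULL);
        wait_for_data();
        pthread_join(t, NULL);
        return 0;
    }
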
index 23c92ad..ba7ced9 100644 (file)
@@ -1526,6 +1526,53 @@ out:
        return err;
 }
 
+static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+       scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+
+       /*
+        * Garbage collection of unix sockets starts by selecting a set of
+        * candidate sockets which have reference only from being in flight
+        * (total_refs == inflight_refs).  This condition is checked once during
+        * the candidate collection phase, and candidates are marked as such, so
+        * that non-candidates can later be ignored.  While inflight_refs is
+        * protected by unix_gc_lock, total_refs (file count) is not, hence this
+        * is an instantaneous decision.
+        *
+        * Once a candidate, however, the socket must not be reinstalled into a
+        * file descriptor while the garbage collection is in progress.
+        *
+        * If the above conditions are met, then the directed graph of
+        * candidates (*) does not change while unix_gc_lock is held.
+        *
+        * Any operation that changes the file count through file descriptors
+        * (dup, close, sendmsg) does not change the graph since candidates are
+        * not installed in fds.
+        *
+        * Dequeuing a candidate via recvmsg would install it into an fd, but
+        * that takes unix_gc_lock to decrement the inflight count, so it's
+        * serialized with garbage collection.
+        *
+        * MSG_PEEK is special in that it does not change the inflight count,
+        * yet does install the socket into an fd.  The following lock/unlock
+        * pair is to ensure serialization with garbage collection.  It must be
+        * done between incrementing the file count and installing the file into
+        * an fd.
+        *
+        * If garbage collection starts after the barrier provided by the
+        * lock/unlock, then it will see the elevated refcount and not mark this
+        * as a candidate.  If a garbage collection is already in progress
+        * before the file count was incremented, then the lock/unlock pair will
+        * ensure that garbage collection is finished before progressing to
+        * installing the fd.
+        *
+        * (*) A -> B where B is on the queue of A or B is on the queue of C
+        * which is on the queue of listening socket A.
+        */
+       spin_lock(&unix_gc_lock);
+       spin_unlock(&unix_gc_lock);
+}
+
 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
 {
        int err = 0;
@@ -2175,7 +2222,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
                sk_peek_offset_fwd(sk, size);
 
                if (UNIXCB(skb).fp)
-                       scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+                       unix_peek_fds(&scm, skb);
        }
        err = (flags & MSG_TRUNC) ? skb->len - skip : size;
 
@@ -2418,7 +2465,7 @@ unlock:
                        /* It is questionable, see note in unix_dgram_recvmsg.
                         */
                        if (UNIXCB(skb).fp)
-                               scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+                               unix_peek_fds(&scm, skb);
 
                        sk_peek_offset_fwd(sk, chunk);
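
The empty lock/unlock pair in unix_peek_fds() protects no data; it is a pure barrier that makes the peeking thread wait out any garbage collection already holding unix_gc_lock. The same idea in a userspace sketch with a pthread mutex (names invented):

    #include <pthread.h>

    static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Nothing is read or written under the lock: the pair only
     * serializes this thread against a collector that is mid-cycle.
     * If no collection is running, it costs one uncontended
     * lock/unlock. */
    static void wait_out_gc(void)
    {
        pthread_mutex_lock(&gc_lock);
        pthread_mutex_unlock(&gc_lock);
    }

    int main(void)
    {
        wait_out_gc();
        return 0;
    }
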
 
index 9ff64f9..7e7d7f4 100644 (file)
@@ -295,10 +295,8 @@ again:
 
                goto again;
        }
-       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
-                             MSG_DONTWAIT);
-       if (err > 0)
-               err = 0;
+       err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+
 out:
        if (sk)
                sock_put(sk);
index 50eb405..16c88be 100644 (file)
@@ -2351,7 +2351,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
                        goto nla_put_failure;
 
                for (band = state->band_start;
-                    band < NUM_NL80211_BANDS; band++) {
+                    band < (state->split ?
+                               NUM_NL80211_BANDS :
+                               NL80211_BAND_60GHZ + 1);
+                    band++) {
                        struct ieee80211_supported_band *sband;
 
                        /* omit higher bands for ancient software */
index f03c7ac..7897b14 100644 (file)
@@ -1754,16 +1754,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
                         * be grouped with this beacon for updates ...
                         */
                        if (!cfg80211_combine_bsses(rdev, new)) {
-                               kfree(new);
+                               bss_ref_put(rdev, new);
                                goto drop;
                        }
                }
 
                if (rdev->bss_entries >= bss_entries_limit &&
                    !cfg80211_bss_expire_oldest(rdev)) {
-                       if (!list_empty(&new->hidden_list))
-                               list_del(&new->hidden_list);
-                       kfree(new);
+                       bss_ref_put(rdev, new);
                        goto drop;
                }
 
index 520434e..036998d 100644 (file)
@@ -331,6 +331,7 @@ $(obj)/%.o: $(src)/%.c
                -Wno-gnu-variable-sized-type-not-at-end \
                -Wno-address-of-packed-member -Wno-tautological-compare \
                -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
+               -fno-asynchronous-unwind-tables \
                -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
                -O2 -emit-llvm -Xclang -disable-llvm-passes -c $< -o - | \
                $(OPT) -O2 -mtriple=bpf-pc-linux | $(LLVM_DIS) | \
index 53e300f..33d0bde 100644 (file)
@@ -96,6 +96,7 @@ static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
 static int opt_timeout = 1000;
 static bool opt_need_wakeup = true;
 static u32 opt_num_xsks = 1;
+static u32 prog_id;
 static bool opt_busy_poll;
 static bool opt_reduced_cap;
 
@@ -461,6 +462,23 @@ static void *poller(void *arg)
        return NULL;
 }
 
+static void remove_xdp_program(void)
+{
+       u32 curr_prog_id = 0;
+
+       if (bpf_get_link_xdp_id(opt_ifindex, &curr_prog_id, opt_xdp_flags)) {
+               printf("bpf_get_link_xdp_id failed\n");
+               exit(EXIT_FAILURE);
+       }
+
+       if (prog_id == curr_prog_id)
+               bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
+       else if (!curr_prog_id)
+               printf("couldn't find a prog id on a given interface\n");
+       else
+               printf("program on interface changed, not removing\n");
+}
+
 static void int_exit(int sig)
 {
        benchmark_done = true;
@@ -471,6 +489,9 @@ static void __exit_with_error(int error, const char *file, const char *func,
 {
        fprintf(stderr, "%s:%s:%i: errno: %d/\"%s\"\n", file, func,
                line, error, strerror(error));
+
+       if (opt_num_xsks > 1)
+               remove_xdp_program();
        exit(EXIT_FAILURE);
 }
 
@@ -490,6 +511,9 @@ static void xdpsock_cleanup(void)
                if (write(sock, &cmd, sizeof(int)) < 0)
                        exit_with_error(errno);
        }
+
+       if (opt_num_xsks > 1)
+               remove_xdp_program();
 }
 
 static void swap_mac_addresses(void *data)
@@ -857,6 +881,10 @@ static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem,
        if (ret)
                exit_with_error(-ret);
 
+       ret = bpf_get_link_xdp_id(opt_ifindex, &prog_id, opt_xdp_flags);
+       if (ret)
+               exit_with_error(-ret);
+
        xsk->app_stats.rx_empty_polls = 0;
        xsk->app_stats.fill_fail_polls = 0;
        xsk->app_stats.copy_tx_sendtos = 0;
index 10b2f23..02197cb 100644 (file)
@@ -386,7 +386,7 @@ ifeq ($(CONFIG_LTO_CLANG) $(CONFIG_MODVERSIONS),y y)
       cmd_update_lto_symversions =                                     \
        rm -f $@.symversions                                            \
        $(foreach n, $(filter-out FORCE,$^),                            \
-               $(if $(wildcard $(n).symversions),                      \
+               $(if $(shell test -s $(n).symversions && echo y),       \
                        ; cat $(n).symversions >> $@.symversions))
 else
       cmd_update_lto_symversions = echo >/dev/null
index 151f049..6b54e46 100755 (executable)
@@ -131,11 +131,14 @@ res="${res}${CONFIG_LOCALVERSION}${LOCALVERSION}"
 if test "$CONFIG_LOCALVERSION_AUTO" = "y"; then
        # full scm version string
        res="$res$(scm_version)"
-elif [ -z "${LOCALVERSION}" ]; then
-       # append a plus sign if the repository is not in a clean
-       # annotated or signed tagged state (as git describe only
-       # looks at signed or annotated tags - git tag -a/-s) and
-       # LOCALVERSION= is not specified
+elif [ "${LOCALVERSION+set}" != "set" ]; then
+       # If the variable LOCALVERSION is not set, append a plus
+       # sign if the repository is not in a clean annotated or
+       # signed tagged state (as git describe only looks at signed
+       # or annotated tags - git tag -a/-s).
+       #
+       # If the variable LOCALVERSION is set (including being set
+       # to an empty string), we don't want to append a plus sign.
        scm=$(scm_version --short)
        res="$res${scm:++}"
 fi
index 3e784cf..ebd06ae 100755 (executable)
@@ -44,7 +44,7 @@ def read_spdxdata(repo):
                 continue
 
             exception = None
-            for l in open(el.path).readlines():
+            for l in open(el.path, encoding="utf-8").readlines():
                 if l.startswith('Valid-License-Identifier:'):
                     lid = l.split(':')[1].strip().upper()
                     if lid in spdx.licenses:
index 14e3282..6a2971a 100644 (file)
@@ -246,12 +246,18 @@ static bool hw_support_mmap(struct snd_pcm_substream *substream)
        if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
                return false;
 
-       if (substream->ops->mmap ||
-           (substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV &&
-            substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV_UC))
+       if (substream->ops->mmap)
                return true;
 
-       return dma_can_mmap(substream->dma_buffer.dev.dev);
+       switch (substream->dma_buffer.dev.type) {
+       case SNDRV_DMA_TYPE_UNKNOWN:
+               return false;
+       case SNDRV_DMA_TYPE_CONTINUOUS:
+       case SNDRV_DMA_TYPE_VMALLOC:
+               return true;
+       default:
+               return dma_can_mmap(substream->dma_buffer.dev.dev);
+       }
 }
 
 static int constrain_mask_params(struct snd_pcm_substream *substream,
@@ -3063,9 +3069,14 @@ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
                boundary = 0x7fffffff;
        snd_pcm_stream_lock_irq(substream);
        /* FIXME: we should consider the boundary for the sync from app */
-       if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
-               control->appl_ptr = scontrol.appl_ptr;
-       else
+       if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL)) {
+               err = pcm_lib_apply_appl_ptr(substream,
+                               scontrol.appl_ptr);
+               if (err < 0) {
+                       snd_pcm_stream_unlock_irq(substream);
+                       return err;
+               }
+       } else
                scontrol.appl_ptr = control->appl_ptr % boundary;
        if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
                control->avail_min = scontrol.avail_min;
@@ -3664,6 +3675,8 @@ static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
        if (substream->ops->page)
                page = substream->ops->page(substream, offset);
+       else if (!snd_pcm_get_dma_buf(substream))
+               page = virt_to_page(runtime->dma_area + offset);
        else
                page = snd_sgbuf_get_page(snd_pcm_get_dma_buf(substream), offset);
        if (!page)
index d8be146..c9d0ba3 100644 (file)
@@ -319,6 +319,10 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
                .device = 0x4b55,
        },
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+               .device = 0x4b58,
+       },
 #endif
 
 /* Alder Lake */
index 5bbe669..7ad8c5f 100644 (file)
@@ -816,6 +816,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel
        mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1);
        snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7);
        snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);
+       spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
 
        spin_lock(&p->chip->reg_lock);
        set_mode_register(p->chip, 0xc0);       /* c0 = STOP */
@@ -855,6 +856,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel
        spin_unlock(&p->chip->reg_lock);
 
        /* restore PCM volume */
+       spin_lock_irqsave(&p->chip->mixer_lock, flags);
        snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL);
        snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR);
        spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
@@ -880,6 +882,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p)
        mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1);
        snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7);
        snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);
+       spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
 
        spin_lock(&p->chip->reg_lock);
        if (p->running & SNDRV_SB_CSP_ST_QSOUND) {
@@ -894,6 +897,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p)
        spin_unlock(&p->chip->reg_lock);
 
        /* restore PCM volume */
+       spin_lock_irqsave(&p->chip->mixer_lock, flags);
        snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL);
        snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR);
        spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
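
These hunks apply a single fix twice: mixer_lock is dropped before reg_lock is taken, and re-acquired only around the final volume restore, so the two locks never nest. A minimal pthread sketch of that lock scoping (names hypothetical, bodies elided):

    #include <pthread.h>

    static pthread_mutex_t mixer_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

    static void start_csp(void)
    {
        pthread_mutex_lock(&mixer_lock);
        /* ... read and mute the mixer ... */
        pthread_mutex_unlock(&mixer_lock);  /* drop before the next lock */

        pthread_mutex_lock(&reg_lock);
        /* ... program the DSP ... */
        pthread_mutex_unlock(&reg_lock);

        pthread_mutex_lock(&mixer_lock);    /* re-take just for restore */
        /* ... restore the mixer volume ... */
        pthread_mutex_unlock(&mixer_lock);
    }

    int main(void)
    {
        start_csp();
        return 0;
    }
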
index 4b2cc8c..e143e69 100644 (file)
@@ -1940,6 +1940,8 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
 static const struct snd_pci_quirk force_connect_list[] = {
        SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
        SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
+       SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1),
+       SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1),
        {}
 };
 
index 1389cfd..caaf0e8 100644 (file)
@@ -8626,6 +8626,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
        SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
        SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
        SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
index 84e3906..9449fb4 100644 (file)
@@ -576,6 +576,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
                                | SND_SOC_DAIFMT_CBM_CFM,
                .init = cz_rt5682_init,
                .dpcm_playback = 1,
+               .stop_dma_first = 1,
                .ops = &cz_rt5682_play_ops,
                SND_SOC_DAILINK_REG(designware1, rt5682, platform),
        },
@@ -585,6 +586,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_capture = 1,
+               .stop_dma_first = 1,
                .ops = &cz_rt5682_cap_ops,
                SND_SOC_DAILINK_REG(designware2, rt5682, platform),
        },
@@ -594,6 +596,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_playback = 1,
+               .stop_dma_first = 1,
                .ops = &cz_rt5682_max_play_ops,
                SND_SOC_DAILINK_REG(designware3, mx, platform),
        },
@@ -604,6 +607,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_capture = 1,
+               .stop_dma_first = 1,
                .ops = &cz_rt5682_dmic0_cap_ops,
                SND_SOC_DAILINK_REG(designware3, adau, platform),
        },
@@ -614,6 +618,7 @@ static struct snd_soc_dai_link cz_dai_5682_98357[] = {
                .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
                                | SND_SOC_DAIFMT_CBM_CFM,
                .dpcm_capture = 1,
+               .stop_dma_first = 1,
                .ops = &cz_rt5682_dmic1_cap_ops,
                SND_SOC_DAILINK_REG(designware2, adau, platform),
        },
index 7ebae3f..a3b784e 100644 (file)
@@ -1325,7 +1325,7 @@ config SND_SOC_SSM2305
          high-efficiency mono Class-D audio power amplifiers.
 
 config SND_SOC_SSM2518
-       tristate
+       tristate "Analog Devices SSM2518 Class-D Amplifier"
        depends on I2C
 
 config SND_SOC_SSM2602
@@ -1557,6 +1557,7 @@ config SND_SOC_WCD934X
          Qualcomm SoCs like SDM845.
 
 config SND_SOC_WCD938X
+       depends on SND_SOC_WCD938X_SDW
        tristate
 
 config SND_SOC_WCD938X_SDW
@@ -1813,11 +1814,6 @@ config SND_SOC_ZL38060
          which consists of a Digital Signal Processor (DSP), several Digital
          Audio Interfaces (DAIs), analog outputs, and a block of 14 GPIOs.
 
-config SND_SOC_ZX_AUD96P22
-       tristate "ZTE ZX AUD96P22 CODEC"
-       depends on I2C
-       select REGMAP_I2C
-
 # Amp
 config SND_SOC_LM4857
        tristate
index 3000bc1..38356ea 100644 (file)
@@ -1695,6 +1695,8 @@ static const struct regmap_config rt5631_regmap_config = {
        .reg_defaults = rt5631_reg,
        .num_reg_defaults = ARRAY_SIZE(rt5631_reg),
        .cache_type = REGCACHE_RBTREE,
+       .use_single_read = true,
+       .use_single_write = true,
 };
 
 static int rt5631_i2c_probe(struct i2c_client *i2c,
index e4c9157..abcd6f4 100644 (file)
@@ -973,10 +973,14 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
                rt5682_enable_push_button_irq(component, false);
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
                        RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW);
-               if (!snd_soc_dapm_get_pin_status(dapm, "MICBIAS"))
+               if (!snd_soc_dapm_get_pin_status(dapm, "MICBIAS") &&
+                       !snd_soc_dapm_get_pin_status(dapm, "PLL1") &&
+                       !snd_soc_dapm_get_pin_status(dapm, "PLL2B"))
                        snd_soc_component_update_bits(component,
                                RT5682_PWR_ANLG_1, RT5682_PWR_MB, 0);
-               if (!snd_soc_dapm_get_pin_status(dapm, "Vref2"))
+               if (!snd_soc_dapm_get_pin_status(dapm, "Vref2") &&
+                       !snd_soc_dapm_get_pin_status(dapm, "PLL1") &&
+                       !snd_soc_dapm_get_pin_status(dapm, "PLL2B"))
                        snd_soc_component_update_bits(component,
                                RT5682_PWR_ANLG_1, RT5682_PWR_VREF2, 0);
                snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
index 51870d5..b504d63 100644 (file)
@@ -1604,6 +1604,8 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
                        ret);
                return ret;
        }
+       regcache_cache_only(aic31xx->regmap, true);
+
        aic31xx->dev = &i2c->dev;
        aic31xx->irq = i2c->irq;
 
index 8195298..2513922 100644 (file)
@@ -151,8 +151,8 @@ struct aic31xx_pdata {
 #define AIC31XX_WORD_LEN_24BITS                0x02
 #define AIC31XX_WORD_LEN_32BITS                0x03
 #define AIC31XX_IFACE1_MASTER_MASK     GENMASK(3, 2)
-#define AIC31XX_BCLK_MASTER            BIT(2)
-#define AIC31XX_WCLK_MASTER            BIT(3)
+#define AIC31XX_BCLK_MASTER            BIT(3)
+#define AIC31XX_WCLK_MASTER            BIT(2)
 
 /* AIC31XX_DATA_OFFSET */
 #define AIC31XX_DATA_OFFSET_MASK       GENMASK(7, 0)
index c63b717..dcd8aeb 100644 (file)
@@ -250,8 +250,8 @@ static DECLARE_TLV_DB_SCALE(tlv_pcm, -6350, 50, 0);
 static DECLARE_TLV_DB_SCALE(tlv_driver_gain, -600, 100, 0);
 /* -12dB min, 0.5dB steps */
 static DECLARE_TLV_DB_SCALE(tlv_adc_vol, -1200, 50, 0);
-
-static DECLARE_TLV_DB_LINEAR(tlv_spk_vol, TLV_DB_GAIN_MUTE, 0);
+/* -58.5dB min, 0.5dB steps */
+static DECLARE_TLV_DB_SCALE(tlv_tas_driver_gain, -5850, 50, 0);
 static DECLARE_TLV_DB_SCALE(tlv_amp_vol, 0, 600, 1);
 
 static const char * const lo_cm_text[] = {
@@ -1063,21 +1063,20 @@ static const struct snd_soc_component_driver soc_component_dev_aic32x4 = {
 };
 
 static const struct snd_kcontrol_new aic32x4_tas2505_snd_controls[] = {
-       SOC_DOUBLE_R_S_TLV("PCM Playback Volume", AIC32X4_LDACVOL,
-                       AIC32X4_LDACVOL, 0, -0x7f, 0x30, 7, 0, tlv_pcm),
+       SOC_SINGLE_S8_TLV("PCM Playback Volume",
+                         AIC32X4_LDACVOL, -0x7f, 0x30, tlv_pcm),
        SOC_ENUM("DAC Playback PowerTune Switch", l_ptm_enum),
-       SOC_DOUBLE_R_S_TLV("HP Driver Playback Volume", AIC32X4_HPLGAIN,
-                       AIC32X4_HPLGAIN, 0, -0x6, 0x1d, 5, 0,
-                       tlv_driver_gain),
-       SOC_DOUBLE_R("HP DAC Playback Switch", AIC32X4_HPLGAIN,
-                       AIC32X4_HPLGAIN, 6, 0x01, 1),
 
-       SOC_SINGLE("Auto-mute Switch", AIC32X4_DACMUTE, 4, 7, 0),
+       SOC_SINGLE_TLV("HP Driver Gain Volume",
+                       AIC32X4_HPLGAIN, 0, 0x74, 1, tlv_tas_driver_gain),
+       SOC_SINGLE("HP DAC Playback Switch", AIC32X4_HPLGAIN, 6, 1, 1),
 
-       SOC_SINGLE_RANGE_TLV("Speaker Driver Playback Volume", TAS2505_SPKVOL1,
-                       0, 0, 117, 1, tlv_spk_vol),
-       SOC_SINGLE_TLV("Speaker Amplifier Playback Volume", TAS2505_SPKVOL2,
-                       4, 5, 0, tlv_amp_vol),
+       SOC_SINGLE_TLV("Speaker Driver Playback Volume",
+                       TAS2505_SPKVOL1, 0, 0x74, 1, tlv_tas_driver_gain),
+       SOC_SINGLE_TLV("Speaker Amplifier Playback Volume",
+                       TAS2505_SPKVOL2, 4, 5, 0, tlv_amp_vol),
+
+       SOC_SINGLE("Auto-mute Switch", AIC32X4_DACMUTE, 4, 7, 0),
 };
 
 static const struct snd_kcontrol_new hp_output_mixer_controls[] = {
index 78b76ec..2fcc973 100644 (file)
@@ -3317,13 +3317,6 @@ static int wcd938x_soc_codec_probe(struct snd_soc_component *component)
                             (WCD938X_DIGITAL_INTR_LEVEL_0 + i), 0);
        }
 
-       ret = wcd938x_irq_init(wcd938x, component->dev);
-       if (ret) {
-               dev_err(component->dev, "%s: IRQ init failed: %d\n",
-                       __func__, ret);
-               return ret;
-       }
-
        wcd938x->hphr_pdm_wd_int = regmap_irq_get_virq(wcd938x->irq_chip,
                                                       WCD938X_IRQ_HPHR_PDM_WD_INT);
        wcd938x->hphl_pdm_wd_int = regmap_irq_get_virq(wcd938x->irq_chip,
@@ -3553,7 +3546,6 @@ static int wcd938x_bind(struct device *dev)
        }
        wcd938x->sdw_priv[AIF1_PB] = dev_get_drvdata(wcd938x->rxdev);
        wcd938x->sdw_priv[AIF1_PB]->wcd938x = wcd938x;
-       wcd938x->sdw_priv[AIF1_PB]->slave_irq = wcd938x->virq;
 
        wcd938x->txdev = wcd938x_sdw_device_get(wcd938x->txnode);
        if (!wcd938x->txdev) {
@@ -3562,7 +3554,6 @@ static int wcd938x_bind(struct device *dev)
        }
        wcd938x->sdw_priv[AIF1_CAP] = dev_get_drvdata(wcd938x->txdev);
        wcd938x->sdw_priv[AIF1_CAP]->wcd938x = wcd938x;
-       wcd938x->sdw_priv[AIF1_CAP]->slave_irq = wcd938x->virq;
        wcd938x->tx_sdw_dev = dev_to_sdw_dev(wcd938x->txdev);
        if (!wcd938x->tx_sdw_dev) {
                dev_err(dev, "could not get txslave with matching of dev\n");
@@ -3595,6 +3586,15 @@ static int wcd938x_bind(struct device *dev)
                return PTR_ERR(wcd938x->regmap);
        }
 
+       ret = wcd938x_irq_init(wcd938x, dev);
+       if (ret) {
+               dev_err(dev, "%s: IRQ init failed: %d\n", __func__, ret);
+               return ret;
+       }
+
+       wcd938x->sdw_priv[AIF1_PB]->slave_irq = wcd938x->virq;
+       wcd938x->sdw_priv[AIF1_CAP]->slave_irq = wcd938x->virq;
+
        ret = wcd938x_set_micbias_data(wcd938x);
        if (ret < 0) {
                dev_err(dev, "%s: bad micbias pdata\n", __func__);
index 37aa020..549d982 100644 (file)
 /*
  * HALO_CCM_CORE_CONTROL
  */
+#define HALO_CORE_RESET                     0x00000200
 #define HALO_CORE_EN                        0x00000001
 
 /*
@@ -1213,7 +1214,7 @@ static int wm_coeff_tlv_get(struct snd_kcontrol *kctl,
 
        mutex_lock(&ctl->dsp->pwr_lock);
 
-       ret = wm_coeff_read_ctrl_raw(ctl, ctl->cache, size);
+       ret = wm_coeff_read_ctrl(ctl, ctl->cache, size);
 
        if (!ret && copy_to_user(bytes, ctl->cache, size))
                ret = -EFAULT;
@@ -3333,7 +3334,8 @@ static int wm_halo_start_core(struct wm_adsp *dsp)
 {
        return regmap_update_bits(dsp->regmap,
                                  dsp->base + HALO_CCM_CORE_CONTROL,
-                                 HALO_CORE_EN, HALO_CORE_EN);
+                                 HALO_CORE_RESET | HALO_CORE_EN,
+                                 HALO_CORE_RESET | HALO_CORE_EN);
 }
 
 static void wm_halo_stop_core(struct wm_adsp *dsp)
index 0e7ed90..25daef9 100644 (file)
@@ -55,43 +55,68 @@ static int spk_init(struct snd_soc_pcm_runtime *rtd)
        return ret;
 }
 
-static int max98373_sdw_trigger(struct snd_pcm_substream *substream, int cmd)
+static int mx8373_enable_spk_pin(struct snd_pcm_substream *substream, bool enable)
 {
+       struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+       struct snd_soc_dai *codec_dai;
+       struct snd_soc_dai *cpu_dai;
        int ret;
+       int j;
 
-       switch (cmd) {
-       case SNDRV_PCM_TRIGGER_START:
-       case SNDRV_PCM_TRIGGER_RESUME:
-       case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-               /* enable max98373 first */
-               ret = max_98373_trigger(substream, cmd);
-               if (ret < 0)
-                       break;
-
-               ret = sdw_trigger(substream, cmd);
-               break;
-       case SNDRV_PCM_TRIGGER_STOP:
-       case SNDRV_PCM_TRIGGER_SUSPEND:
-       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-               ret = sdw_trigger(substream, cmd);
-               if (ret < 0)
-                       break;
-
-               ret = max_98373_trigger(substream, cmd);
-               break;
-       default:
-               ret = -EINVAL;
-               break;
+       /* set spk pin by playback only */
+       if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+               return 0;
+
+       cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+       for_each_rtd_codec_dais(rtd, j, codec_dai) {
+               struct snd_soc_dapm_context *dapm =
+                               snd_soc_component_get_dapm(cpu_dai->component);
+               char pin_name[16];
+
+               snprintf(pin_name, ARRAY_SIZE(pin_name), "%s Spk",
+                        codec_dai->component->name_prefix);
+
+               if (enable)
+                       ret = snd_soc_dapm_enable_pin(dapm, pin_name);
+               else
+                       ret = snd_soc_dapm_disable_pin(dapm, pin_name);
+
+               if (!ret)
+                       snd_soc_dapm_sync(dapm);
        }
 
-       return ret;
+       return 0;
+}
+
+static int mx8373_sdw_prepare(struct snd_pcm_substream *substream)
+{
+       int ret = 0;
+
+       /* according to soc_pcm_prepare dai link prepare is called first */
+       ret = sdw_prepare(substream);
+       if (ret < 0)
+               return ret;
+
+       return mx8373_enable_spk_pin(substream, true);
+}
+
+static int mx8373_sdw_hw_free(struct snd_pcm_substream *substream)
+{
+       int ret = 0;
+
+       /* according to soc_pcm_hw_free dai link free is called first */
+       ret = sdw_hw_free(substream);
+       if (ret < 0)
+               return ret;
+
+       return mx8373_enable_spk_pin(substream, false);
 }
 
 static const struct snd_soc_ops max_98373_sdw_ops = {
        .startup = sdw_startup,
-       .prepare = sdw_prepare,
-       .trigger = max98373_sdw_trigger,
-       .hw_free = sdw_hw_free,
+       .prepare = mx8373_sdw_prepare,
+       .trigger = sdw_trigger,
+       .hw_free = mx8373_sdw_hw_free,
        .shutdown = sdw_shutdown,
 };
 
index 2b758a1..5b8a274 100644 (file)
@@ -341,6 +341,7 @@ static int set_mtkaif_rx(struct mtk_base_afe *afe)
        case MT8183_MTKAIF_PROTOCOL_1:
                regmap_write(afe->regmap, AFE_AUD_PAD_TOP, 0x31);
                regmap_write(afe->regmap, AFE_ADDA_MTKAIF_CFG0, 0x0);
+               break;
        default:
                break;
        }
index 46513bb..d1c570c 100644 (file)
@@ -1015,6 +1015,7 @@ out:
 
 static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 {
+       struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
        int ret = -EINVAL, _ret = 0;
        int rollback = 0;
 
@@ -1055,14 +1056,23 @@ start_err:
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-               ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback);
-               if (ret < 0)
-                       break;
+               if (rtd->dai_link->stop_dma_first) {
+                       ret = snd_soc_pcm_component_trigger(substream, cmd, rollback);
+                       if (ret < 0)
+                               break;
 
-               ret = snd_soc_pcm_component_trigger(substream, cmd, rollback);
-               if (ret < 0)
-                       break;
+                       ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback);
+                       if (ret < 0)
+                               break;
+               } else {
+                       ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback);
+                       if (ret < 0)
+                               break;
 
+                       ret = snd_soc_pcm_component_trigger(substream, cmd, rollback);
+                       if (ret < 0)
+                               break;
+               }
                ret = snd_soc_link_trigger(substream, cmd, rollback);
                break;
        }
index a002621..d04ce84 100644 (file)
@@ -89,6 +89,7 @@ static const struct sof_dev_desc adls_desc = {
 static const struct sof_dev_desc adl_desc = {
        .machines               = snd_soc_acpi_intel_adl_machines,
        .alt_machines           = snd_soc_acpi_intel_adl_sdw_machines,
+       .use_acpi_target_states = true,
        .resindex_lpe_base      = 0,
        .resindex_pcicfg_base   = -1,
        .resindex_imr_base      = -1,
index 573374b..d3276b4 100644 (file)
@@ -213,19 +213,19 @@ snd_pcm_uframes_t tegra_pcm_pointer(struct snd_soc_component *component,
 }
 EXPORT_SYMBOL_GPL(tegra_pcm_pointer);
 
-static int tegra_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream,
+static int tegra_pcm_preallocate_dma_buffer(struct device *dev, struct snd_pcm *pcm, int stream,
                                            size_t size)
 {
        struct snd_pcm_substream *substream = pcm->streams[stream].substream;
        struct snd_dma_buffer *buf = &substream->dma_buffer;
 
-       buf->area = dma_alloc_wc(pcm->card->dev, size, &buf->addr, GFP_KERNEL);
+       buf->area = dma_alloc_wc(dev, size, &buf->addr, GFP_KERNEL);
        if (!buf->area)
                return -ENOMEM;
 
        buf->private_data = NULL;
        buf->dev.type = SNDRV_DMA_TYPE_DEV;
-       buf->dev.dev = pcm->card->dev;
+       buf->dev.dev = dev;
        buf->bytes = size;
 
        return 0;
@@ -244,31 +244,28 @@ static void tegra_pcm_deallocate_dma_buffer(struct snd_pcm *pcm, int stream)
        if (!buf->area)
                return;
 
-       dma_free_wc(pcm->card->dev, buf->bytes, buf->area, buf->addr);
+       dma_free_wc(buf->dev.dev, buf->bytes, buf->area, buf->addr);
        buf->area = NULL;
 }
 
-static int tegra_pcm_dma_allocate(struct snd_soc_pcm_runtime *rtd,
+static int tegra_pcm_dma_allocate(struct device *dev, struct snd_soc_pcm_runtime *rtd,
                                  size_t size)
 {
-       struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
        int ret;
 
-       ret = dma_set_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (ret < 0)
                return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
-               ret = tegra_pcm_preallocate_dma_buffer(pcm,
-                       SNDRV_PCM_STREAM_PLAYBACK, size);
+               ret = tegra_pcm_preallocate_dma_buffer(dev, pcm, SNDRV_PCM_STREAM_PLAYBACK, size);
                if (ret)
                        goto err;
        }
 
        if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
-               ret = tegra_pcm_preallocate_dma_buffer(pcm,
-                       SNDRV_PCM_STREAM_CAPTURE, size);
+               ret = tegra_pcm_preallocate_dma_buffer(dev, pcm, SNDRV_PCM_STREAM_CAPTURE, size);
                if (ret)
                        goto err_free_play;
        }
@@ -284,7 +281,16 @@ err:
 int tegra_pcm_construct(struct snd_soc_component *component,
                        struct snd_soc_pcm_runtime *rtd)
 {
-       return tegra_pcm_dma_allocate(rtd, tegra_pcm_hardware.buffer_bytes_max);
+       struct device *dev = component->dev;
+
+       /*
+        * Fallback for backwards-compatibility with older device trees that
+        * have the iommus property in the virtual, top-level "sound" node.
+        */
+       if (!of_get_property(dev->of_node, "iommus", NULL))
+               dev = rtd->card->snd_card->dev;
+
+       return tegra_pcm_dma_allocate(dev, rtd, tegra_pcm_hardware.buffer_bytes_max);
 }
 EXPORT_SYMBOL_GPL(tegra_pcm_construct);
 
index a7c0484..265bbc5 100644 (file)
@@ -197,7 +197,7 @@ static int j721e_configure_refclk(struct j721e_priv *priv,
                return ret;
        }
 
-       if (priv->hsdiv_rates[domain->parent_clk_id] != scki) {
+       if (domain->parent_clk_id == -1 || priv->hsdiv_rates[domain->parent_clk_id] != scki) {
                dev_dbg(priv->dev,
                        "%s configuration for %u Hz: %s, %dxFS (SCKI: %u Hz)\n",
                        audio_domain == J721E_AUDIO_DOMAIN_CPB ? "CPB" : "IVI",
@@ -278,23 +278,29 @@ static int j721e_audio_startup(struct snd_pcm_substream *substream)
                                          j721e_rule_rate, &priv->rate_range,
                                          SNDRV_PCM_HW_PARAM_RATE, -1);
 
-       mutex_unlock(&priv->mutex);
 
        if (ret)
-               return ret;
+               goto out;
 
        /* Reset TDM slots to 32 */
        ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 2, 32);
        if (ret && ret != -ENOTSUPP)
-               return ret;
+               goto out;
 
        for_each_rtd_codec_dais(rtd, i, codec_dai) {
                ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x3, 0x3, 2, 32);
                if (ret && ret != -ENOTSUPP)
-                       return ret;
+                       goto out;
        }
 
-       return 0;
+       if (ret == -ENOTSUPP)
+               ret = 0;
+out:
+       if (ret)
+               domain->active--;
+       mutex_unlock(&priv->mutex);
+
+       return ret;
 }
 
 static int j721e_audio_hw_params(struct snd_pcm_substream *substream,
index 30b3e12..f4cdaf1 100644 (file)
@@ -3295,7 +3295,15 @@ static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer,
 {
        struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
        static const char * const val_types[] = {
-               "BOOLEAN", "INV_BOOLEAN", "S8", "U8", "S16", "U16", "S32", "U32",
+               [USB_MIXER_BOOLEAN] = "BOOLEAN",
+               [USB_MIXER_INV_BOOLEAN] = "INV_BOOLEAN",
+               [USB_MIXER_S8] = "S8",
+               [USB_MIXER_U8] = "U8",
+               [USB_MIXER_S16] = "S16",
+               [USB_MIXER_U16] = "U16",
+               [USB_MIXER_S32] = "S32",
+               [USB_MIXER_U32] = "U32",
+               [USB_MIXER_BESPOKEN] = "BESPOKEN",
        };
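
Indexing the table with designated initializers ties each label to its enum value, so the new BESPOKEN entry (or any future reordering of the enum) can no longer silently shift every name by one. The idiom in a standalone example (toy enum, not the USB mixer's):

    #include <stdio.h>

    enum val_type { VT_BOOL, VT_S8, VT_U8, VT_BESPOKEN, VT_COUNT };

    static const char *const val_names[VT_COUNT] = {
        [VT_BOOL]     = "BOOLEAN",
        [VT_S8]       = "S8",
        [VT_U8]       = "U8",
        [VT_BESPOKEN] = "BESPOKEN", /* new entries slot in by index */
    };

    int main(void)
    {
        printf("%s\n", val_names[VT_U8]);   /* "U8" in any order */
        return 0;
    }
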
        snd_iprintf(buffer, "    Info: id=%i, control=%i, cmask=0x%x, "
                            "channels=%i, type=\"%s\"\n", cval->head.id,
index 8b8bee3..e7accd8 100644 (file)
@@ -1897,6 +1897,9 @@ static const struct registration_quirk registration_quirks[] = {
        REG_QUIRK_ENTRY(0x0951, 0x16d8, 2),     /* Kingston HyperX AMP */
        REG_QUIRK_ENTRY(0x0951, 0x16ed, 2),     /* Kingston HyperX Cloud Alpha S */
        REG_QUIRK_ENTRY(0x0951, 0x16ea, 2),     /* Kingston HyperX Cloud Flight S */
+       REG_QUIRK_ENTRY(0x0ecb, 0x1f46, 2),     /* JBL Quantum 600 */
+       REG_QUIRK_ENTRY(0x0ecb, 0x2039, 2),     /* JBL Quantum 400 */
+       REG_QUIRK_ENTRY(0x0ecb, 0x203e, 2),     /* JBL Quantum 800 */
        { 0 }                                   /* terminator */
 };
 
index f83a70e..ce2ee8f 100644 (file)
@@ -20,5 +20,6 @@
 #define __ARCH_WANT_SET_GET_RLIMIT
 #define __ARCH_WANT_TIME32_SYSCALLS
 #define __ARCH_WANT_SYS_CLONE3
+#define __ARCH_WANT_MEMFD_SECRET
 
 #include <asm-generic/unistd.h>
index 39bb322..b11cfc8 100644 (file)
@@ -97,7 +97,7 @@ clean: bpftool_clean runqslower_clean resolve_btfids_clean
        $(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpf
        $(Q)$(RM) -r -- $(OUTPUT)feature
 
-install: $(PROGS) bpftool_install runqslower_install
+install: $(PROGS) bpftool_install
        $(call QUIET_INSTALL, bpf_jit_disasm)
        $(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/bin
        $(Q)$(INSTALL) $(OUTPUT)bpf_jit_disasm $(DESTDIR)$(prefix)/bin/bpf_jit_disasm
@@ -118,9 +118,6 @@ bpftool_clean:
 runqslower:
        $(call descend,runqslower)
 
-runqslower_install:
-       $(call descend,runqslower,install)
-
 runqslower_clean:
        $(call descend,runqslower,clean)
 
@@ -131,5 +128,5 @@ resolve_btfids_clean:
        $(call descend,resolve_btfids,clean)
 
 .PHONY: all install clean bpftool bpftool_install bpftool_clean \
-       runqslower runqslower_install runqslower_clean \
+       runqslower runqslower_clean \
        resolve_btfids resolve_btfids_clean
index 1828bba..dc6daa1 100644 (file)
@@ -222,6 +222,11 @@ int mount_bpffs_for_pin(const char *name)
        int err = 0;
 
        file = malloc(strlen(name) + 1);
+       if (!file) {
+               p_err("mem alloc failed");
+               return -1;
+       }
+
        strcpy(file, name);
        dir = dirname(file);
 
index e7e7eee..24734f2 100644 (file)
@@ -43,11 +43,13 @@ static int fprintf_json(void *out, const char *fmt, ...)
 {
        va_list ap;
        char *s;
+       int err;
 
        va_start(ap, fmt);
-       if (vasprintf(&s, fmt, ap) < 0)
-               return -1;
+       err = vasprintf(&s, fmt, ap);
        va_end(ap);
+       if (err < 0)
+               return -1;
 
        if (!oper_count) {
                int i;
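
The rework guarantees va_end() runs on every path and inspects vasprintf()'s result only afterwards, instead of returning early with the va_list still open. A self-contained sketch of the corrected shape (GNU vasprintf assumed; the wrapper name is invented):

    #define _GNU_SOURCE
    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    static char *fmt_alloc(const char *fmt, ...)
    {
        va_list ap;
        char *s;
        int err;

        va_start(ap, fmt);
        err = vasprintf(&s, fmt, ap);
        va_end(ap);             /* always paired with va_start */
        if (err < 0)            /* checked only after va_end() */
            return NULL;
        return s;
    }

    int main(void)
    {
        char *s = fmt_alloc("%d-%s", 42, "ok");

        if (!s)
            return 1;
        puts(s);
        free(s);
        return 0;
    }
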
index 645530c..ab9353f 100644 (file)
@@ -74,7 +74,7 @@ int handle__sched_switch(u64 *ctx)
        u32 pid;
 
        /* ivcsw: treat like an enqueue event and store timestamp */
-       if (prev->state == TASK_RUNNING)
+       if (prev->__state == TASK_RUNNING)
                trace_enqueue(prev);
 
        pid = next->pid;
index 1555a0c..13b86bd 100644 (file)
@@ -4,12 +4,6 @@
 
 /* CONFIG_CC_VERSION_TEXT (Do not delete this comment. See help in Kconfig) */
 
-#ifdef CONFIG_CPU_BIG_ENDIAN
-#define __BIG_ENDIAN 4321
-#else
-#define __LITTLE_ENDIAN 1234
-#endif
-
 #define __ARG_PLACEHOLDER_1 0,
 #define __take_second_arg(__ignored, val, ...) val
 
index f211961..a9d6fcd 100644 (file)
@@ -873,8 +873,13 @@ __SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule)
 #define __NR_landlock_restrict_self 446
 __SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
 
+#ifdef __ARCH_WANT_MEMFD_SECRET
+#define __NR_memfd_secret 447
+__SYSCALL(__NR_memfd_secret, sys_memfd_secret)
+#endif
+
 #undef __NR_syscalls
-#define __NR_syscalls 447
+#define __NR_syscalls 448
 
 /*
  * 32 bit systems traditionally used different
index 1e04ce7..6f5e275 100644 (file)
@@ -10136,7 +10136,7 @@ int bpf_link__unpin(struct bpf_link *link)
 
        err = unlink(link->pin_path);
        if (err != 0)
-               return libbpf_err_errno(err);
+               return -errno;
 
        pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
        zfree(&link->pin_path);
@@ -11197,7 +11197,7 @@ int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
 
        cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
        if (cnt < 0)
-               return libbpf_err_errno(cnt);
+               return -errno;
 
        for (i = 0; i < cnt; i++) {
                struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
index af973e4..f6b5779 100644 (file)
 444    common  landlock_create_ruleset sys_landlock_create_ruleset
 445    common  landlock_add_rule       sys_landlock_add_rule
 446    common  landlock_restrict_self  sys_landlock_restrict_self
+447    common  memfd_secret            sys_memfd_secret
 
 #
 # Due to a historical design error, certain syscalls are numbered differently
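
With the architecture opt-in (__ARCH_WANT_MEMFD_SECRET) and the syscall wired up above, userspace can create a secret-memory descriptor directly through syscall(2). A minimal usage sketch, assuming secretmem is enabled on the running kernel; the fd must be sized and mapped before use:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_memfd_secret
    #define __NR_memfd_secret 447
    #endif

    int main(void)
    {
            int fd = syscall(__NR_memfd_secret, 0);  /* flags: 0 or FD_CLOEXEC */
            char *p;

            if (fd < 0) {
                    perror("memfd_secret");          /* e.g. secretmem disabled */
                    return 1;
            }
            if (ftruncate(fd, 4096) < 0)
                    return 1;
            p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    return 1;
            strcpy(p, "this page is removed from the kernel direct map");
            munmap(p, 4096);
            close(fd);
            return 0;
    }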
index 5d6f583..c88c61e 100644 (file)
@@ -361,9 +361,10 @@ static struct dso *findnew_dso(int pid, int tid, const char *filename,
                dso = machine__findnew_dso_id(machine, filename, id);
        }
 
-       if (dso)
+       if (dso) {
+               nsinfo__put(dso->nsinfo);
                dso->nsinfo = nsi;
-       else
+       } else
                nsinfo__put(nsi);
 
        thread__put(thread);
@@ -992,8 +993,10 @@ int cmd_inject(int argc, const char **argv)
 
        data.path = inject.input_name;
        inject.session = perf_session__new(&data, inject.output.is_pipe, &inject.tool);
-       if (IS_ERR(inject.session))
-               return PTR_ERR(inject.session);
+       if (IS_ERR(inject.session)) {
+               ret = PTR_ERR(inject.session);
+               goto out_close_output;
+       }
 
        if (zstd_init(&(inject.session->zstd_data), 0) < 0)
                pr_warning("Decompression initialization failed.\n");
@@ -1035,6 +1038,8 @@ int cmd_inject(int argc, const char **argv)
 out_delete:
        zstd_fini(&(inject.session->zstd_data));
        perf_session__delete(inject.session);
+out_close_output:
+       perf_data__close(&inject.output);
        free(inject.itrace_synth_opts.vm_tm_corr_args);
        return ret;
 }
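
The new out_close_output label exists so the output file opened before perf_session__new() is closed even when session creation fails; cleanup labels unwind in the reverse order of acquisition. A generic sketch of the pattern, with all names and stubs illustrative:

    #include <stdio.h>

    static int open_output(void)    { return 0; }   /* stubs for illustration */
    static int open_session(void)   { return -1; }  /* pretend this step fails */
    static int run(void)            { return 0; }
    static void close_session(void) { puts("close session"); }
    static void close_output(void)  { puts("close output"); }

    static int do_inject(void)
    {
            int ret = open_output();

            if (ret)
                    return ret;
            ret = open_session();
            if (ret)
                    goto out_close_output;  /* unwind only what succeeded */
            ret = run();
            close_session();
    out_close_output:
            close_output();
            return ret;
    }

    int main(void)
    {
            return do_inject() ? 1 : 0;
    }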
index 6386af6..dc0364f 100644 (file)
@@ -1175,6 +1175,8 @@ int cmd_report(int argc, const char **argv)
                .annotation_opts         = annotation__default_options,
                .skip_empty              = true,
        };
+       char *sort_order_help = sort_help("sort by key(s):");
+       char *field_order_help = sort_help("output field(s): overhead period sample ");
        const struct option options[] = {
        OPT_STRING('i', "input", &input_name, "file",
                    "input file name"),
@@ -1209,9 +1211,9 @@ int cmd_report(int argc, const char **argv)
        OPT_BOOLEAN(0, "header-only", &report.header_only,
                    "Show only data header."),
        OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
-                  sort_help("sort by key(s):")),
+                  sort_order_help),
        OPT_STRING('F', "fields", &field_order, "key[,keys...]",
-                  sort_help("output field(s): overhead period sample ")),
+                  field_order_help),
        OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
                    "Show sample percentage for different cpu modes"),
        OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
@@ -1344,11 +1346,11 @@ int cmd_report(int argc, const char **argv)
        char sort_tmp[128];
 
        if (ret < 0)
-               return ret;
+               goto exit;
 
        ret = perf_config(report__config, &report);
        if (ret)
-               return ret;
+               goto exit;
 
        argc = parse_options(argc, argv, options, report_usage, 0);
        if (argc) {
@@ -1362,8 +1364,10 @@ int cmd_report(int argc, const char **argv)
                report.symbol_filter_str = argv[0];
        }
 
-       if (annotate_check_args(&report.annotation_opts) < 0)
-               return -EINVAL;
+       if (annotate_check_args(&report.annotation_opts) < 0) {
+               ret = -EINVAL;
+               goto exit;
+       }
 
        if (report.mmaps_mode)
                report.tasks_mode = true;
@@ -1377,12 +1381,14 @@ int cmd_report(int argc, const char **argv)
        if (symbol_conf.vmlinux_name &&
            access(symbol_conf.vmlinux_name, R_OK)) {
                pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto exit;
        }
        if (symbol_conf.kallsyms_name &&
            access(symbol_conf.kallsyms_name, R_OK)) {
                pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto exit;
        }
 
        if (report.inverted_callchain)
@@ -1406,12 +1412,14 @@ int cmd_report(int argc, const char **argv)
 
 repeat:
        session = perf_session__new(&data, false, &report.tool);
-       if (IS_ERR(session))
-               return PTR_ERR(session);
+       if (IS_ERR(session)) {
+               ret = PTR_ERR(session);
+               goto exit;
+       }
 
        ret = evswitch__init(&report.evswitch, session->evlist, stderr);
        if (ret)
-               return ret;
+               goto exit;
 
        if (zstd_init(&(session->zstd_data), 0) < 0)
                pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
@@ -1646,5 +1654,8 @@ error:
 
        zstd_fini(&(session->zstd_data));
        perf_session__delete(session);
+exit:
+       free(sort_order_help);
+       free(field_order_help);
        return ret;
 }
index 954ce2f..1ff10d4 100644 (file)
@@ -670,7 +670,7 @@ static void create_tasks(struct perf_sched *sched)
        err = pthread_attr_init(&attr);
        BUG_ON(err);
        err = pthread_attr_setstacksize(&attr,
-                       (size_t) max(16 * 1024, PTHREAD_STACK_MIN));
+                       (size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
        BUG_ON(err);
        err = pthread_mutex_lock(&sched->start_work_mutex);
        BUG_ON(err);
@@ -3335,6 +3335,16 @@ static void setup_sorting(struct perf_sched *sched, const struct option *options
        sort_dimension__add("pid", &sched->cmp_pid);
 }
 
+static bool schedstat_events_exposed(void)
+{
+       /*
+        * Select "sched:sched_stat_wait" event to check
+        * whether schedstat tracepoints are exposed.
+        */
+       return !IS_ERR(trace_event__tp_format("sched",
+                                             "sched_stat_wait"));
+}
+
 static int __cmd_record(int argc, const char **argv)
 {
        unsigned int rec_argc, i, j;
@@ -3346,21 +3356,33 @@ static int __cmd_record(int argc, const char **argv)
                "-m", "1024",
                "-c", "1",
                "-e", "sched:sched_switch",
-               "-e", "sched:sched_stat_wait",
-               "-e", "sched:sched_stat_sleep",
-               "-e", "sched:sched_stat_iowait",
                "-e", "sched:sched_stat_runtime",
                "-e", "sched:sched_process_fork",
                "-e", "sched:sched_wakeup_new",
                "-e", "sched:sched_migrate_task",
        };
+
+       /*
+        * The sched_stat_{wait, sleep, iowait} tracepoints are exposed
+        * only when CONFIG_SCHEDSTATS is set. To keep "perf sched record"
+        * from failing, decide at runtime whether the schedstat events
+        * can actually be recorded.
+        */
+       const char * const schedstat_args[] = {
+               "-e", "sched:sched_stat_wait",
+               "-e", "sched:sched_stat_sleep",
+               "-e", "sched:sched_stat_iowait",
+       };
+       unsigned int schedstat_argc = schedstat_events_exposed() ?
+               ARRAY_SIZE(schedstat_args) : 0;
+
        struct tep_event *waking_event;
 
        /*
         * +2 for either "-e", "sched:sched_wakeup" or
         * "-e", "sched:sched_waking"
         */
-       rec_argc = ARRAY_SIZE(record_args) + 2 + argc - 1;
+       rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
        rec_argv = calloc(rec_argc + 1, sizeof(char *));
 
        if (rec_argv == NULL)
@@ -3376,6 +3398,9 @@ static int __cmd_record(int argc, const char **argv)
        else
                rec_argv[i++] = strdup("sched:sched_wakeup");
 
+       for (j = 0; j < schedstat_argc; j++)
+               rec_argv[i++] = strdup(schedstat_args[j]);
+
        for (j = 1; j < (unsigned int)argc; j++, i++)
                rec_argv[i] = argv[j];
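
A rough standalone equivalent of the probe used above: the sched_stat_* tracepoints appear under tracefs only when CONFIG_SCHEDSTATS is set, so their presence can be tested before the record command line is assembled. The tracefs mount point below is an assumption:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    /* minimal stand-in for schedstat_events_exposed(); path assumed */
    static bool schedstat_exposed(void)
    {
            return access("/sys/kernel/tracing/events/sched/sched_stat_wait",
                          F_OK) == 0;
    }

    int main(void)
    {
            printf("schedstat events %savailable\n",
                   schedstat_exposed() ? "" : "not ");
            return 0;
    }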
 
index 8c03a98..064da7f 100644 (file)
@@ -2601,6 +2601,12 @@ static void perf_script__exit_per_event_dump_stats(struct perf_script *script)
        }
 }
 
+static void perf_script__exit(struct perf_script *script)
+{
+       perf_thread_map__put(script->threads);
+       perf_cpu_map__put(script->cpus);
+}
+
 static int __cmd_script(struct perf_script *script)
 {
        int ret;
@@ -4143,8 +4149,10 @@ out_delete:
                zfree(&script.ptime_range);
        }
 
+       zstd_fini(&(session->zstd_data));
        evlist__free_stats(session->evlist);
        perf_session__delete(session);
+       perf_script__exit(&script);
 
        if (script_started)
                cleanup_scripting();
index d25cb80..6343759 100644 (file)
@@ -2445,9 +2445,6 @@ int cmd_stat(int argc, const char **argv)
 
        evlist__check_cpu_maps(evsel_list);
 
-       if (perf_pmu__has_hybrid())
-               stat_config.no_merge = true;
-
        /*
         * Initialize thread_map with comm names,
         * so we could print it out on output.
index 7ec18ff..9c265fa 100644 (file)
@@ -2266,6 +2266,14 @@ static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sam
        return augmented_args;
 }
 
+static void syscall__exit(struct syscall *sc)
+{
+       if (!sc)
+               return;
+
+       free(sc->arg_fmt);
+}
+
 static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
                            union perf_event *event __maybe_unused,
                            struct perf_sample *sample)
@@ -3095,6 +3103,21 @@ static struct evsel *evsel__new_pgfault(u64 config)
        return evsel;
 }
 
+static void evlist__free_syscall_tp_fields(struct evlist *evlist)
+{
+       struct evsel *evsel;
+
+       evlist__for_each_entry(evlist, evsel) {
+               struct evsel_trace *et = evsel->priv;
+
+               if (!et || !evsel->tp_format || strcmp(evsel->tp_format->system, "syscalls"))
+                       continue;
+
+               free(et->fmt);
+               free(et);
+       }
+}
+
 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
 {
        const u32 type = event->header.type;
@@ -4130,7 +4153,7 @@ out_disable:
 
 out_delete_evlist:
        trace__symbols__exit(trace);
-
+       evlist__free_syscall_tp_fields(evlist);
        evlist__delete(evlist);
        cgroup__put(trace->cgroup);
        trace->evlist = NULL;
@@ -4636,6 +4659,9 @@ do_concat:
                err = parse_events_option(&o, lists[0], 0);
        }
 out:
+       free(strace_groups_dir);
+       free(lists[0]);
+       free(lists[1]);
        if (sep)
                *sep = ',';
 
@@ -4701,6 +4727,21 @@ out:
        return err;
 }
 
+static void trace__exit(struct trace *trace)
+{
+       int i;
+
+       strlist__delete(trace->ev_qualifier);
+       free(trace->ev_qualifier_ids.entries);
+       if (trace->syscalls.table) {
+               for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
+                       syscall__exit(&trace->syscalls.table[i]);
+               free(trace->syscalls.table);
+       }
+       syscalltbl__delete(trace->sctbl);
+       zfree(&trace->perfconfig_events);
+}
+
 int cmd_trace(int argc, const char **argv)
 {
        const char *trace_usage[] = {
@@ -5135,6 +5176,6 @@ out_close:
        if (output_name != NULL)
                fclose(trace.output);
 out:
-       zfree(&trace.perfconfig_events);
+       trace__exit(&trace);
        return err;
 }
index 33bda9c..dbf5f52 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <errno.h>
 #include <stdio.h>
+#include <stdlib.h>
 #include <sys/epoll.h>
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -276,6 +277,7 @@ static int __test__bpf(int idx)
        }
 
 out:
+       free(obj_buf);
        bpf__clear();
        return ret;
 }
index 6562181..44a5052 100644 (file)
@@ -88,6 +88,7 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu
        struct evsel *evsel;
        struct event_name tmp;
        struct evlist *evlist = evlist__new_default();
+       char *unit = strdup("KRAVA");
 
        TEST_ASSERT_VAL("failed to get evlist", evlist);
 
@@ -98,7 +99,7 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu
 
        perf_evlist__id_add(&evlist->core, &evsel->core, 0, 0, 123);
 
-       evsel->unit = strdup("KRAVA");
+       evsel->unit = unit;
 
        TEST_ASSERT_VAL("failed to synthesize attr update unit",
                        !perf_event__synthesize_event_update_unit(NULL, evsel, process_event_unit));
@@ -118,6 +119,7 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu
        TEST_ASSERT_VAL("failed to synthesize attr update cpus",
                        !perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus));
 
-       perf_cpu_map__put(evsel->core.own_cpus);
+       free(unit);
+       evlist__delete(evlist);
        return 0;
 }
index 5ebf563..4e09f0a 100644 (file)
@@ -5,6 +5,7 @@
 #include "tests.h"
 #include "debug.h"
 #include "pmu.h"
+#include "pmu-hybrid.h"
 #include <errno.h>
 #include <linux/kernel.h>
 
@@ -102,7 +103,7 @@ int test__perf_evsel__roundtrip_name_test(struct test *test __maybe_unused, int
 {
        int err = 0, ret = 0;
 
-       if (perf_pmu__has_hybrid())
+       if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom"))
                return perf_evsel__name_array_test(evsel__hw_names, 2);
 
        err = perf_evsel__name_array_test(evsel__hw_names, 1);
index edcbc70..1ac7291 100644 (file)
@@ -116,5 +116,7 @@ int test__maps__merge_in(struct test *t __maybe_unused, int subtest __maybe_unus
 
        ret = check_maps(merged3, ARRAY_SIZE(merged3), &maps);
        TEST_ASSERT_VAL("merge check failed", !ret);
+
+       maps__exit(&maps);
        return TEST_OK;
 }
index 56a7b6a..8d48667 100644 (file)
@@ -6,6 +6,7 @@
 #include "tests.h"
 #include "debug.h"
 #include "pmu.h"
+#include "pmu-hybrid.h"
 #include <dirent.h>
 #include <errno.h>
 #include <sys/types.h>
@@ -1596,6 +1597,13 @@ static int test__hybrid_raw1(struct evlist *evlist)
 {
        struct evsel *evsel = evlist__first(evlist);
 
+       if (!perf_pmu__hybrid_mounted("cpu_atom")) {
+               TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+               TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+               TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
+               return 0;
+       }
+
        TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
        TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
@@ -1620,13 +1628,9 @@ static int test__hybrid_cache_event(struct evlist *evlist)
 {
        struct evsel *evsel = evlist__first(evlist);
 
-       TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+       TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type);
        TEST_ASSERT_VAL("wrong config", 0x2 == (evsel->core.attr.config & 0xffffffff));
-
-       evsel = evsel__next(evsel);
-       TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type);
-       TEST_ASSERT_VAL("wrong config", 0x10002 == (evsel->core.attr.config & 0xffffffff));
        return 0;
 }
 
@@ -2028,7 +2032,7 @@ static struct evlist_test test__hybrid_events[] = {
                .id    = 7,
        },
        {
-               .name  = "cpu_core/LLC-loads/,cpu_atom/LLC-load-misses/",
+               .name  = "cpu_core/LLC-loads/",
                .check = test__hybrid_cache_event,
                .id    = 8,
        },
index 85d75b9..7c56bc1 100644 (file)
@@ -21,6 +21,7 @@
 #include "mmap.h"
 #include "tests.h"
 #include "pmu.h"
+#include "pmu-hybrid.h"
 
 #define CHECK__(x) {                           \
        while ((x) < 0) {                       \
@@ -93,7 +94,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
         * For hybrid "cycles:u", it creates two events.
         * Init the second evsel here.
         */
-       if (perf_pmu__has_hybrid()) {
+       if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) {
                evsel = evsel__next(evsel);
                evsel->core.attr.comm = 1;
                evsel->core.attr.disabled = 1;
index ec4e3b2..b5efe67 100644 (file)
@@ -61,6 +61,7 @@ static int session_write_header(char *path)
        TEST_ASSERT_VAL("failed to write header",
                        !perf_session__write_header(session, session->evlist, data.file.fd, true));
 
+       evlist__delete(session->evlist);
        perf_session__delete(session);
 
        return 0;
index 32ad92d..bc1f648 100644 (file)
@@ -2434,6 +2434,22 @@ static int cs_etm__process_event(struct perf_session *session,
        return 0;
 }
 
+static void dump_queued_data(struct cs_etm_auxtrace *etm,
+                            struct perf_record_auxtrace *event)
+{
+       struct auxtrace_buffer *buf;
+       unsigned int i;
+       /*
+        * Find all buffers with the same reference in the queues and dump them.
+        * This is because the queues can contain multiple entries of the same
+        * buffer that were split on aux records.
+        */
+       for (i = 0; i < etm->queues.nr_queues; ++i)
+               list_for_each_entry(buf, &etm->queues.queue_array[i].head, list)
+                       if (buf->reference == event->reference)
+                               cs_etm__dump_event(etm, buf);
+}
+
 static int cs_etm__process_auxtrace_event(struct perf_session *session,
                                          union perf_event *event,
                                          struct perf_tool *tool __maybe_unused)
@@ -2466,7 +2482,8 @@ static int cs_etm__process_auxtrace_event(struct perf_session *session,
                                cs_etm__dump_event(etm, buffer);
                                auxtrace_buffer__put_data(buffer);
                        }
-       }
+       } else if (dump_trace)
+               dump_queued_data(etm, &event->auxtrace);
 
        return 0;
 }
@@ -2683,6 +2700,172 @@ static u64 *cs_etm__create_meta_blk(u64 *buff_in, int *buff_in_offset,
        return metadata;
 }
 
+/*
+ * Puts a fragment of an auxtrace buffer into the auxtrace queues based
+ * on the bounds of aux_event, if it matches with the buffer that's at
+ * file_offset.
+ *
+ * Normally, whole auxtrace buffers would be added to the queue. But we
+ * want to reset the decoder for every PERF_RECORD_AUX event, and the decoder
+ * is reset across each buffer, so splitting the buffers up in advance has
+ * the same effect.
+ */
+static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_offset, size_t sz,
+                                     struct perf_record_aux *aux_event, struct perf_sample *sample)
+{
+       int err;
+       char buf[PERF_SAMPLE_MAX_SIZE];
+       union perf_event *auxtrace_event_union;
+       struct perf_record_auxtrace *auxtrace_event;
+       union perf_event auxtrace_fragment;
+       __u64 aux_offset, aux_size;
+
+       struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
+                                                  struct cs_etm_auxtrace,
+                                                  auxtrace);
+
+       /*
+        * There should be a PERF_RECORD_AUXTRACE event at the file_offset that we got
+        * from looping through the auxtrace index.
+        */
+       err = perf_session__peek_event(session, file_offset, buf,
+                                      PERF_SAMPLE_MAX_SIZE, &auxtrace_event_union, NULL);
+       if (err)
+               return err;
+       auxtrace_event = &auxtrace_event_union->auxtrace;
+       if (auxtrace_event->header.type != PERF_RECORD_AUXTRACE)
+               return -EINVAL;
+
+       if (auxtrace_event->header.size < sizeof(struct perf_record_auxtrace) ||
+               auxtrace_event->header.size != sz) {
+               return -EINVAL;
+       }
+
+       /*
+        * In per-thread mode, CPU is set to -1, but TID will be set instead. See
+        * auxtrace_mmap_params__set_idx(). Return 'not found' if neither CPU nor TID match.
+        */
+       if ((auxtrace_event->cpu == (__u32) -1 && auxtrace_event->tid != sample->tid) ||
+                       auxtrace_event->cpu != sample->cpu)
+               return 1;
+
+       if (aux_event->flags & PERF_AUX_FLAG_OVERWRITE) {
+               /*
+                * Clamp size in snapshot mode. The buffer size is clamped in
+                * __auxtrace_mmap__read() for snapshots, so the aux record size doesn't reflect
+                * the buffer size.
+                */
+               aux_size = min(aux_event->aux_size, auxtrace_event->size);
+
+               /*
+                * In this mode the head also points to the end of the buffer, so
+                * subtract the size from aux_offset to get the start, as in normal mode.
+                */
+               aux_offset = aux_event->aux_offset - aux_size;
+       } else {
+               aux_size = aux_event->aux_size;
+               aux_offset = aux_event->aux_offset;
+       }
+
+       if (aux_offset >= auxtrace_event->offset &&
+           aux_offset + aux_size <= auxtrace_event->offset + auxtrace_event->size) {
+               /*
+                * If this AUX event was inside this buffer somewhere, create a new auxtrace event
+                * based on the sizes of the aux event, and queue that fragment.
+                */
+               auxtrace_fragment.auxtrace = *auxtrace_event;
+               auxtrace_fragment.auxtrace.size = aux_size;
+               auxtrace_fragment.auxtrace.offset = aux_offset;
+               file_offset += aux_offset - auxtrace_event->offset + auxtrace_event->header.size;
+
+               pr_debug3("CS ETM: Queue buffer size: %#"PRI_lx64" offset: %#"PRI_lx64
+                         " tid: %d cpu: %d\n", aux_size, aux_offset, sample->tid, sample->cpu);
+               return auxtrace_queues__add_event(&etm->queues, session, &auxtrace_fragment,
+                                                 file_offset, NULL);
+       }
+
+       /* Wasn't inside this buffer, but there were no parse errors. 1 == 'not found' */
+       return 1;
+}
+
+static int cs_etm__queue_aux_records_cb(struct perf_session *session, union perf_event *event,
+                                       u64 offset __maybe_unused, void *data __maybe_unused)
+{
+       struct perf_sample sample;
+       int ret;
+       struct auxtrace_index_entry *ent;
+       struct auxtrace_index *auxtrace_index;
+       struct evsel *evsel;
+       size_t i;
+
+       /* Don't care about any other events, we're only queuing buffers for AUX events */
+       if (event->header.type != PERF_RECORD_AUX)
+               return 0;
+
+       if (event->header.size < sizeof(struct perf_record_aux))
+               return -EINVAL;
+
+       /* Truncated Aux records can have 0 size and shouldn't result in anything being queued. */
+       if (!event->aux.aux_size)
+               return 0;
+
+       /*
+        * Parse the sample; we need the sample_id_all data that comes after the event so that the
+        * CPU or PID can be matched to an AUXTRACE buffer's CPU or PID.
+        */
+       evsel = evlist__event2evsel(session->evlist, event);
+       if (!evsel)
+               return -EINVAL;
+       ret = evsel__parse_sample(evsel, event, &sample);
+       if (ret)
+               return ret;
+
+       /*
+        * Loop through the auxtrace index to find the buffer that matches up with this aux event.
+        */
+       list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
+               for (i = 0; i < auxtrace_index->nr; i++) {
+                       ent = &auxtrace_index->entries[i];
+                       ret = cs_etm__queue_aux_fragment(session, ent->file_offset,
+                                                        ent->sz, &event->aux, &sample);
+                       /*
+                        * Stop search on error or successful values. Continue search on
+                        * 1 ('not found')
+                        */
+                       if (ret != 1)
+                               return ret;
+               }
+       }
+
+       /*
+        * Couldn't find the buffer corresponding to this aux record; something went wrong. Warn but
+        * don't exit with an error because it will still be possible to decode other aux records.
+        */
+       pr_err("CS ETM: Couldn't find auxtrace buffer for aux_offset: %#"PRI_lx64
+              " tid: %d cpu: %d\n", event->aux.aux_offset, sample.tid, sample.cpu);
+       return 0;
+}
+
+static int cs_etm__queue_aux_records(struct perf_session *session)
+{
+       struct auxtrace_index *index = list_first_entry_or_null(&session->auxtrace_index,
+                                                               struct auxtrace_index, list);
+       if (index && index->nr > 0)
+               return perf_session__peek_events(session, session->header.data_offset,
+                                                session->header.data_size,
+                                                cs_etm__queue_aux_records_cb, NULL);
+
+       /*
+        * We would get here if there are no entries in the index (either no auxtrace
+        * buffers or no index at all). Fail silently as there is the possibility of
+        * queueing them in cs_etm__process_auxtrace_event() if etm->data_queued is still
+        * false.
+        *
+        * In that scenario, buffers will not be split by AUX records.
+        */
+       return 0;
+}
+
 int cs_etm__process_auxtrace_info(union perf_event *event,
                                  struct perf_session *session)
 {
@@ -2876,14 +3059,13 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
 
        if (dump_trace) {
                cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
-               return 0;
        }
 
        err = cs_etm__synth_events(etm, session);
        if (err)
                goto err_delete_thread;
 
-       err = auxtrace_queues__process_index(&etm->queues, session);
+       err = cs_etm__queue_aux_records(session);
        if (err)
                goto err_delete_thread;
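
In overwrite (snapshot) mode the aux record's aux_offset is the head, i.e. the end of the captured data, so the fragment start is recovered by subtracting the clamped size, as cs_etm__queue_aux_fragment() does above. A worked instance with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
            /* illustrative values, not taken from a real trace */
            unsigned long long buf_size = 0x800;   /* PERF_RECORD_AUXTRACE size */
            unsigned long long head     = 0x1800;  /* aux_offset in overwrite mode */
            unsigned long long rec_size = 0x1000;  /* aux_size before clamping */

            unsigned long long aux_size = rec_size < buf_size ? rec_size : buf_size;
            unsigned long long aux_off  = head - aux_size;

            /* prints: fragment: offset=0x1000 size=0x800 */
            printf("fragment: offset=%#llx size=%#llx\n", aux_off, aux_size);
            return 0;
    }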
 
index a9c102e..f5d260b 100644 (file)
@@ -20,7 +20,7 @@
 
 static void close_dir(struct perf_data_file *files, int nr)
 {
-       while (--nr >= 1) {
+       while (--nr >= 0) {
                close(files[nr].fd);
                zfree(&files[nr].path);
        }
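
The old bound of '--nr >= 1' stopped before index 0, leaking the first file's descriptor and path; a pre-decrement loop has to run down to 0 to visit every entry. A toy demonstration:

    #include <stdio.h>

    int main(void)
    {
            int nr = 3;

            /* visits 2, 1, 0; with '>= 1' the 0th entry would be skipped */
            while (--nr >= 0)
                    printf("closing entry %d\n", nr);
            return 0;
    }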
index d786cf6..ee15db2 100644 (file)
@@ -1154,8 +1154,10 @@ struct map *dso__new_map(const char *name)
        struct map *map = NULL;
        struct dso *dso = dso__new(name);
 
-       if (dso)
+       if (dso) {
                map = map__new2(0, dso);
+               dso__put(dso);
+       }
 
        return map;
 }
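
The fix implies that map__new2() takes its own reference on the dso, so the creation reference from dso__new() must be dropped by the caller or the dso can never be freed. A generic refcount sketch of that contract; the consumer-keeps-its-own-reference behaviour is the assumption here:

    #include <stdio.h>
    #include <stdlib.h>

    struct obj { int refcnt; };

    static struct obj *obj_new(void)
    {
            struct obj *o = calloc(1, sizeof(*o));

            if (o)
                    o->refcnt = 1;          /* creator's reference */
            return o;
    }

    static struct obj *obj_get(struct obj *o) { o->refcnt++; return o; }

    static void obj_put(struct obj *o)
    {
            if (--o->refcnt == 0) {
                    free(o);
                    puts("freed");
            }
    }

    /* consumer that keeps its own reference, as map__new2() is assumed to */
    static struct obj *consumer_attach(struct obj *o) { return obj_get(o); }

    int main(void)
    {
            struct obj *o = obj_new();              /* refcnt == 1 */
            struct obj *held = consumer_attach(o);  /* refcnt == 2 */

            obj_put(o);     /* drop the creator's reference, as the fix adds */
            obj_put(held);  /* consumer's release frees it: prints "freed" */
            return 0;
    }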
index 7d2ba84..609ca16 100644 (file)
@@ -113,14 +113,14 @@ static Dwarf_Line *cu_getsrc_die(Dwarf_Die *cu_die, Dwarf_Addr addr)
  *
  * Find a line number and file name for @addr in @cu_die.
  */
-int cu_find_lineinfo(Dwarf_Die *cu_die, unsigned long addr,
-                   const char **fname, int *lineno)
+int cu_find_lineinfo(Dwarf_Die *cu_die, Dwarf_Addr addr,
+                    const char **fname, int *lineno)
 {
        Dwarf_Line *line;
        Dwarf_Die die_mem;
        Dwarf_Addr faddr;
 
-       if (die_find_realfunc(cu_die, (Dwarf_Addr)addr, &die_mem)
+       if (die_find_realfunc(cu_die, addr, &die_mem)
            && die_entrypc(&die_mem, &faddr) == 0 &&
            faddr == addr) {
                *fname = dwarf_decl_file(&die_mem);
@@ -128,7 +128,7 @@ int cu_find_lineinfo(Dwarf_Die *cu_die, unsigned long addr,
                goto out;
        }
 
-       line = cu_getsrc_die(cu_die, (Dwarf_Addr)addr);
+       line = cu_getsrc_die(cu_die, addr);
        if (line && dwarf_lineno(line, lineno) == 0) {
                *fname = dwarf_linesrc(line, NULL, NULL);
                if (!*fname)
index cb99646..7ee0fa1 100644 (file)
@@ -19,7 +19,7 @@ const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname);
 const char *cu_get_comp_dir(Dwarf_Die *cu_die);
 
 /* Get a line number and file name for given address */
-int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr,
+int cu_find_lineinfo(Dwarf_Die *cudie, Dwarf_Addr addr,
                     const char **fname, int *lineno);
 
 /* Walk on functions at given address */
index ebc5e9a..cec2e6c 100644 (file)
@@ -186,10 +186,12 @@ void perf_env__exit(struct perf_env *env)
        zfree(&env->cpuid);
        zfree(&env->cmdline);
        zfree(&env->cmdline_argv);
+       zfree(&env->sibling_dies);
        zfree(&env->sibling_cores);
        zfree(&env->sibling_threads);
        zfree(&env->pmu_mappings);
        zfree(&env->cpu);
+       zfree(&env->cpu_pmu_caps);
        zfree(&env->numa_map);
 
        for (i = 0; i < env->nr_numa_nodes; i++)
index 39062df..51424cd 100644 (file)
@@ -69,7 +69,7 @@ int lzma_decompress_to_file(const char *input, int output_fd)
 
                        if (ferror(infile)) {
                                pr_err("lzma: read error: %s\n", strerror(errno));
-                               goto err_fclose;
+                               goto err_lzma_end;
                        }
 
                        if (feof(infile))
@@ -83,7 +83,7 @@ int lzma_decompress_to_file(const char *input, int output_fd)
 
                        if (writen(output_fd, buf_out, write_size) != write_size) {
                                pr_err("lzma: write error: %s\n", strerror(errno));
-                               goto err_fclose;
+                               goto err_lzma_end;
                        }
 
                        strm.next_out  = buf_out;
@@ -95,11 +95,13 @@ int lzma_decompress_to_file(const char *input, int output_fd)
                                break;
 
                        pr_err("lzma: failed %s\n", lzma_strerror(ret));
-                       goto err_fclose;
+                       goto err_lzma_end;
                }
        }
 
        err = 0;
+err_lzma_end:
+       lzma_end(&strm);
 err_fclose:
        fclose(infile);
        return err;
index dd9ed56..756295d 100644 (file)
@@ -99,7 +99,7 @@ int parse_libpfm_events_option(const struct option *opt, const char *str,
                        grp_leader = evsel;
 
                if (grp_evt > -1) {
-                       evsel->leader = grp_leader;
+                       evsel__set_leader(evsel, grp_leader);
                        grp_leader->core.nr_members++;
                        grp_evt++;
                }
index 44b90d6..fc683bc 100644 (file)
@@ -742,9 +742,13 @@ struct pmu_events_map *__weak pmu_events_map__find(void)
        return perf_pmu__find_map(NULL);
 }
 
-static bool perf_pmu__valid_suffix(char *pmu_name, char *tok)
+/*
+ * To be valid, pmu_name must be tok itself, or tok followed by digits,
+ * optionally separated by '_' (tok{digits} or tok_{digits}).
+ */
+static bool perf_pmu__valid_suffix(const char *pmu_name, char *tok)
 {
-       char *p;
+       const char *p;
 
        if (strncmp(pmu_name, tok, strlen(tok)))
                return false;
@@ -753,12 +757,16 @@ static bool perf_pmu__valid_suffix(char *pmu_name, char *tok)
        if (*p == 0)
                return true;
 
-       if (*p != '_')
-               return false;
+       if (*p == '_')
+               ++p;
 
-       ++p;
-       if (*p == 0 || !isdigit(*p))
-               return false;
+       /* Ensure we end in a number */
+       while (1) {
+               if (!isdigit(*p))
+                       return false;
+               if (*(++p) == 0)
+                       break;
+       }
 
        return true;
 }
@@ -789,12 +797,19 @@ bool pmu_uncore_alias_match(const char *pmu_name, const char *name)
         *          match "socket" in "socketX_pmunameY" and then "pmuname" in
         *          "pmunameY".
         */
-       for (; tok; name += strlen(tok), tok = strtok_r(NULL, ",", &tmp)) {
+       while (1) {
+               char *next_tok = strtok_r(NULL, ",", &tmp);
+
                name = strstr(name, tok);
-               if (!name || !perf_pmu__valid_suffix((char *)name, tok)) {
+               if (!name ||
+                   (!next_tok && !perf_pmu__valid_suffix(name, tok))) {
                        res = false;
                        goto out;
                }
+               if (!next_tok)
+                       break;
+               tok = next_tok;
+               name += strlen(tok);
        }
 
        res = true;
@@ -950,6 +965,13 @@ static struct perf_pmu *pmu_lookup(const char *name)
        LIST_HEAD(format);
        LIST_HEAD(aliases);
        __u32 type;
+       bool is_hybrid = perf_pmu__hybrid_mounted(name);
+
+       /*
+        * Reject a "cpu_"-prefixed PMU name that is not actually mounted as hybrid in sysfs.
+        */
+       if (!strncmp(name, "cpu_", 4) && !is_hybrid)
+               return NULL;
 
        /*
         * The pmu data we store & need consists of the pmu
@@ -978,7 +1000,7 @@ static struct perf_pmu *pmu_lookup(const char *name)
        pmu->is_uncore = pmu_is_uncore(name);
        if (pmu->is_uncore)
                pmu->id = pmu_id(name);
-       pmu->is_hybrid = perf_pmu__hybrid_mounted(name);
+       pmu->is_hybrid = is_hybrid;
        pmu->max_precise = pmu_max_precise(name);
        pmu_add_cpu_aliases(&aliases, pmu);
        pmu_add_sys_aliases(&aliases, pmu);
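
Restating the relaxed suffix rule as a self-contained check; this is a re-implementation for illustration only (the real helper is static in util/pmu.c):

    #include <assert.h>
    #include <ctype.h>
    #include <stdbool.h>
    #include <string.h>

    /* valid if pmu_name is tok, tok{digits}, or tok_{digits} */
    static bool valid_suffix(const char *pmu_name, const char *tok)
    {
            const char *p;

            if (strncmp(pmu_name, tok, strlen(tok)))
                    return false;

            p = pmu_name + strlen(tok);
            if (*p == 0)
                    return true;
            if (*p == '_')
                    ++p;

            /* the remainder must be one or more digits */
            while (1) {
                    if (!isdigit((unsigned char)*p))
                            return false;
                    if (*(++p) == 0)
                            return true;
            }
    }

    int main(void)
    {
            assert(valid_suffix("uncore_cbox_0", "uncore_cbox")); /* tok_{digits} */
            assert(valid_suffix("uncore_imc5", "uncore_imc"));    /* tok{digits}  */
            assert(valid_suffix("uncore_cbox", "uncore_cbox"));   /* exact match  */
            assert(!valid_suffix("uncore_cboxa", "uncore_cbox")); /* not digits   */
            return 0;
    }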
index c14e1d2..b2a02c9 100644 (file)
@@ -179,8 +179,10 @@ struct map *get_target_map(const char *target, struct nsinfo *nsi, bool user)
                struct map *map;
 
                map = dso__new_map(target);
-               if (map && map->dso)
+               if (map && map->dso) {
+                       nsinfo__put(map->dso->nsinfo);
                        map->dso->nsinfo = nsinfo__get(nsi);
+               }
                return map;
        } else {
                return kernel_get_module_map(target);
@@ -237,8 +239,8 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
                clear_probe_trace_event(tevs + i);
 }
 
-static bool kprobe_blacklist__listed(unsigned long address);
-static bool kprobe_warn_out_range(const char *symbol, unsigned long address)
+static bool kprobe_blacklist__listed(u64 address);
+static bool kprobe_warn_out_range(const char *symbol, u64 address)
 {
        struct map *map;
        bool ret = false;
@@ -398,8 +400,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo,
        pr_debug("Symbol %s address found : %" PRIx64 "\n",
                        pp->function, address);
 
-       ret = debuginfo__find_probe_point(dinfo, (unsigned long)address,
-                                         result);
+       ret = debuginfo__find_probe_point(dinfo, address, result);
        if (ret <= 0)
                ret = (!ret) ? -ENOENT : ret;
        else {
@@ -587,7 +588,7 @@ static void debuginfo_cache__exit(void)
 }
 
 
-static int get_text_start_address(const char *exec, unsigned long *address,
+static int get_text_start_address(const char *exec, u64 *address,
                                  struct nsinfo *nsi)
 {
        Elf *elf;
@@ -632,7 +633,7 @@ static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp,
                                            bool is_kprobe)
 {
        struct debuginfo *dinfo = NULL;
-       unsigned long stext = 0;
+       u64 stext = 0;
        u64 addr = tp->address;
        int ret = -ENOENT;
 
@@ -660,8 +661,7 @@ static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp,
 
        dinfo = debuginfo_cache__open(tp->module, verbose <= 0);
        if (dinfo)
-               ret = debuginfo__find_probe_point(dinfo,
-                                                (unsigned long)addr, pp);
+               ret = debuginfo__find_probe_point(dinfo, addr, pp);
        else
                ret = -ENOENT;
 
@@ -676,7 +676,7 @@ error:
 
 /* Adjust symbol name and address */
 static int post_process_probe_trace_point(struct probe_trace_point *tp,
-                                          struct map *map, unsigned long offs)
+                                          struct map *map, u64 offs)
 {
        struct symbol *sym;
        u64 addr = tp->address - offs;
@@ -719,7 +719,7 @@ post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
                                        int ntevs, const char *pathname)
 {
        struct map *map;
-       unsigned long stext = 0;
+       u64 stext = 0;
        int i, ret = 0;
 
        /* Prepare a map for offline binary */
@@ -745,7 +745,7 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
                                          struct nsinfo *nsi)
 {
        int i, ret = 0;
-       unsigned long stext = 0;
+       u64 stext = 0;
 
        if (!exec)
                return 0;
@@ -790,7 +790,7 @@ post_process_module_probe_trace_events(struct probe_trace_event *tevs,
        mod_name = find_module_name(module);
        for (i = 0; i < ntevs; i++) {
                ret = post_process_probe_trace_point(&tevs[i].point,
-                                               map, (unsigned long)text_offs);
+                                               map, text_offs);
                if (ret < 0)
                        break;
                tevs[i].point.module =
@@ -1534,7 +1534,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
                 * so tmp[1] should always be valid (but could be '\0').
                 */
                if (tmp && !strncmp(tmp, "0x", 2)) {
-                       pp->abs_address = strtoul(pp->function, &tmp, 0);
+                       pp->abs_address = strtoull(pp->function, &tmp, 0);
                        if (*tmp != '\0') {
                                semantic_error("Invalid absolute address.\n");
                                return -EINVAL;
@@ -1909,7 +1909,7 @@ int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev)
                        argv[i] = NULL;
                        argc -= 1;
                } else
-                       tp->address = strtoul(fmt1_str, NULL, 0);
+                       tp->address = strtoull(fmt1_str, NULL, 0);
        } else {
                /* Only the symbol-based probe has offset */
                tp->symbol = strdup(fmt1_str);
@@ -2155,7 +2155,7 @@ synthesize_uprobe_trace_def(struct probe_trace_point *tp, struct strbuf *buf)
                return -EINVAL;
 
        /* Use the tp->address for uprobes */
-       err = strbuf_addf(buf, "%s:0x%lx", tp->module, tp->address);
+       err = strbuf_addf(buf, "%s:0x%" PRIx64, tp->module, tp->address);
 
        if (err >= 0 && tp->ref_ctr_offset) {
                if (!uprobe_ref_ctr_is_supported())
@@ -2170,7 +2170,7 @@ synthesize_kprobe_trace_def(struct probe_trace_point *tp, struct strbuf *buf)
 {
        if (!strncmp(tp->symbol, "0x", 2)) {
                /* Absolute address. See try_to_find_absolute_address() */
-               return strbuf_addf(buf, "%s%s0x%lx", tp->module ?: "",
+               return strbuf_addf(buf, "%s%s0x%" PRIx64, tp->module ?: "",
                                  tp->module ? ":" : "", tp->address);
        } else {
                return strbuf_addf(buf, "%s%s%s+%lu", tp->module ?: "",
@@ -2269,7 +2269,7 @@ static int convert_to_perf_probe_point(struct probe_trace_point *tp,
                pp->function = strdup(tp->symbol);
                pp->offset = tp->offset;
        } else {
-               ret = e_snprintf(buf, 128, "0x%" PRIx64, (u64)tp->address);
+               ret = e_snprintf(buf, 128, "0x%" PRIx64, tp->address);
                if (ret < 0)
                        return ret;
                pp->function = strdup(buf);
@@ -2450,8 +2450,8 @@ void clear_probe_trace_event(struct probe_trace_event *tev)
 
 struct kprobe_blacklist_node {
        struct list_head list;
-       unsigned long start;
-       unsigned long end;
+       u64 start;
+       u64 end;
        char *symbol;
 };
 
@@ -2496,7 +2496,7 @@ static int kprobe_blacklist__load(struct list_head *blacklist)
                }
                INIT_LIST_HEAD(&node->list);
                list_add_tail(&node->list, blacklist);
-               if (sscanf(buf, "0x%lx-0x%lx", &node->start, &node->end) != 2) {
+               if (sscanf(buf, "0x%" PRIx64 "-0x%" PRIx64, &node->start, &node->end) != 2) {
                        ret = -EINVAL;
                        break;
                }
@@ -2512,7 +2512,7 @@ static int kprobe_blacklist__load(struct list_head *blacklist)
                        ret = -ENOMEM;
                        break;
                }
-               pr_debug2("Blacklist: 0x%lx-0x%lx, %s\n",
+               pr_debug2("Blacklist: 0x%" PRIx64 "-0x%" PRIx64 ", %s\n",
                          node->start, node->end, node->symbol);
                ret++;
        }
@@ -2524,8 +2524,7 @@ static int kprobe_blacklist__load(struct list_head *blacklist)
 }
 
 static struct kprobe_blacklist_node *
-kprobe_blacklist__find_by_address(struct list_head *blacklist,
-                                 unsigned long address)
+kprobe_blacklist__find_by_address(struct list_head *blacklist, u64 address)
 {
        struct kprobe_blacklist_node *node;
 
@@ -2553,7 +2552,7 @@ static void kprobe_blacklist__release(void)
        kprobe_blacklist__delete(&kprobe_blacklist);
 }
 
-static bool kprobe_blacklist__listed(unsigned long address)
+static bool kprobe_blacklist__listed(u64 address)
 {
        return !!kprobe_blacklist__find_by_address(&kprobe_blacklist, address);
 }
@@ -3221,7 +3220,7 @@ static int try_to_find_absolute_address(struct perf_probe_event *pev,
         * In __add_probe_trace_events, a NULL symbol is interpreted as
         * invalid.
         */
-       if (asprintf(&tp->symbol, "0x%lx", tp->address) < 0)
+       if (asprintf(&tp->symbol, "0x%" PRIx64, tp->address) < 0)
                goto errout;
 
        /* For kprobe, check range */
@@ -3232,7 +3231,7 @@ static int try_to_find_absolute_address(struct perf_probe_event *pev,
                goto errout;
        }
 
-       if (asprintf(&tp->realname, "abs_%lx", tp->address) < 0)
+       if (asprintf(&tp->realname, "abs_%" PRIx64, tp->address) < 0)
                goto errout;
 
        if (pev->target) {
index 65769d7..8ad5b15 100644 (file)
@@ -33,7 +33,7 @@ struct probe_trace_point {
        char            *module;        /* Module name */
        unsigned long   offset;         /* Offset from symbol */
        unsigned long   ref_ctr_offset; /* SDT reference counter offset */
-       unsigned long   address;        /* Actual address of the trace point */
+       u64             address;        /* Actual address of the trace point */
        bool            retprobe;       /* Return probe flag */
 };
 
@@ -70,7 +70,7 @@ struct perf_probe_point {
        bool            retprobe;       /* Return probe flag */
        char            *lazy_line;     /* Lazy matching pattern */
        unsigned long   offset;         /* Offset from function entry */
-       unsigned long   abs_address;    /* Absolute address of the point */
+       u64             abs_address;    /* Absolute address of the point */
 };
 
 /* Perf probe probing argument field chain */
index f9a6cbc..3d50de3 100644 (file)
@@ -377,11 +377,11 @@ int probe_file__del_events(int fd, struct strfilter *filter)
 
        ret = probe_file__get_events(fd, filter, namelist);
        if (ret < 0)
-               return ret;
+               goto out;
 
        ret = probe_file__del_strlist(fd, namelist);
+out:
        strlist__delete(namelist);
-
        return ret;
 }
 
index 02ef0d7..50d861a 100644 (file)
@@ -668,7 +668,7 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
        }
 
        tp->offset = (unsigned long)(paddr - eaddr);
-       tp->address = (unsigned long)paddr;
+       tp->address = paddr;
        tp->symbol = strdup(symbol);
        if (!tp->symbol)
                return -ENOMEM;
@@ -1707,7 +1707,7 @@ int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
 }
 
 /* Reverse search */
-int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
+int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr,
                                struct perf_probe_point *ppt)
 {
        Dwarf_Die cudie, spdie, indie;
@@ -1720,14 +1720,14 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
                addr += baseaddr;
        /* Find cu die */
        if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
-               pr_warning("Failed to find debug information for address %lx\n",
+               pr_warning("Failed to find debug information for address %" PRIx64 "\n",
                           addr);
                ret = -EINVAL;
                goto end;
        }
 
        /* Find a corresponding line (filename and lineno) */
-       cu_find_lineinfo(&cudie, addr, &fname, &lineno);
+       cu_find_lineinfo(&cudie, (Dwarf_Addr)addr, &fname, &lineno);
        /* Don't care whether it failed or not */
 
        /* Find a corresponding function (name, baseline and baseaddr) */
@@ -1742,7 +1742,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
                }
 
                fname = dwarf_decl_file(&spdie);
-               if (addr == (unsigned long)baseaddr) {
+               if (addr == baseaddr) {
                        /* Function entry - Relative line number is 0 */
                        lineno = baseline;
                        goto post;
@@ -1788,7 +1788,7 @@ post:
        if (lineno)
                ppt->line = lineno - baseline;
        else if (basefunc) {
-               ppt->offset = addr - (unsigned long)baseaddr;
+               ppt->offset = addr - baseaddr;
                func = basefunc;
        }
 
@@ -1828,8 +1828,7 @@ static int line_range_add_line(const char *src, unsigned int lineno,
 }
 
 static int line_range_walk_cb(const char *fname, int lineno,
-                             Dwarf_Addr addr __maybe_unused,
-                             void *data)
+                             Dwarf_Addr addr, void *data)
 {
        struct line_finder *lf = data;
        const char *__fname;
index 2febb58..8bc1c80 100644 (file)
@@ -46,7 +46,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
                                 struct probe_trace_event **tevs);
 
 /* Find a perf_probe_point from debuginfo */
-int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
+int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr,
                                struct perf_probe_point *ppt);
 
 int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
index e9c929a..51f7274 100644 (file)
@@ -306,6 +306,7 @@ void perf_session__delete(struct perf_session *session)
                        evlist__delete(session->evlist);
                perf_data__close(session->data);
        }
+       trace_event__cleanup(&session->tevent);
        free(session);
 }
 
index 88ce47f..568a88c 100644 (file)
@@ -3370,7 +3370,7 @@ static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int
                add_key(sb, s[i].name, llen);
 }
 
-const char *sort_help(const char *prefix)
+char *sort_help(const char *prefix)
 {
        struct strbuf sb;
        char *s;
index 87a0926..b67c469 100644 (file)
@@ -302,7 +302,7 @@ void reset_output_field(void);
 void sort__setup_elide(FILE *fp);
 void perf_hpp__set_elide(int idx, bool elide);
 
-const char *sort_help(const char *prefix);
+char *sort_help(const char *prefix);
 
 int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
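
With the signature change, sort_help() hands back freshly allocated memory and the caller owns it, which is why builtin-report.c above captures the strings once and frees them on every exit path. A stubbed sketch of the ownership contract (the stub body is illustrative; the real function builds the string from a strbuf):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* stand-in for the perf helper: returns heap memory the caller must free */
    static char *sort_help(const char *prefix)
    {
            return strdup(prefix);  /* the real code appends the key names */
    }

    int main(void)
    {
            char *help = sort_help("sort by key(s):");

            if (!help)
                    return 1;
            puts(help);
            free(help);             /* required on every exit path */
            return 0;
    }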
 
index 83a2bc0..5886010 100644 (file)
@@ -596,6 +596,18 @@ static void collect_all_aliases(struct perf_stat_config *config, struct evsel *c
        }
 }
 
+static bool is_uncore(struct evsel *evsel)
+{
+       struct perf_pmu *pmu = evsel__find_pmu(evsel);
+
+       return pmu && pmu->is_uncore;
+}
+
+static bool hybrid_uniquify(struct evsel *evsel)
+{
+       return perf_pmu__has_hybrid() && !is_uncore(evsel);
+}
+
 static bool collect_data(struct perf_stat_config *config, struct evsel *counter,
                            void (*cb)(struct perf_stat_config *config, struct evsel *counter, void *data,
                                       bool first),
@@ -604,7 +616,7 @@ static bool collect_data(struct perf_stat_config *config, struct evsel *counter,
        if (counter->merged_stat)
                return false;
        cb(config, counter, data, true);
-       if (config->no_merge)
+       if (config->no_merge || hybrid_uniquify(counter))
                uniquify_event_name(counter);
        else if (counter->auto_merge_stats)
                collect_all_aliases(config, counter, cb, data);
index be8d8d4..6276ce0 100755 (executable)
@@ -12,6 +12,8 @@ import sys
 import os
 import time
 
+assert sys.version_info >= (3, 7), "Python version is too old"
+
 from collections import namedtuple
 from enum import Enum, auto
 
index 90bc007..2c6f916 100644 (file)
@@ -6,15 +6,13 @@
 # Author: Felix Guo <felixguoxiuping@gmail.com>
 # Author: Brendan Higgins <brendanhiggins@google.com>
 
-from __future__ import annotations
 import importlib.util
 import logging
 import subprocess
 import os
 import shutil
 import signal
-from typing import Iterator
-from typing import Optional
+from typing import Iterator, Optional, Tuple
 
 from contextlib import ExitStack
 
@@ -208,7 +206,7 @@ def get_source_tree_ops(arch: str, cross_compile: Optional[str]) -> LinuxSourceT
                raise ConfigError(arch + ' is not a valid arch')
 
 def get_source_tree_ops_from_qemu_config(config_path: str,
-                                        cross_compile: Optional[str]) -> tuple[
+                                        cross_compile: Optional[str]) -> Tuple[
                                                         str, LinuxSourceTreeOperations]:
        # The module name/path has very little to do with where the actual file
        # exists (I learned this through experimentation and could not find it
index c3c524b..b88db3f 100644 (file)
@@ -338,9 +338,11 @@ def bubble_up_suite_errors(test_suites: Iterable[TestSuite]) -> TestStatus:
 def parse_test_result(lines: LineStream) -> TestResult:
        consume_non_diagnostic(lines)
        if not lines or not parse_tap_header(lines):
-               return TestResult(TestStatus.NO_TESTS, [], lines)
+               return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines)
        expected_test_suite_num = parse_test_plan(lines)
-       if not expected_test_suite_num:
+       if expected_test_suite_num == 0:
+               return TestResult(TestStatus.NO_TESTS, [], lines)
+       elif expected_test_suite_num is None:
                return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines)
        test_suites = []
        for i in range(1, expected_test_suite_num + 1):
index bdae0e5..75045aa 100755 (executable)
@@ -157,8 +157,18 @@ class KUnitParserTest(unittest.TestCase):
                        kunit_parser.TestStatus.FAILURE,
                        result.status)
 
+       def test_no_header(self):
+               empty_log = test_data_path('test_is_test_passed-no_tests_run_no_header.log')
+               with open(empty_log) as file:
+                       result = kunit_parser.parse_run_tests(
+                               kunit_parser.extract_tap_lines(file.readlines()))
+               self.assertEqual(0, len(result.suites))
+               self.assertEqual(
+                       kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS,
+                       result.status)
+
        def test_no_tests(self):
-               empty_log = test_data_path('test_is_test_passed-no_tests_run.log')
+               empty_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log')
                with open(empty_log) as file:
                        result = kunit_parser.parse_run_tests(
                                kunit_parser.extract_tap_lines(file.readlines()))
@@ -173,7 +183,7 @@ class KUnitParserTest(unittest.TestCase):
                with open(crash_log) as file:
                        result = kunit_parser.parse_run_tests(
                                kunit_parser.extract_tap_lines(file.readlines()))
-               print_mock.assert_any_call(StrContains('no tests run!'))
+               print_mock.assert_any_call(StrContains('could not parse test results!'))
                print_mock.stop()
                file.close()
 
@@ -309,7 +319,7 @@ class KUnitJsonTest(unittest.TestCase):
                        result["sub_groups"][1]["test_cases"][0])
 
        def test_no_tests_json(self):
-               result = self._json_for('test_is_test_passed-no_tests_run.log')
+               result = self._json_for('test_is_test_passed-no_tests_run_with_header.log')
                self.assertEqual(0, len(result['sub_groups']))
 
 class StrContains(str):
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log
new file mode 100644 (file)
index 0000000..5f48ee6
--- /dev/null
@@ -0,0 +1,2 @@
+TAP version 14
+1..0
index ee27d68..b5940e6 100644 (file)
@@ -715,6 +715,8 @@ out:
        bpf_object__close(obj);
 }
 
+#include "tailcall_bpf2bpf4.skel.h"
+
 /* test_tailcall_bpf2bpf_4 checks that tailcall counter is correctly preserved
  * across tailcalls combined with bpf2bpf calls. for making sure that tailcall
  * counter behaves correctly, bpf program will go through following flow:
@@ -727,10 +729,15 @@ out:
  * the loop begins. At the end of the test make sure that the global counter is
  * equal to 31, because tailcall counter includes the first two tailcalls
  * whereas global counter is incremented only on loop presented on flow above.
+ *
+ * The noise parameter is used to insert bpf_map_update calls into the logic
+ * to force verifier to patch instructions. This allows us to ensure jump
+ * logic remains correct with instruction movement.
  */
-static void test_tailcall_bpf2bpf_4(void)
+static void test_tailcall_bpf2bpf_4(bool noise)
 {
-       int err, map_fd, prog_fd, main_fd, data_fd, i, val;
+       int err, map_fd, prog_fd, main_fd, data_fd, i;
+       struct tailcall_bpf2bpf4__bss val;
        struct bpf_map *prog_array, *data_map;
        struct bpf_program *prog;
        struct bpf_object *obj;
@@ -774,11 +781,6 @@ static void test_tailcall_bpf2bpf_4(void)
                        goto out;
        }
 
-       err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
-                               &duration, &retval, NULL);
-       CHECK(err || retval != sizeof(pkt_v4) * 3, "tailcall", "err %d errno %d retval %d\n",
-             err, errno, retval);
-
        data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
        if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
                return;
@@ -788,9 +790,21 @@ static void test_tailcall_bpf2bpf_4(void)
                return;
 
        i = 0;
+       val.noise = noise;
+       val.count = 0;
+       err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
+       if (CHECK_FAIL(err))
+               goto out;
+
+       err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
+                               &duration, &retval, NULL);
+       CHECK(err || retval != sizeof(pkt_v4) * 3, "tailcall", "err %d errno %d retval %d\n",
+             err, errno, retval);
+
+       i = 0;
        err = bpf_map_lookup_elem(data_fd, &i, &val);
-       CHECK(err || val != 31, "tailcall count", "err %d errno %d count %d\n",
-             err, errno, val);
+       CHECK(err || val.count != 31, "tailcall count", "err %d errno %d count %d\n",
+             err, errno, val.count);
 
 out:
        bpf_object__close(obj);
@@ -815,5 +829,7 @@ void test_tailcalls(void)
        if (test__start_subtest("tailcall_bpf2bpf_3"))
                test_tailcall_bpf2bpf_3();
        if (test__start_subtest("tailcall_bpf2bpf_4"))
-               test_tailcall_bpf2bpf_4();
+               test_tailcall_bpf2bpf_4(false);
+       if (test__start_subtest("tailcall_bpf2bpf_5"))
+               test_tailcall_bpf2bpf_4(true);
 }
index 77df6d4..e89368a 100644 (file)
@@ -3,6 +3,13 @@
 #include <bpf/bpf_helpers.h>
 
 struct {
+       __uint(type, BPF_MAP_TYPE_ARRAY);
+       __uint(max_entries, 1);
+       __uint(key_size, sizeof(__u32));
+       __uint(value_size, sizeof(__u32));
+} nop_table SEC(".maps");
+
+struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, 3);
        __uint(key_size, sizeof(__u32));
@@ -10,10 +17,21 @@ struct {
 } jmp_table SEC(".maps");
 
 int count = 0;
+int noise = 0;
+
+__always_inline int subprog_noise(void)
+{
+       __u32 key = 0;
+
+       bpf_map_lookup_elem(&nop_table, &key);
+       return 0;
+}
 
 __noinline
 int subprog_tail_2(struct __sk_buff *skb)
 {
+       if (noise)
+               subprog_noise();
        bpf_tail_call_static(skb, &jmp_table, 2);
        return skb->len * 3;
 }
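
The flow above relies on a property of bpf_tail_call_static() that is easy to miss: it falls through only when the tail call fails (empty prog_array slot or tail-call limit reached); on success the callee replaces the caller, so the statement after the call never executes, which is why each subprog's multiplier only applies on failure. A minimal standalone sketch of that semantic, with a hypothetical map and program name:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} demo_table SEC(".maps");

SEC("classifier")
int demo_entry(struct __sk_buff *skb)
{
	/* Jumps to the program in slot 0 and never returns here if the
	 * slot is populated; falls through only on failure. */
	bpf_tail_call_static(skb, &demo_table, 0);
	return skb->len;	/* reached only when the tail call fails */
}

char _license[] SEC("license") = "GPL";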
index a3e593d..2debba4 100644 (file)
@@ -1,4 +1,233 @@
 {
+       "map access: known scalar += value_ptr unknown vs const",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, len)),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+       BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4),
+       BPF_MOV64_IMM(BPF_REG_1, 6),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_MOV64_IMM(BPF_REG_1, 3),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_16b = { 5 },
+       .fixup_map_array_48b = { 8 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "map access: known scalar += value_ptr const vs unknown",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, len)),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+       BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2),
+       BPF_MOV64_IMM(BPF_REG_1, 3),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+       BPF_MOV64_IMM(BPF_REG_1, 6),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_16b = { 5 },
+       .fixup_map_array_48b = { 8 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "map access: known scalar += value_ptr const vs const (ne)",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, len)),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+       BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2),
+       BPF_MOV64_IMM(BPF_REG_1, 3),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_MOV64_IMM(BPF_REG_1, 5),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_16b = { 5 },
+       .fixup_map_array_48b = { 8 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "map access: known scalar += value_ptr const vs const (eq)",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, len)),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+       BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2),
+       BPF_MOV64_IMM(BPF_REG_1, 5),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_MOV64_IMM(BPF_REG_1, 5),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_16b = { 5 },
+       .fixup_map_array_48b = { 8 },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "map access: known scalar += value_ptr unknown vs unknown (eq)",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, len)),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+       BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4),
+       BPF_MOV64_IMM(BPF_REG_1, 6),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+       BPF_MOV64_IMM(BPF_REG_1, 6),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_16b = { 5 },
+       .fixup_map_array_48b = { 8 },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "map access: known scalar += value_ptr unknown vs unknown (lt)",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, len)),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+       BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4),
+       BPF_MOV64_IMM(BPF_REG_1, 6),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x3),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+       BPF_MOV64_IMM(BPF_REG_1, 6),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_16b = { 5 },
+       .fixup_map_array_48b = { 8 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "map access: known scalar += value_ptr unknown vs unknown (gt)",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                   offsetof(struct __sk_buff, len)),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+       BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4),
+       BPF_MOV64_IMM(BPF_REG_1, 6),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+       BPF_MOV64_IMM(BPF_REG_1, 6),
+       BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x3),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_16b = { 5 },
+       .fixup_map_array_48b = { 8 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R1 tried to add from different maps, paths or scalars",
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
        "map access: known scalar += value_ptr from different maps",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
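
Every unprivileged rejection above carries the same error string because the cases share one shape: the scalar eventually added to the map-value pointer is produced on two control-flow paths with different provenance (a constant on one, a masked unknown on the other), so the verifier's per-instruction ALU sanitation cannot record a single masking limit for the addition. A plain C analogue of the pattern, with hypothetical names, just to make the shape visible:

static unsigned char read_at_branchy_offset(unsigned char *map_value,
					    int cond, unsigned long unknown)
{
	unsigned long off;

	if (cond)
		off = 3;		/* constant on this path */
	else
		off = unknown & 0x7;	/* bounded unknown on the other */

	/* In BPF terms this is value_ptr += scalar with two distinct
	 * origins for 'off'; the unprivileged verifier rejects it. */
	return map_value[off];
}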
index 06a351b..0709af0 100644 (file)
@@ -38,6 +38,7 @@
 /x86_64/xen_vmcall_test
 /x86_64/xss_msr_test
 /x86_64/vmx_pmu_msrs_test
+/access_tracking_perf_test
 /demand_paging_test
 /dirty_log_test
 /dirty_log_perf_test
index b853be2..5832f51 100644 (file)
@@ -71,6 +71,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
 TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
 TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
+TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
 TEST_GEN_PROGS_x86_64 += demand_paging_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
 TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
index a16c8f0..cc89818 100644 (file)
@@ -1019,7 +1019,8 @@ static __u64 sve_rejects_set[] = {
 #define VREGS_SUBLIST \
        { "vregs", .regs = vregs, .regs_n = ARRAY_SIZE(vregs), }
 #define PMU_SUBLIST \
-       { "pmu", .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
+       { "pmu", .capability = KVM_CAP_ARM_PMU_V3, .feature = KVM_ARM_VCPU_PMU_V3, \
+         .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
 #define SVE_SUBLIST \
        { "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \
          .regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c
new file mode 100644 (file)
index 0000000..e2baa18
--- /dev/null
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * access_tracking_perf_test
+ *
+ * Copyright (C) 2021, Google, Inc.
+ *
+ * This test measures the performance effects of KVM's access tracking.
+ * Access tracking is driven by the MMU notifiers test_young, clear_young, and
+ * clear_flush_young. These notifiers do not have a direct userspace API;
+ * however, the clear_young notifier can be triggered by marking pages as idle
+ * in /sys/kernel/mm/page_idle/bitmap. This test leverages that mechanism to
+ * enable access tracking on guest memory.
+ *
+ * To measure performance this test runs a VM with a configurable number of
+ * vCPUs that each touch every page in disjoint regions of memory. Performance
+ * is measured in the time it takes all vCPUs to finish touching their
+ * predefined region.
+ *
+ * Note that a deterministic correctness test of access tracking is not possible
+ * using page_idle as it exists today. This is for a few reasons:
+ *
+ * 1. page_idle only issues clear_young notifiers, which lack a TLB flush. This
+ *    means subsequent guest accesses are not guaranteed to see page table
+ *    updates made by KVM until some time in the future.
+ *
+ * 2. page_idle only operates on LRU pages. Newly allocated pages are not
+ *    immediately added to LRU lists. Instead they are held in a "pagevec",
+ *    which is drained to LRU lists some time in the future. There is no
+ *    userspace API to force this drain to occur.
+ *
+ * These limitations are worked around in this test by using a large enough
+ * region of memory for each vCPU such that the number of translations cached in
+ * the TLB and the number of pages held in pagevecs are a small fraction of the
+ * overall workload. If either of those conditions is not true, this test
+ * will fail rather than silently pass.
+ */
+#include <inttypes.h>
+#include <limits.h>
+#include <pthread.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "kvm_util.h"
+#include "test_util.h"
+#include "perf_test_util.h"
+#include "guest_modes.h"
+
+/* Global variable used to synchronize all of the vCPU threads. */
+static int iteration = -1;
+
+/* Defines what vCPU threads should do during a given iteration. */
+static enum {
+       /* Run the vCPU to access all its memory. */
+       ITERATION_ACCESS_MEMORY,
+       /* Mark the vCPU's memory idle in page_idle. */
+       ITERATION_MARK_IDLE,
+} iteration_work;
+
+/* Set to true when vCPU threads should exit. */
+static bool done;
+
+/* The iteration that was last completed by each vCPU. */
+static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
+
+/* Whether to overlap the regions of memory vCPUs access. */
+static bool overlap_memory_access;
+
+struct test_params {
+       /* The backing source for the region of memory. */
+       enum vm_mem_backing_src_type backing_src;
+
+       /* The amount of memory to allocate for each vCPU. */
+       uint64_t vcpu_memory_bytes;
+
+       /* The number of vCPUs to create in the VM. */
+       int vcpus;
+};
+
+static uint64_t pread_uint64(int fd, const char *filename, uint64_t index)
+{
+       uint64_t value;
+       off_t offset = index * sizeof(value);
+
+       TEST_ASSERT(pread(fd, &value, sizeof(value), offset) == sizeof(value),
+                   "pread from %s offset 0x%" PRIx64 " failed!",
+                   filename, offset);
+
+	return value;
+}
+
+#define PAGEMAP_PRESENT (1ULL << 63)
+#define PAGEMAP_PFN_MASK ((1ULL << 55) - 1)
+
+static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)
+{
+       uint64_t hva = (uint64_t) addr_gva2hva(vm, gva);
+       uint64_t entry;
+       uint64_t pfn;
+
+       entry = pread_uint64(pagemap_fd, "pagemap", hva / getpagesize());
+       if (!(entry & PAGEMAP_PRESENT))
+               return 0;
+
+       pfn = entry & PAGEMAP_PFN_MASK;
+       if (!pfn) {
+               print_skip("Looking up PFNs requires CAP_SYS_ADMIN");
+               exit(KSFT_SKIP);
+       }
+
+       return pfn;
+}
+
+static bool is_page_idle(int page_idle_fd, uint64_t pfn)
+{
+       uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64);
+
+       return !!((bits >> (pfn % 64)) & 1);
+}
+
+static void mark_page_idle(int page_idle_fd, uint64_t pfn)
+{
+       uint64_t bits = 1ULL << (pfn % 64);
+
+       TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8,
+                   "Set page_idle bits for PFN 0x%" PRIx64, pfn);
+}
+
+static void mark_vcpu_memory_idle(struct kvm_vm *vm, int vcpu_id)
+{
+       uint64_t base_gva = perf_test_args.vcpu_args[vcpu_id].gva;
+       uint64_t pages = perf_test_args.vcpu_args[vcpu_id].pages;
+       uint64_t page;
+       uint64_t still_idle = 0;
+       uint64_t no_pfn = 0;
+       int page_idle_fd;
+       int pagemap_fd;
+
+       /* If vCPUs are using an overlapping region, let vCPU 0 mark it idle. */
+       if (overlap_memory_access && vcpu_id)
+               return;
+
+       page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
+       TEST_ASSERT(page_idle_fd > 0, "Failed to open page_idle.");
+
+       pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
+       TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap.");
+
+       for (page = 0; page < pages; page++) {
+               uint64_t gva = base_gva + page * perf_test_args.guest_page_size;
+               uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);
+
+               if (!pfn) {
+                       no_pfn++;
+                       continue;
+               }
+
+               if (is_page_idle(page_idle_fd, pfn)) {
+                       still_idle++;
+                       continue;
+               }
+
+               mark_page_idle(page_idle_fd, pfn);
+       }
+
+       /*
+        * Assumption: Less than 1% of pages are going to be swapped out from
+        * under us during this test.
+        */
+       TEST_ASSERT(no_pfn < pages / 100,
+                   "vCPU %d: No PFN for %" PRIu64 " out of %" PRIu64 " pages.",
+                   vcpu_id, no_pfn, pages);
+
+       /*
+        * Test that at least 90% of memory has been marked idle (the rest might
+        * not be marked idle because the pages have not yet made it to an LRU
+        * list or the translations are still cached in the TLB). 90% is
+        * arbitrary; high enough that we ensure most memory access went through
+	 * arbitrary; high enough to ensure that most memory accesses went
+	 * through access tracking, but low enough not to make the test too
+	 * brittle over time and across architectures.
+       TEST_ASSERT(still_idle < pages / 10,
+                   "vCPU%d: Too many pages still idle (%"PRIu64 " out of %"
+                   PRIu64 ").\n",
+                   vcpu_id, still_idle, pages);
+
+       close(page_idle_fd);
+       close(pagemap_fd);
+}
+
+static void assert_ucall(struct kvm_vm *vm, uint32_t vcpu_id,
+                        uint64_t expected_ucall)
+{
+       struct ucall uc;
+       uint64_t actual_ucall = get_ucall(vm, vcpu_id, &uc);
+
+       TEST_ASSERT(expected_ucall == actual_ucall,
+                   "Guest exited unexpectedly (expected ucall %" PRIu64
+                   ", got %" PRIu64 ")",
+                   expected_ucall, actual_ucall);
+}
+
+static bool spin_wait_for_next_iteration(int *current_iteration)
+{
+       int last_iteration = *current_iteration;
+
+       do {
+               if (READ_ONCE(done))
+                       return false;
+
+               *current_iteration = READ_ONCE(iteration);
+       } while (last_iteration == *current_iteration);
+
+       return true;
+}
+
+static void *vcpu_thread_main(void *arg)
+{
+       struct perf_test_vcpu_args *vcpu_args = arg;
+       struct kvm_vm *vm = perf_test_args.vm;
+       int vcpu_id = vcpu_args->vcpu_id;
+       int current_iteration = -1;
+
+       vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
+
+       while (spin_wait_for_next_iteration(&current_iteration)) {
+               switch (READ_ONCE(iteration_work)) {
+               case ITERATION_ACCESS_MEMORY:
+                       vcpu_run(vm, vcpu_id);
+                       assert_ucall(vm, vcpu_id, UCALL_SYNC);
+                       break;
+               case ITERATION_MARK_IDLE:
+                       mark_vcpu_memory_idle(vm, vcpu_id);
+                       break;
+		}
+
+               vcpu_last_completed_iteration[vcpu_id] = current_iteration;
+       }
+
+       return NULL;
+}
+
+static void spin_wait_for_vcpu(int vcpu_id, int target_iteration)
+{
+       while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) !=
+              target_iteration) {
+               continue;
+       }
+}
+
+/* The type of memory accesses to perform in the VM. */
+enum access_type {
+       ACCESS_READ,
+       ACCESS_WRITE,
+};
+
+static void run_iteration(struct kvm_vm *vm, int vcpus, const char *description)
+{
+       struct timespec ts_start;
+       struct timespec ts_elapsed;
+       int next_iteration;
+       int vcpu_id;
+
+       /* Kick off the vCPUs by incrementing iteration. */
+       next_iteration = ++iteration;
+
+       clock_gettime(CLOCK_MONOTONIC, &ts_start);
+
+       /* Wait for all vCPUs to finish the iteration. */
+       for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++)
+               spin_wait_for_vcpu(vcpu_id, next_iteration);
+
+       ts_elapsed = timespec_elapsed(ts_start);
+       pr_info("%-30s: %ld.%09lds\n",
+               description, ts_elapsed.tv_sec, ts_elapsed.tv_nsec);
+}
+
+static void access_memory(struct kvm_vm *vm, int vcpus, enum access_type access,
+                         const char *description)
+{
+       perf_test_args.wr_fract = (access == ACCESS_READ) ? INT_MAX : 1;
+       sync_global_to_guest(vm, perf_test_args);
+       iteration_work = ITERATION_ACCESS_MEMORY;
+       run_iteration(vm, vcpus, description);
+}
+
+static void mark_memory_idle(struct kvm_vm *vm, int vcpus)
+{
+       /*
+        * Even though this parallelizes the work across vCPUs, this is still a
+        * very slow operation because page_idle forces the test to mark one pfn
+        * at a time and the clear_young notifier serializes on the KVM MMU
+        * lock.
+        */
+       pr_debug("Marking VM memory idle (slow)...\n");
+       iteration_work = ITERATION_MARK_IDLE;
+       run_iteration(vm, vcpus, "Mark memory idle");
+}
+
+static pthread_t *create_vcpu_threads(int vcpus)
+{
+       pthread_t *vcpu_threads;
+       int i;
+
+       vcpu_threads = malloc(vcpus * sizeof(vcpu_threads[0]));
+       TEST_ASSERT(vcpu_threads, "Failed to allocate vcpu_threads.");
+
+       for (i = 0; i < vcpus; i++) {
+               vcpu_last_completed_iteration[i] = iteration;
+               pthread_create(&vcpu_threads[i], NULL, vcpu_thread_main,
+                              &perf_test_args.vcpu_args[i]);
+       }
+
+       return vcpu_threads;
+}
+
+static void terminate_vcpu_threads(pthread_t *vcpu_threads, int vcpus)
+{
+       int i;
+
+       /* Set done to signal the vCPU threads to exit */
+       done = true;
+
+       for (i = 0; i < vcpus; i++)
+               pthread_join(vcpu_threads[i], NULL);
+}
+
+static void run_test(enum vm_guest_mode mode, void *arg)
+{
+       struct test_params *params = arg;
+       struct kvm_vm *vm;
+       pthread_t *vcpu_threads;
+       int vcpus = params->vcpus;
+
+       vm = perf_test_create_vm(mode, vcpus, params->vcpu_memory_bytes,
+                                params->backing_src);
+
+       perf_test_setup_vcpus(vm, vcpus, params->vcpu_memory_bytes,
+                             !overlap_memory_access);
+
+       vcpu_threads = create_vcpu_threads(vcpus);
+
+       pr_info("\n");
+       access_memory(vm, vcpus, ACCESS_WRITE, "Populating memory");
+
+       /* As a control, read and write to the populated memory first. */
+       access_memory(vm, vcpus, ACCESS_WRITE, "Writing to populated memory");
+       access_memory(vm, vcpus, ACCESS_READ, "Reading from populated memory");
+
+       /* Repeat on memory that has been marked as idle. */
+       mark_memory_idle(vm, vcpus);
+       access_memory(vm, vcpus, ACCESS_WRITE, "Writing to idle memory");
+       mark_memory_idle(vm, vcpus);
+       access_memory(vm, vcpus, ACCESS_READ, "Reading from idle memory");
+
+       terminate_vcpu_threads(vcpu_threads, vcpus);
+       free(vcpu_threads);
+       perf_test_destroy_vm(vm);
+}
+
+static void help(char *name)
+{
+       puts("");
+	printf("usage: %s [-h] [-m mode] [-b vcpu_bytes] [-v vcpus] [-o] [-s mem_type]\n",
+              name);
+       puts("");
+	printf(" -h: Display this help message.\n");
+       guest_modes_help();
+       printf(" -b: specify the size of the memory region which should be\n"
+              "     dirtied by each vCPU. e.g. 10M or 3G.\n"
+              "     (default: 1G)\n");
+       printf(" -v: specify the number of vCPUs to run.\n");
+       printf(" -o: Overlap guest memory accesses instead of partitioning\n"
+              "     them into a separate region of memory for each vCPU.\n");
+       printf(" -s: specify the type of memory that should be used to\n"
+              "     back the guest data region.\n\n");
+       backing_src_help();
+       puts("");
+       exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+       struct test_params params = {
+               .backing_src = VM_MEM_SRC_ANONYMOUS,
+               .vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,
+               .vcpus = 1,
+       };
+       int page_idle_fd;
+       int opt;
+
+       guest_modes_append_default();
+
+       while ((opt = getopt(argc, argv, "hm:b:v:os:")) != -1) {
+               switch (opt) {
+               case 'm':
+                       guest_modes_cmdline(optarg);
+                       break;
+               case 'b':
+                       params.vcpu_memory_bytes = parse_size(optarg);
+                       break;
+               case 'v':
+                       params.vcpus = atoi(optarg);
+                       break;
+               case 'o':
+                       overlap_memory_access = true;
+                       break;
+               case 's':
+                       params.backing_src = parse_backing_src_type(optarg);
+                       break;
+               case 'h':
+               default:
+                       help(argv[0]);
+                       break;
+               }
+       }
+
+       page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
+       if (page_idle_fd < 0) {
+               print_skip("CONFIG_IDLE_PAGE_TRACKING is not enabled");
+               exit(KSFT_SKIP);
+       }
+       close(page_idle_fd);
+
+       for_each_guest_mode(run_test, &params);
+
+       return 0;
+}
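
For reference outside the harness, the pagemap and page_idle accesses wrapped by pread_uint64(), lookup_pfn() and mark_page_idle() above follow the documented kernel ABI: one 64-bit pagemap entry per virtual page (bit 63 = present, bits 0-54 = PFN), and one idle bit per PFN packed into 64-bit words. A self-contained sketch of the same round trip against an ordinary user page, assuming it runs with CAP_SYS_ADMIN:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	static char buf[4096];
	uint64_t entry, pfn, bits;
	long page = sysconf(_SC_PAGESIZE);
	int pagemap = open("/proc/self/pagemap", O_RDONLY);
	int idle = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);

	if (pagemap < 0 || idle < 0)
		return 1;

	buf[0] = 1;	/* fault the page in so it has a PFN */
	pread(pagemap, &entry, 8, ((uintptr_t)buf / page) * 8);
	if (!(entry & (1ULL << 63)))		/* bit 63: present */
		return 1;
	pfn = entry & ((1ULL << 55) - 1);	/* bits 0-54: PFN */

	bits = 1ULL << (pfn % 64);
	/* Setting the idle bit clears the accessed state; for guest pages
	 * this is what drives KVM's clear_young notifier. */
	pwrite(idle, &bits, 8, (pfn / 64) * 8);
	return 0;
}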
index 04a2641..80cbd3a 100644 (file)
@@ -312,6 +312,7 @@ int main(int argc, char *argv[])
                        break;
                case 'o':
                        p.partition_vcpu_memory_access = false;
+                       break;
                case 's':
                        p.backing_src = parse_backing_src_type(optarg);
                        break;
index 615ab25..010b59b 100644 (file)
@@ -45,6 +45,7 @@ enum vm_guest_mode {
        VM_MODE_P40V48_64K,
        VM_MODE_PXXV48_4K,      /* For 48bits VA but ANY bits PA */
        VM_MODE_P47V64_4K,
+       VM_MODE_P44V64_4K,
        NUM_VM_MODES,
 };
 
@@ -62,7 +63,7 @@ enum vm_guest_mode {
 
 #elif defined(__s390x__)
 
-#define VM_MODE_DEFAULT                        VM_MODE_P47V64_4K
+#define VM_MODE_DEFAULT                        VM_MODE_P44V64_4K
 #define MIN_PAGE_SHIFT                 12U
 #define ptes_per_page(page_size)       ((page_size) / 16)
 
index 9f49f6c..632b74d 100644 (file)
@@ -401,7 +401,7 @@ unexpected_exception:
 void vm_init_descriptor_tables(struct kvm_vm *vm)
 {
        vm->handlers = vm_vaddr_alloc(vm, sizeof(struct handlers),
-                       vm->page_size, 0, 0);
+                       vm->page_size);
 
        *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
 }
index 25bff30..c330f41 100644 (file)
@@ -22,6 +22,22 @@ void guest_modes_append_default(void)
                }
        }
 #endif
+#ifdef __s390x__
+       {
+               int kvm_fd, vm_fd;
+               struct kvm_s390_vm_cpu_processor info;
+
+               kvm_fd = open_kvm_dev_path_or_exit();
+               vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
+               kvm_device_access(vm_fd, KVM_S390_VM_CPU_MODEL,
+                                 KVM_S390_VM_CPU_PROCESSOR, &info, false);
+               close(vm_fd);
+               close(kvm_fd);
+		/* Starting with z13 we have 47 bits of physical address */
+               if (info.ibc >= 0x30)
+                       guest_mode_append(VM_MODE_P47V64_4K, true, true);
+       }
+#endif
 }
 
 void for_each_guest_mode(void (*func)(enum vm_guest_mode, void *), void *arg)
index 5b56b57..10a8ed6 100644 (file)
@@ -176,6 +176,7 @@ const char *vm_guest_mode_string(uint32_t i)
                [VM_MODE_P40V48_64K]    = "PA-bits:40,  VA-bits:48, 64K pages",
                [VM_MODE_PXXV48_4K]     = "PA-bits:ANY, VA-bits:48,  4K pages",
                [VM_MODE_P47V64_4K]     = "PA-bits:47,  VA-bits:64,  4K pages",
+               [VM_MODE_P44V64_4K]     = "PA-bits:44,  VA-bits:64,  4K pages",
        };
        _Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
                       "Missing new mode strings?");
@@ -194,6 +195,7 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
        { 40, 48, 0x10000, 16 },
        {  0,  0,  0x1000, 12 },
        { 47, 64,  0x1000, 12 },
+       { 44, 64,  0x1000, 12 },
 };
 _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
               "Missing new mode params?");
@@ -282,6 +284,9 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
        case VM_MODE_P47V64_4K:
                vm->pgtable_levels = 5;
                break;
+       case VM_MODE_P44V64_4K:
+               vm->pgtable_levels = 5;
+               break;
        default:
                TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
        }
index 85b18bb..72a1c9b 100644 (file)
@@ -377,7 +377,8 @@ static void test_add_max_memory_regions(void)
                (max_mem_slots - 1), MEM_REGION_SIZE >> 10);
 
        mem = mmap(NULL, (size_t)max_mem_slots * MEM_REGION_SIZE + alignment,
-                  PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+                  PROT_READ | PROT_WRITE,
+                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
        TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
        mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1));
 
index b0031f2..ecec308 100644 (file)
@@ -320,7 +320,7 @@ int main(int ac, char **av)
                run_delay = get_run_delay();
                pthread_create(&thread, &attr, do_steal_time, NULL);
                do
-                       pthread_yield();
+                       sched_yield();
                while (get_run_delay() - run_delay < MIN_RUN_DELAY_NS);
                pthread_join(thread, NULL);
                run_delay = get_run_delay() - run_delay;
index 42bd658..af27c7e 100644 (file)
@@ -615,7 +615,7 @@ int main(void)
 
        vm_init_descriptor_tables(vm);
        vcpu_init_descriptor_tables(vm, VCPU_ID);
-       vm_handle_exception(vm, GP_VECTOR, guest_gp_handler);
+       vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
 
        pr_info("Testing access to Hyper-V specific MSRs\n");
        guest_test_msrs_access(vm, addr_gva2hva(vm, msr_gva),
index 523371c..da2325f 100644 (file)
@@ -71,7 +71,7 @@ static void mmu_role_test(u32 *cpuid_reg, u32 evil_cpuid_val)
        /* Set up a #PF handler to eat the RSVD #PF and signal all done! */
        vm_init_descriptor_tables(vm);
        vcpu_init_descriptor_tables(vm, VCPU_ID);
-       vm_handle_exception(vm, PF_VECTOR, guest_pf_handler);
+       vm_install_exception_handler(vm, PF_VECTOR, guest_pf_handler);
 
        r = _vcpu_run(vm, VCPU_ID);
        TEST_ASSERT(r == 0, "vcpu_run failed: %d\n", r);
index c1f8318..d0fe2fd 100644 (file)
@@ -53,15 +53,28 @@ static inline void sync_with_host(uint64_t phase)
                     : "+a" (phase));
 }
 
-void self_smi(void)
+static void self_smi(void)
 {
        x2apic_write_reg(APIC_ICR,
                         APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
 }
 
-void guest_code(void *arg)
+static void l2_guest_code(void)
 {
+       sync_with_host(8);
+
+       sync_with_host(10);
+
+       vmcall();
+}
+
+static void guest_code(void *arg)
+{
+       #define L2_GUEST_STACK_SIZE 64
+       unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
        uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
+       struct svm_test_data *svm = arg;
+       struct vmx_pages *vmx_pages = arg;
 
        sync_with_host(1);
 
@@ -74,21 +87,50 @@ void guest_code(void *arg)
        sync_with_host(4);
 
        if (arg) {
-               if (cpu_has_svm())
-                       generic_svm_setup(arg, NULL, NULL);
-               else
-                       GUEST_ASSERT(prepare_for_vmx_operation(arg));
+               if (cpu_has_svm()) {
+                       generic_svm_setup(svm, l2_guest_code,
+                                         &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+               } else {
+                       GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+                       GUEST_ASSERT(load_vmcs(vmx_pages));
+                       prepare_vmcs(vmx_pages, l2_guest_code,
+                                    &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+               }
 
                sync_with_host(5);
 
                self_smi();
 
                sync_with_host(7);
+
+               if (cpu_has_svm()) {
+                       run_guest(svm->vmcb, svm->vmcb_gpa);
+                       svm->vmcb->save.rip += 3;
+                       run_guest(svm->vmcb, svm->vmcb_gpa);
+               } else {
+                       vmlaunch();
+                       vmresume();
+               }
+
+               /* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */
+               sync_with_host(12);
        }
 
        sync_with_host(DONE);
 }
 
+void inject_smi(struct kvm_vm *vm)
+{
+       struct kvm_vcpu_events events;
+
+       vcpu_events_get(vm, VCPU_ID, &events);
+
+       events.smi.pending = 1;
+       events.flags |= KVM_VCPUEVENT_VALID_SMM;
+
+       vcpu_events_set(vm, VCPU_ID, &events);
+}
+
 int main(int argc, char *argv[])
 {
        vm_vaddr_t nested_gva = 0;
@@ -147,6 +189,22 @@ int main(int argc, char *argv[])
                            "Unexpected stage: #%x, got %x",
                            stage, stage_reported);
 
+               /*
+                * Enter SMM during L2 execution and check that we correctly
+                * return from it. Do not perform save/restore while in SMM yet.
+                */
+               if (stage == 8) {
+                       inject_smi(vm);
+                       continue;
+               }
+
+               /*
+                * Perform save/restore while the guest is in SMM triggered
+                * during L2 execution.
+                */
+               if (stage == 10)
+                       inject_smi(vm);
+
                state = vcpu_save_state(vm, VCPU_ID);
                kvm_vm_release(vm);
                kvm_vm_restart(vm, O_RDWR);
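
inject_smi() above goes through the selftest wrappers; stripped to raw ioctls, the same operation looks like the sketch below (vcpu_fd stands for an already-open vCPU file descriptor, an assumption of this example):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int inject_smi_raw(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	memset(&events, 0, sizeof(events));
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;

	events.smi.pending = 1;
	/* Tell KVM the smi/smm fields of this struct are valid. */
	events.flags |= KVM_VCPUEVENT_VALID_SMM;

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}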
index b37585e..46a97f3 100755 (executable)
@@ -282,7 +282,9 @@ done
 #
 echo $error > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_OFFLINE/error
 for memory in `hotpluggable_online_memory`; do
-       offline_memory_expect_fail $memory
+       if [ $((RANDOM % 100)) -lt $ratio ]; then
+               offline_memory_expect_fail $memory
+       fi
 done
 
 echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_OFFLINE/error
index c19ecc6..ecbf57f 100755 (executable)
@@ -313,9 +313,10 @@ check_exception()
        fi
        log_test $? 0 "IPv4: ${desc}"
 
-       if [ "$with_redirect" = "yes" ]; then
+       # No PMTU info for test "redirect" and "mtu exception plus redirect"
+       if [ "$with_redirect" = "yes" ] && [ "$desc" != "redirect exception plus mtu" ]; then
                ip -netns h1 -6 ro get ${H1_VRF_ARG} ${H2_N2_IP6} | \
-               grep -q "${H2_N2_IP6} from :: via ${R2_LLADDR} dev br0.*${mtu}"
+               grep -v "mtu" | grep -q "${H2_N2_IP6} .*via ${R2_LLADDR} dev br0"
        elif [ -n "${mtu}" ]; then
                ip -netns h1 -6 ro get ${H1_VRF_ARG} ${H2_N2_IP6} | \
                grep -q "${mtu}"
index 9a191c1..f02f4de 100755 (executable)
@@ -1409,7 +1409,7 @@ syncookies_tests()
        ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
        ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow
        run_tests $ns1 $ns2 10.0.1.1
-       chk_join_nr "subflows limited by server w cookies" 2 2 1
+       chk_join_nr "subflows limited by server w cookies" 2 1 1
 
        # test signal address with cookies
        reset_with_cookies
index 6365c7f..bd62883 100644 (file)
 #include <sys/socket.h>
 #include <sys/wait.h>
 #include <linux/tcp.h>
+#include <linux/udp.h>
 #include <arpa/inet.h>
 #include <net/if.h>
 #include <netinet/in.h>
+#include <netinet/ip.h>
 #include <netdb.h>
 #include <fcntl.h>
 #include <libgen.h>
 #include <time.h>
 #include <errno.h>
 
+#include <linux/xfrm.h>
+#include <linux/ipsec.h>
+#include <linux/pfkeyv2.h>
+
 #ifndef IPV6_UNICAST_IF
 #define IPV6_UNICAST_IF         76
 #endif
@@ -114,6 +120,9 @@ struct sock_args {
                struct in_addr  in;
                struct in6_addr in6;
        } expected_raddr;
+
+       /* ESP in UDP encap test */
+       int use_xfrm;
 };
 
 static int server_mode;
@@ -1346,6 +1355,41 @@ static int bind_socket(int sd, struct sock_args *args)
        return 0;
 }
 
+static int config_xfrm_policy(int sd, struct sock_args *args)
+{
+       struct xfrm_userpolicy_info policy = {};
+       int type = UDP_ENCAP_ESPINUDP;
+       int xfrm_af = IP_XFRM_POLICY;
+       int level = SOL_IP;
+
+       if (args->type != SOCK_DGRAM) {
+		log_error("Invalid socket type. Only DGRAM can be used for XFRM\n");
+               return 1;
+       }
+
+       policy.action = XFRM_POLICY_ALLOW;
+       policy.sel.family = args->version;
+       if (args->version == AF_INET6) {
+               xfrm_af = IPV6_XFRM_POLICY;
+               level = SOL_IPV6;
+       }
+
+       policy.dir = XFRM_POLICY_OUT;
+       if (setsockopt(sd, level, xfrm_af, &policy, sizeof(policy)) < 0)
+               return 1;
+
+       policy.dir = XFRM_POLICY_IN;
+       if (setsockopt(sd, level, xfrm_af, &policy, sizeof(policy)) < 0)
+               return 1;
+
+       if (setsockopt(sd, IPPROTO_UDP, UDP_ENCAP, &type, sizeof(type)) < 0) {
+               log_err_errno("Failed to set xfrm encap");
+               return 1;
+       }
+
+       return 0;
+}
+
 static int lsock_init(struct sock_args *args)
 {
        long flags;
@@ -1389,6 +1433,11 @@ static int lsock_init(struct sock_args *args)
        if (fcntl(sd, F_SETFD, FD_CLOEXEC) < 0)
                log_err_errno("Failed to set close-on-exec flag");
 
+       if (args->use_xfrm && config_xfrm_policy(sd, args)) {
+               log_err_errno("Failed to set xfrm policy");
+               goto err;
+       }
+
 out:
        return sd;
 
@@ -1772,7 +1821,7 @@ static int ipc_parent(int cpid, int fd, struct sock_args *args)
        return client_status;
 }
 
-#define GETOPT_STR  "sr:l:c:p:t:g:P:DRn:M:X:m:d:I:BN:O:SCi6L:0:1:2:3:Fbq"
+#define GETOPT_STR  "sr:l:c:p:t:g:P:DRn:M:X:m:d:I:BN:O:SCi6xL:0:1:2:3:Fbq"
 
 static void print_usage(char *prog)
 {
@@ -1795,6 +1844,7 @@ static void print_usage(char *prog)
        "    -D|R          datagram (D) / raw (R) socket (default stream)\n"
        "    -l addr       local address to bind to in server mode\n"
        "    -c addr       local address to bind to in client mode\n"
+       "    -x            configure XFRM policy on socket\n"
        "\n"
        "    -d dev        bind socket to given device name\n"
        "    -I dev        bind socket to given device name - server mode\n"
@@ -1966,6 +2016,9 @@ int main(int argc, char *argv[])
                case 'q':
                        quiet = 1;
                        break;
+               case 'x':
+                       args.use_xfrm = 1;
+                       break;
                default:
                        print_usage(argv[0]);
                        return 1;
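
On the wire side, -x reduces to a normal UDP socket that the kernel starts decapsulating as ESP once UDP_ENCAP_ESPINUDP is set, plus the XFRM policies installed in config_xfrm_policy(). A self-contained sketch of just the encap half (port 4500 is the conventional IKE NAT-T port and an assumption of this example):

#include <linux/udp.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int espinudp_socket(void)
{
	int type = UDP_ENCAP_ESPINUDP;
	struct sockaddr_in addr;
	int sd = socket(AF_INET, SOCK_DGRAM, 0);

	if (sd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(4500);	/* IKE NAT-T port */
	if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    setsockopt(sd, IPPROTO_UDP, UDP_ENCAP, &type, sizeof(type)) < 0)
		return -1;

	return sd;
}

The pmtu.sh changes below only need such a listener alive in each namespace, which is what the backgrounded "nettest -q -D -s -x -p 4500" invocations in setup_nettest_xfrm() provide.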
index 64cd2e2..543ad75 100755 (executable)
 #      below for IPv6 doesn't apply here, because, on IPv4, administrative MTU
 #      changes alone won't affect PMTU
 #
+# - pmtu_vti4_udp_exception
+#       Same as pmtu_vti4_exception, but using ESP-in-UDP
+#
+# - pmtu_vti4_udp_routed_exception
+#       Set up vti tunnel on top of veth connected through routing namespace and
+#      add xfrm states and policies with ESP-in-UDP encapsulation. Check that
+#      route exception is not created if link layer MTU is not exceeded, then
+#      lower MTU on second part of routed environment and check that exception
+#      is created with the expected PMTU.
+#
 # - pmtu_vti6_exception
 #      Set up vti6 tunnel on top of veth, with xfrm states and policies, in two
 #      namespaces with matching endpoints. Check that route exception is
 #      decrease and increase MTU of tunnel, checking that route exception PMTU
 #      changes accordingly
 #
+# - pmtu_vti6_udp_exception
+#       Same as pmtu_vti6_exception, but using ESP-in-UDP
+#
+# - pmtu_vti6_udp_routed_exception
+#      Same as pmtu_vti6_udp_routed_exception but with routing between vti
+#	Same as pmtu_vti6_udp_exception but with routing between vti
+#
 # - pmtu_vti4_default_mtu
 #      Set up vti4 tunnel on top of veth, in two namespaces with matching
 #      endpoints. Check that MTU assigned to vti interface is the MTU of the
@@ -224,6 +241,10 @@ tests="
        pmtu_ipv6_ipv6_exception        IPv6 over IPv6: PMTU exceptions         1
        pmtu_vti6_exception             vti6: PMTU exceptions                   0
        pmtu_vti4_exception             vti4: PMTU exceptions                   0
+       pmtu_vti6_udp_exception         vti6: PMTU exceptions (ESP-in-UDP)      0
+       pmtu_vti4_udp_exception         vti4: PMTU exceptions (ESP-in-UDP)      0
+       pmtu_vti6_udp_routed_exception  vti6: PMTU exceptions, routed (ESP-in-UDP)      0
+       pmtu_vti4_udp_routed_exception  vti4: PMTU exceptions, routed (ESP-in-UDP)      0
        pmtu_vti4_default_mtu           vti4: default MTU assignment            0
        pmtu_vti6_default_mtu           vti6: default MTU assignment            0
        pmtu_vti4_link_add_mtu          vti4: MTU setting on link creation      0
@@ -246,7 +267,6 @@ ns_b="ip netns exec ${NS_B}"
 ns_c="ip netns exec ${NS_C}"
 ns_r1="ip netns exec ${NS_R1}"
 ns_r2="ip netns exec ${NS_R2}"
-
 # Addressing and routing for tests with routers: four network segments, with
 # index SEGMENT between 1 and 4, a common prefix (PREFIX4 or PREFIX6) and an
 # identifier ID, which is 1 for hosts (A and B), 2 for routers (R1 and R2).
@@ -279,7 +299,6 @@ routes="
        A       ${prefix6}:${b_r2}::1   ${prefix6}:${a_r2}::2
        B       default                 ${prefix6}:${b_r1}::2
 "
-
 USE_NH="no"
 #      ns      family  nh id      destination          gateway
 nexthops="
@@ -326,6 +345,7 @@ dummy6_mask="64"
 
 err_buf=
 tcpdump_pids=
+nettest_pids=
 
 err() {
        err_buf="${err_buf}${1}
@@ -548,6 +568,14 @@ setup_vti6() {
        setup_vti 6 ${veth6_a_addr} ${veth6_b_addr} ${tunnel6_a_addr} ${tunnel6_b_addr} ${tunnel6_mask}
 }
 
+setup_vti4routed() {
+       setup_vti 4 ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 ${tunnel4_a_addr} ${tunnel4_b_addr} ${tunnel4_mask}
+}
+
+setup_vti6routed() {
+       setup_vti 6 ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 ${tunnel6_a_addr} ${tunnel6_b_addr} ${tunnel6_mask}
+}
+
 setup_vxlan_or_geneve() {
        type="${1}"
        a_addr="${2}"
@@ -619,18 +647,36 @@ setup_xfrm() {
        proto=${1}
        veth_a_addr="${2}"
        veth_b_addr="${3}"
+       encap=${4}
 
-       run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel || return 1
-       run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
+       run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel ${encap} || return 1
+       run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel ${encap}
        run_cmd ${ns_a} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel
        run_cmd ${ns_a} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel
 
-       run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
-       run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
+       run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel ${encap}
+       run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel ${encap}
        run_cmd ${ns_b} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel
        run_cmd ${ns_b} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel
 }
 
+setup_nettest_xfrm() {
+       which nettest >/dev/null
+       if [ $? -ne 0 ]; then
+               echo "'nettest' command not found; skipping tests"
+               return 1
+       fi
+
+       [ ${1} -eq 6 ] && proto="-6" || proto=""
+       port=${2}
+
+       run_cmd ${ns_a} nettest ${proto} -q -D -s -x -p ${port} -t 5 &
+       nettest_pids="${nettest_pids} $!"
+
+       run_cmd ${ns_b} nettest ${proto} -q -D -s -x -p ${port} -t 5 &
+       nettest_pids="${nettest_pids} $!"
+}
+
 setup_xfrm4() {
        setup_xfrm 4 ${veth4_a_addr} ${veth4_b_addr}
 }
@@ -639,6 +685,26 @@ setup_xfrm6() {
        setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr}
 }
 
+setup_xfrm4udp() {
+       setup_xfrm 4 ${veth4_a_addr} ${veth4_b_addr} "encap espinudp 4500 4500 0.0.0.0"
+       setup_nettest_xfrm 4 4500
+}
+
+setup_xfrm6udp() {
+       setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr} "encap espinudp 4500 4500 0.0.0.0"
+       setup_nettest_xfrm 6 4500
+}
+
+setup_xfrm4udprouted() {
+       setup_xfrm 4 ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 "encap espinudp 4500 4500 0.0.0.0"
+       setup_nettest_xfrm 4 4500
+}
+
+setup_xfrm6udprouted() {
+       setup_xfrm 6 ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 "encap espinudp 4500 4500 0.0.0.0"
+       setup_nettest_xfrm 6 4500
+}
+
 setup_routing_old() {
        for i in ${routes}; do
                [ "${ns}" = "" ]        && ns="${i}"            && continue
@@ -823,6 +889,11 @@ cleanup() {
        done
        tcpdump_pids=
 
+       for pid in ${nettest_pids}; do
+               kill ${pid}
+       done
+       nettest_pids=
+
        for n in ${NS_A} ${NS_B} ${NS_C} ${NS_R1} ${NS_R2}; do
                ip netns del ${n} 2> /dev/null
        done
@@ -1432,6 +1503,135 @@ test_pmtu_vti6_exception() {
        return ${fail}
 }
 
+test_pmtu_vti4_udp_exception() {
+       setup namespaces veth vti4 xfrm4udp || return $ksft_skip
+       trace "${ns_a}" veth_a    "${ns_b}" veth_b \
+             "${ns_a}" vti4_a    "${ns_b}" vti4_b
+
+       veth_mtu=1500
+       vti_mtu=$((veth_mtu - 20))
+
+       #                                UDP   SPI   SN   IV  ICV   pad length   next header
+       esp_payload_rfc4106=$((vti_mtu - 8   - 4   - 4  - 8 - 16  - 1          - 1))
+       ping_payload=$((esp_payload_rfc4106 - 28))
+
+       mtu "${ns_a}" veth_a ${veth_mtu}
+       mtu "${ns_b}" veth_b ${veth_mtu}
+       mtu "${ns_a}" vti4_a ${vti_mtu}
+       mtu "${ns_b}" vti4_b ${vti_mtu}
+
+       # Send DF packet without exceeding link layer MTU, check that no
+       # exception is created
+       run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel4_b_addr}
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
+       check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
+
+       # Now exceed link layer MTU by one byte, check that exception is created
+       # with the right PMTU value
+       run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((ping_payload + 1)) ${tunnel4_b_addr}
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
+       check_pmtu_value "${esp_payload_rfc4106}" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106 + 1)))"
+}
+
+test_pmtu_vti6_udp_exception() {
+       setup namespaces veth vti6 xfrm6udp || return $ksft_skip
+       trace "${ns_a}" veth_a    "${ns_b}" veth_b \
+             "${ns_a}" vti6_a    "${ns_b}" vti6_b
+       fail=0
+
+       # Create route exception by exceeding link layer MTU
+       mtu "${ns_a}" veth_a 4000
+       mtu "${ns_b}" veth_b 4000
+       mtu "${ns_a}" vti6_a 5000
+       mtu "${ns_b}" vti6_b 5000
+       run_cmd ${ns_a} ${ping6} -q -i 0.1 -w 1 -s 60000 ${tunnel6_b_addr}
+
+       # Check that exception was created
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
+       check_pmtu_value any "${pmtu}" "creating tunnel exceeding link layer MTU" || return 1
+
+       # Decrease tunnel MTU, check for PMTU decrease in route exception
+       mtu "${ns_a}" vti6_a 3000
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
+       check_pmtu_value "3000" "${pmtu}" "decreasing tunnel MTU" || fail=1
+
+       # Increase tunnel MTU, check for PMTU increase in route exception
+       mtu "${ns_a}" vti6_a 9000
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
+       check_pmtu_value "9000" "${pmtu}" "increasing tunnel MTU" || fail=1
+
+       return ${fail}
+}
+
+test_pmtu_vti4_udp_routed_exception() {
+       setup namespaces routing vti4routed xfrm4udprouted || return $ksft_skip
+       trace "${ns_a}" veth_A-R1    "${ns_b}" veth_B-R1 \
+             "${ns_a}" vti4_a       "${ns_b}" vti4_b
+
+       veth_mtu=1500
+       vti_mtu=$((veth_mtu - 20))
+
+       #                                UDP   SPI   SN   IV  ICV   pad length   next header
+       esp_payload_rfc4106=$((vti_mtu - 8   - 4   - 4  - 8 - 16  - 1          - 1))
+       ping_payload=$((esp_payload_rfc4106 - 28))
+
+	mtu "${ns_a}"  veth_A-R1 ${veth_mtu}
+	mtu "${ns_r1}" veth_R1-A ${veth_mtu}
+	mtu "${ns_b}"  veth_B-R1 ${veth_mtu}
+	mtu "${ns_r1}" veth_R1-B ${veth_mtu}
+
+       mtu "${ns_a}" vti4_a ${vti_mtu}
+       mtu "${ns_b}" vti4_b ${vti_mtu}
+
+       # Send DF packet without exceeding link layer MTU, check that no
+       # exception is created
+       run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel4_b_addr}
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
+       check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
+
+       # Now decrease link layer MTU by 8 bytes on R1, check that exception is created
+       # with the right PMTU value
+	mtu "${ns_r1}" veth_R1-B $((veth_mtu - 8))
+       run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((ping_payload)) ${tunnel4_b_addr}
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
+       check_pmtu_value "$((esp_payload_rfc4106 - 8))" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106)))"
+}
+
+test_pmtu_vti6_udp_routed_exception() {
+       setup namespaces routing vti6routed xfrm6udprouted || return $ksft_skip
+       trace "${ns_a}" veth_A-R1    "${ns_b}" veth_B-R1 \
+             "${ns_a}" vti6_a       "${ns_b}" vti6_b
+
+       veth_mtu=1500
+       vti_mtu=$((veth_mtu - 40))
+
+       #                                UDP   SPI   SN   IV  ICV   pad length   next header
+       esp_payload_rfc4106=$((vti_mtu - 8   - 4   - 4  - 8 - 16  - 1          - 1))
+       ping_payload=$((esp_payload_rfc4106 - 48))
+
+	mtu "${ns_a}"  veth_A-R1 ${veth_mtu}
+	mtu "${ns_r1}" veth_R1-A ${veth_mtu}
+	mtu "${ns_b}"  veth_B-R1 ${veth_mtu}
+	mtu "${ns_r1}" veth_R1-B ${veth_mtu}
+
+       # mtu "${ns_a}" vti6_a ${vti_mtu}
+       # mtu "${ns_b}" vti6_b ${vti_mtu}
+
+       run_cmd ${ns_a} ${ping6} -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel6_b_addr}
+
+       # Check that exception was not created
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
+       check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
+
+       # Now decrease link layer MTU by 8 bytes on R1, check that exception is created
+       # with the right PMTU value
+	mtu "${ns_r1}" veth_R1-B $((veth_mtu - 8))
+       run_cmd ${ns_a} ${ping6} -q -M want -i 0.1 -w 1 -s $((ping_payload)) ${tunnel6_b_addr}
+       pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
+       check_pmtu_value "$((esp_payload_rfc4106 - 8))" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106)))"
+}
+
 test_pmtu_vti4_default_mtu() {
        setup namespaces veth vti4 || return $ksft_skip
 
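
For concreteness, the overhead arithmetic used by the ESP-in-UDP tests above works out as follows for the values in the script; a standalone sketch that just reproduces the numbers:

#include <stdio.h>

int main(void)
{
	int veth_mtu = 1500;
	int vti_mtu = veth_mtu - 20;	/* outer IPv4 header */
	/* UDP(8) + SPI(4) + SN(4) + IV(8) + ICV(16) + pad len(1) + NH(1) */
	int esp_payload_rfc4106 = vti_mtu - 8 - 4 - 4 - 8 - 16 - 1 - 1;
	int ping_payload = esp_payload_rfc4106 - 28;	/* inner IPv4 + ICMP */

	/* Prints 1438 and 1410; the vti6 variants subtract 40 for the
	 * outer header and 48 for the inner IPv6 + ICMPv6 headers. */
	printf("esp_payload=%d ping_payload=%d\n",
	       esp_payload_rfc4106, ping_payload);
	return 0;
}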
index 21091be..aee631c 100644 (file)
@@ -47,7 +47,7 @@ static void usage(const char *error)
 {
        if (error)
                printf("invalid option: %s\n", error);
-       printf("timestamping interface option*\n\n"
+       printf("timestamping <interface> [bind_phc_index] [option]*\n\n"
               "Options:\n"
               "  IP_MULTICAST_LOOP - looping outgoing multicasts\n"
               "  SO_TIMESTAMP - normal software time stamping, ms resolution\n"
@@ -58,6 +58,7 @@ static void usage(const char *error)
               "  SOF_TIMESTAMPING_RX_SOFTWARE - software fallback for incoming packets\n"
               "  SOF_TIMESTAMPING_SOFTWARE - request reporting of software time stamps\n"
               "  SOF_TIMESTAMPING_RAW_HARDWARE - request reporting of raw HW time stamps\n"
+	       "  SOF_TIMESTAMPING_BIND_PHC - request to bind to the PHC of a PTP vclock\n"
               "  SIOCGSTAMP - check last socket time stamp\n"
               "  SIOCGSTAMPNS - more accurate socket time stamp\n"
               "  PTPV2 - use PTPv2 messages\n");
@@ -311,7 +312,6 @@ static void recvpacket(int sock, int recvmsg_flags,
 
 int main(int argc, char **argv)
 {
-       int so_timestamping_flags = 0;
        int so_timestamp = 0;
        int so_timestampns = 0;
        int siocgstamp = 0;
@@ -325,6 +325,8 @@ int main(int argc, char **argv)
        struct ifreq device;
        struct ifreq hwtstamp;
        struct hwtstamp_config hwconfig, hwconfig_requested;
+       struct so_timestamping so_timestamping_get = { 0, -1 };
+       struct so_timestamping so_timestamping = { 0, -1 };
        struct sockaddr_in addr;
        struct ip_mreq imr;
        struct in_addr iaddr;
@@ -342,7 +344,12 @@ int main(int argc, char **argv)
                exit(1);
        }
 
-       for (i = 2; i < argc; i++) {
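+       /*
+        * An optional numeric second argument is the PHC index to bind to;
+        * the option keywords start after it.
+        */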
+       if (argc >= 3 && sscanf(argv[2], "%d", &so_timestamping.bind_phc) == 1)
+               val = 3;
+       else
+               val = 2;
+
+       for (i = val; i < argc; i++) {
                if (!strcasecmp(argv[i], "SO_TIMESTAMP"))
                        so_timestamp = 1;
                else if (!strcasecmp(argv[i], "SO_TIMESTAMPNS"))
@@ -356,17 +363,19 @@ int main(int argc, char **argv)
                else if (!strcasecmp(argv[i], "PTPV2"))
                        ptpv2 = 1;
                else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_TX_HARDWARE"))
-                       so_timestamping_flags |= SOF_TIMESTAMPING_TX_HARDWARE;
+                       so_timestamping.flags |= SOF_TIMESTAMPING_TX_HARDWARE;
                else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_TX_SOFTWARE"))
-                       so_timestamping_flags |= SOF_TIMESTAMPING_TX_SOFTWARE;
+                       so_timestamping.flags |= SOF_TIMESTAMPING_TX_SOFTWARE;
                else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_RX_HARDWARE"))
-                       so_timestamping_flags |= SOF_TIMESTAMPING_RX_HARDWARE;
+                       so_timestamping.flags |= SOF_TIMESTAMPING_RX_HARDWARE;
                else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_RX_SOFTWARE"))
-                       so_timestamping_flags |= SOF_TIMESTAMPING_RX_SOFTWARE;
+                       so_timestamping.flags |= SOF_TIMESTAMPING_RX_SOFTWARE;
                else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_SOFTWARE"))
-                       so_timestamping_flags |= SOF_TIMESTAMPING_SOFTWARE;
+                       so_timestamping.flags |= SOF_TIMESTAMPING_SOFTWARE;
                else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_RAW_HARDWARE"))
-                       so_timestamping_flags |= SOF_TIMESTAMPING_RAW_HARDWARE;
+                       so_timestamping.flags |= SOF_TIMESTAMPING_RAW_HARDWARE;
+               else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_BIND_PHC"))
+                       so_timestamping.flags |= SOF_TIMESTAMPING_BIND_PHC;
                else
                        usage(argv[i]);
        }
@@ -385,10 +394,10 @@ int main(int argc, char **argv)
        hwtstamp.ifr_data = (void *)&hwconfig;
        memset(&hwconfig, 0, sizeof(hwconfig));
        hwconfig.tx_type =
-               (so_timestamping_flags & SOF_TIMESTAMPING_TX_HARDWARE) ?
+               (so_timestamping.flags & SOF_TIMESTAMPING_TX_HARDWARE) ?
                HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
        hwconfig.rx_filter =
-               (so_timestamping_flags & SOF_TIMESTAMPING_RX_HARDWARE) ?
+               (so_timestamping.flags & SOF_TIMESTAMPING_RX_HARDWARE) ?
                ptpv2 ? HWTSTAMP_FILTER_PTP_V2_L4_SYNC :
                HWTSTAMP_FILTER_PTP_V1_L4_SYNC : HWTSTAMP_FILTER_NONE;
        hwconfig_requested = hwconfig;
@@ -413,6 +422,9 @@ int main(int argc, char **argv)
                 sizeof(struct sockaddr_in)) < 0)
                bail("bind");
 
+       if (setsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE, interface, if_len))
+               bail("bind device");
+
        /* set multicast group for outgoing packets */
        inet_aton("224.0.1.130", &iaddr); /* alternate PTP domain 1 */
        addr.sin_addr = iaddr;
@@ -444,10 +456,9 @@ int main(int argc, char **argv)
                           &enabled, sizeof(enabled)) < 0)
                bail("setsockopt SO_TIMESTAMPNS");
 
-       if (so_timestamping_flags &&
-               setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
-                          &so_timestamping_flags,
-                          sizeof(so_timestamping_flags)) < 0)
+       if (so_timestamping.flags &&
+           setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &so_timestamping,
+                      sizeof(so_timestamping)) < 0)
                bail("setsockopt SO_TIMESTAMPING");
 
        /* request IP_PKTINFO for debugging purposes */
@@ -468,14 +479,18 @@ int main(int argc, char **argv)
        else
                printf("SO_TIMESTAMPNS %d\n", val);
 
-       if (getsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &val, &len) < 0) {
+       len = sizeof(so_timestamping_get);
+       if (getsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &so_timestamping_get,
+                      &len) < 0) {
                printf("%s: %s\n", "getsockopt SO_TIMESTAMPING",
                       strerror(errno));
        } else {
-               printf("SO_TIMESTAMPING %d\n", val);
-               if (val != so_timestamping_flags)
-                       printf("   not the expected value %d\n",
-                              so_timestamping_flags);
+               printf("SO_TIMESTAMPING flags %d, bind phc %d\n",
+                      so_timestamping_get.flags, so_timestamping_get.bind_phc);
+               if (so_timestamping_get.flags != so_timestamping.flags ||
+                   so_timestamping_get.bind_phc != so_timestamping.bind_phc)
+                       printf("   not expected, flags %d, bind phc %d\n",
+                              so_timestamping.flags, so_timestamping.bind_phc);
        }
 
        /* send packets forever every five seconds */
index cd6430b..8748199 100644 (file)
@@ -5,7 +5,7 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
        conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
        nft_concat_range.sh nft_conntrack_helper.sh \
        nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
-       ipip-conntrack-mtu.sh
+       ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh
 
 LDLIBS = -lmnl
 TEST_GEN_FILES =  nf-queue
diff --git a/tools/testing/selftests/netfilter/conntrack_tcp_unreplied.sh b/tools/testing/selftests/netfilter/conntrack_tcp_unreplied.sh
new file mode 100755 (executable)
index 0000000..e7d7bf1
--- /dev/null
@@ -0,0 +1,167 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Check that UNREPLIED tcp conntrack will eventually timeout.
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+waittime=20
+sfx=$(mktemp -u "XXXXXXXX")
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without nft tool"
+       exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ip tool"
+       exit $ksft_skip
+fi
+
+cleanup() {
+       ip netns pids $ns1 | xargs kill 2>/dev/null
+       ip netns pids $ns2 | xargs kill 2>/dev/null
+
+       ip netns del $ns1
+       ip netns del $ns2
+}
+
+ipv4() {
+       echo -n 192.168.$1.2
+}
+
+check_counter()
+{
+       ns=$1
+       name=$2
+       expect=$3
+       local lret=0
+
+       ip netns exec "$ns" nft list counter inet filter "$name" | grep -q "$expect"
+       if [ $? -ne 0 ]; then
+               echo "ERROR: counter $name in $ns has unexpected value (expected $expect)" 1>&2
+               ip netns exec "$ns" nft list counter inet filter "$name" 1>&2
+               lret=1
+       fi
+
+       return $lret
+}
+
+# Create test namespaces
+ip netns add $ns1 || exit 1
+
+trap cleanup EXIT
+
+ip netns add $ns2 || exit 1
+
+# Connect the namespace to the host using a veth pair
+ip -net $ns1 link add name veth1 type veth peer name veth2
+ip -net $ns1 link set netns $ns2 dev veth2
+
+ip -net $ns1 link set up dev lo
+ip -net $ns2 link set up dev lo
+ip -net $ns1 link set up dev veth1
+ip -net $ns2 link set up dev veth2
+
+ip -net $ns2 addr add 10.11.11.2/24 dev veth2
+ip -net $ns2 route add default via 10.11.11.1
+
+ip netns exec $ns2 sysctl -q net.ipv4.conf.veth2.forwarding=1
+
+# add a rule inside NS so we enable conntrack
+ip netns exec $ns1 iptables -A INPUT -m state --state established,related -j ACCEPT
+
+ip -net $ns1 addr add 10.11.11.1/24 dev veth1
+ip -net $ns1 route add 10.99.99.99 via 10.11.11.2
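+# 10.99.99.99 is a virtual address: nothing answers on it, so connection
+# attempts stay unreplied until the nat redirect is installed.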
+
+# Check connectivity works
+ip netns exec $ns1 ping -q -c 2 10.11.11.2 >/dev/null || exit 1
+
+ip netns exec $ns2 nc -l -p 8080 < /dev/null &
+
+# Count new connection requests to the virtual address, and connections
+# that actually get redirected once the nat rule is added later.
+
+ip netns exec $ns2 nft -f - <<EOF
+table inet filter {
+       counter connreq { }
+       counter redir { }
+       chain input {
+               type filter hook input priority 0; policy accept;
+               ct state new tcp flags syn ip daddr 10.99.99.99 tcp dport 80 counter name "connreq" accept
+               ct state new ct status dnat tcp dport 8080 counter name "redir" accept
+       }
+}
+EOF
+if [ $? -ne 0 ]; then
+       echo "ERROR: Could not load nft rules"
+       exit 1
+fi
+
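+# Lower the SYN_SENT timeout so an unreplied entry can expire within the
+# wait loop below rather than after the much longer default timeout.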
+ip netns exec $ns2 sysctl -q net.netfilter.nf_conntrack_tcp_timeout_syn_sent=10
+
+echo "INFO: connect $ns1 -> $ns2 to the virtual ip"
+ip netns exec $ns1 bash -c 'while true ; do
+       nc -p 60000 10.99.99.99 80
+       sleep 1
+       done' &
+
+sleep 1
+
+ip netns exec $ns2 nft -f - <<EOF
+table inet nat {
+       chain prerouting {
+               type nat hook prerouting priority 0; policy accept;
+               ip daddr 10.99.99.99 tcp dport 80 redirect to :8080
+       }
+}
+EOF
+if [ $? -ne 0 ]; then
+       echo "ERROR: Could not load nat redirect"
+       exit 1
+fi
+
+count=$(ip netns exec $ns2 conntrack -L -p tcp --dport 80 2>/dev/null | wc -l)
+if [ $count -eq 0 ]; then
+       echo "ERROR: $ns2 did not pick up tcp connection from peer"
+       exit 1
+fi
+
+echo "INFO: NAT redirect added in ns $ns2, waiting for $waittime seconds for nat to take effect"
+for i in $(seq 1 $waittime); do
+       echo -n "."
+
+       sleep 1
+
+       count=$(ip netns exec $ns2 conntrack -L -p tcp --reply-port-src 8080 2>/dev/null | wc -l)
+       if [ $count -gt 0 ]; then
+               echo
+               echo "PASS: redirection took effect after $i seconds"
+               break
+       fi
+
+       m=$((i%20))
+       if [ $m -eq 0 ]; then
+               echo " waited for $i seconds"
+       fi
+done
+
+expect="packets 1 bytes 60"
+check_counter "$ns2" "redir" "$expect"
+if [ $? -ne 0 ]; then
+       ret=1
+fi
+
+if [ $ret -eq 0 ];then
+       echo "PASS: redirection counter has expected values"
+else
+       echo "ERROR: no tcp connection was redirected"
+fi
+
+exit $ret
index e363bda..2ea438e 100644 (file)
@@ -210,8 +210,10 @@ static void anon_release_pages(char *rel_area)
 
 static void anon_allocate_area(void **alloc_area)
 {
-       if (posix_memalign(alloc_area, page_size, nr_pages * page_size))
-               err("posix_memalign() failed");
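+       /* mmap() the area so it can later be munmap()'d safely; memory from
+        * posix_memalign() may only be released with free()
+        */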
+       *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
+                          MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+       if (*alloc_area == MAP_FAILED)
+               err("mmap of anonymous memory failed");
 }
 
 static void noop_alias_mapping(__u64 *start, size_t len, unsigned long offset)
index f08f5e8..0be80c2 100644 (file)
@@ -186,7 +186,6 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
                    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
                        r = kvm_io_bus_unregister_dev(kvm,
                                zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
-                       kvm_iodevice_destructor(&dev->dev);
 
                        /*
                         * On failure, unregister destroys all devices on the
@@ -196,6 +195,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
                         */
                        if (r)
                                break;
+                       kvm_iodevice_destructor(&dev->dev);
                }
        }
 
index 7d95126..d20fba0 100644 (file)
@@ -935,7 +935,7 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
                stat_data->kvm = kvm;
                stat_data->desc = pdesc;
                stat_data->kind = KVM_STAT_VCPU;
-               kvm->debugfs_stat_data[i] = stat_data;
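+               /* vcpu stat entries follow the vm stat entries in this array */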
+               kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
                debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
                                    kvm->debugfs_dentry, stat_data,
                                    &stat_fops_per_vm);
@@ -3110,6 +3110,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
                                        ++vcpu->stat.generic.halt_poll_invalid;
                                goto out;
                        }
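+                       /* let the CPU relax between busy-poll iterations */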
+                       cpu_relax();
                        poll_end = cur = ktime_get();
                } while (kvm_vcpu_can_poll(cur, stop));
        }
@@ -4390,6 +4391,16 @@ struct compat_kvm_dirty_log {
        };
 };
 
+struct compat_kvm_clear_dirty_log {
+       __u32 slot;
+       __u32 num_pages;
+       __u64 first_page;
+       union {
+               compat_uptr_t dirty_bitmap; /* one bit per page */
+               __u64 padding2;
+       };
+};
+
 static long kvm_vm_compat_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
 {
@@ -4399,6 +4410,24 @@ static long kvm_vm_compat_ioctl(struct file *filp,
        if (kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+       case KVM_CLEAR_DIRTY_LOG: {
+               struct compat_kvm_clear_dirty_log compat_log;
+               struct kvm_clear_dirty_log log;
+
+               if (copy_from_user(&compat_log, (void __user *)arg,
+                                  sizeof(compat_log)))
+                       return -EFAULT;
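+               /* widen the compat layout; compat_ptr() converts the 32-bit pointer */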
+               log.slot         = compat_log.slot;
+               log.num_pages    = compat_log.num_pages;
+               log.first_page   = compat_log.first_page;
+               log.padding2     = compat_log.padding2;
+               log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
+
+               r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
+               break;
+       }
+#endif
        case KVM_GET_DIRTY_LOG: {
                struct compat_kvm_dirty_log compat_log;
                struct kvm_dirty_log log;