Merge tag 'm68k-for-v5.15-tag3' of git://git.kernel.org/pub/scm/linux/kernel/git...
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 28 Sep 2021 20:24:43 +0000 (13:24 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 28 Sep 2021 20:24:43 +0000 (13:24 -0700)
Pull more m68k updates from Geert Uytterhoeven:

 - signal handling fixes

 - removal of set_fs()

[ The set_fs removal isn't strictly a fix, but it's been pending for a
  while and is very welcome. The signal handling fixes resolved an issue
  that was incorrectly attributed to the set_fs changes    - Linus ]

* tag 'm68k-for-v5.15-tag3' of git://git.kernel.org/pub/scm/linux/kernel/git/geert/linux-m68k:
  m68k: Remove set_fs()
  m68k: Provide __{get,put}_kernel_nofault
  m68k: Factor the 8-byte lowlevel {get,put}_user code into helpers
  m68k: Use BUILD_BUG for passing invalid sizes to get_user/put_user
  m68k: Remove the 030 case in virt_to_phys_slow
  m68k: Document that access_ok is broken for !CONFIG_CPU_HAS_ADDRESS_SPACES
  m68k: Leave stack mangling to asm wrapper of sigreturn()
  m68k: Update ->thread.esp0 before calling syscall_trace() in ret_from_signal
  m68k: Handle arrivals of multiple signals correctly

719 files changed:
Documentation/admin-guide/README.rst
Documentation/core-api/irq/irq-domain.rst
Documentation/devicetree/bindings/arm/tegra.yaml
Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml [new file with mode: 0644]
Documentation/networking/device_drivers/ethernet/intel/ice.rst
Documentation/networking/dsa/sja1105.rst
Documentation/process/changes.rst
Documentation/translations/zh_CN/admin-guide/README.rst
Documentation/translations/zh_TW/admin-guide/README.rst
MAINTAINERS
Makefile
arch/alpha/Kconfig
arch/alpha/include/asm/asm-prototypes.h
arch/alpha/include/asm/io.h
arch/alpha/include/asm/jensen.h
arch/alpha/include/asm/setup.h [new file with mode: 0644]
arch/alpha/include/uapi/asm/setup.h
arch/alpha/kernel/sys_jensen.c
arch/alpha/lib/Makefile
arch/alpha/lib/udiv-qrnnd.S [moved from arch/alpha/math-emu/qrnnd.S with 98% similarity]
arch/alpha/math-emu/Makefile
arch/alpha/math-emu/math.c
arch/arm/kernel/signal.c
arch/arm64/Kconfig
arch/arm64/boot/dts/qcom/ipq8074.dtsi
arch/arm64/include/asm/acpi.h
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/mte.h
arch/arm64/include/asm/string.h
arch/arm64/kernel/acpi.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/mte.c
arch/arm64/kernel/process.c
arch/arm64/kernel/signal.c
arch/arm64/kvm/hyp/nvhe/Makefile
arch/arm64/kvm/perf.c
arch/arm64/kvm/pmu-emul.c
arch/arm64/lib/strcmp.S
arch/arm64/lib/strncmp.S
arch/csky/kernel/signal.c
arch/mips/kernel/signal.c
arch/nios2/Kconfig.debug
arch/nios2/kernel/setup.c
arch/parisc/include/asm/page.h
arch/parisc/lib/iomap.c
arch/powerpc/boot/Makefile
arch/powerpc/include/asm/asm-const.h
arch/powerpc/kernel/interrupt.c
arch/powerpc/kernel/interrupt_64.S
arch/powerpc/kernel/mce.c
arch/powerpc/kernel/signal.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/sysdev/xics/xics-common.c
arch/riscv/Kconfig
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/include/asm/ccwgroup.h
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h
arch/s390/net/bpf_jit_comp.c
arch/s390/pci/pci_mmio.c
arch/sh/boot/Makefile
arch/sh/include/asm/pgtable-3level.h
arch/sparc/kernel/ioport.c
arch/sparc/kernel/mdesc.c
arch/sparc/lib/iomap.c
arch/x86/Kconfig
arch/x86/Makefile_32.cpu
arch/x86/hyperv/hv_apic.c
arch/x86/include/asm/kvm_page_track.h
arch/x86/include/asm/pkeys.h
arch/x86/include/asm/special_insns.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/xen/swiotlb-xen.h
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/setup.c
arch/x86/kernel/setup_percpu.c
arch/x86/kvm/emulate.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/hyperv.h
arch/x86/kvm/ioapic.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/page_track.c
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/evmcs.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/lib/insn.c
arch/x86/mm/fault.c
arch/x86/mm/init_64.c
arch/x86/mm/kasan_init_64.c
arch/x86/mm/numa.c
arch/x86/mm/numa_emulation.c
arch/x86/mm/pat/memtype.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/mmu_pv.c
arch/x86/xen/pci-swiotlb-xen.c
arch/x86/xen/smp_pv.c
block/bio.c
block/blk-cgroup.c
block/blk-integrity.c
block/blk-mq-tag.c
block/bsg.c
block/fops.c
drivers/acpi/osl.c
drivers/android/binder.c
drivers/android/binder_internal.h
drivers/base/arch_numa.c
drivers/base/power/trace.c
drivers/base/swnode.c
drivers/comedi/comedi_fops.c
drivers/cpufreq/cpufreq_governor_attr_set.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/vexpress-spc-cpufreq.c
drivers/edac/dmc520_edac.c
drivers/edac/synopsys_edac.c
drivers/fpga/dfl.c
drivers/fpga/machxo2-spi.c
drivers/gpio/gpio-aspeed-sgpio.c
drivers/gpio/gpio-rockchip.c
drivers/gpio/gpio-uniphier.c
drivers/gpio/gpiolib-acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h
drivers/gpu/drm/amd/pm/inc/smu_types.h
drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h
drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
drivers/gpu/drm/etnaviv/etnaviv_iommu.c
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.h
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/display/intel_bw.c
drivers/gpu/drm/i915/display/intel_dmc.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp_link_training.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
drivers/gpu/drm/i915/gt/intel_rps.c
drivers/gpu/drm/i915/gt/uc/intel_uc.c
drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/rockchip/cdn-dp-core.c
drivers/gpu/drm/ttm/ttm_pool.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
drivers/hid/hid-apple.c
drivers/hid/hid-betopff.c
drivers/hid/hid-u2fzero.c
drivers/hid/wacom_wac.c
drivers/hv/ring_buffer.c
drivers/hwtracing/coresight/coresight-syscfg.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-armada-370-xp.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-mbigen.c
drivers/irqchip/irq-renesas-rza1.c
drivers/macintosh/smu.c
drivers/mcb/mcb-core.c
drivers/md/md.c
drivers/media/platform/s5p-jpeg/jpeg-core.c
drivers/media/platform/s5p-jpeg/jpeg-core.h
drivers/media/rc/ir_toy.c
drivers/misc/bcm-vk/bcm_vk_tty.c
drivers/misc/genwqe/card_base.c
drivers/misc/habanalabs/common/command_submission.c
drivers/misc/habanalabs/common/hw_queue.c
drivers/misc/habanalabs/gaudi/gaudi.c
drivers/misc/habanalabs/gaudi/gaudi_security.c
drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/net/dsa/b53/b53_mdio.c
drivers/net/dsa/b53/b53_mmap.c
drivers/net/dsa/b53/b53_priv.h
drivers/net/dsa/b53/b53_spi.c
drivers/net/dsa/b53/b53_srab.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/dsa_loop.c
drivers/net/dsa/hirschmann/hellcreek.c
drivers/net/dsa/lan9303-core.c
drivers/net/dsa/lan9303.h
drivers/net/dsa/lan9303_i2c.c
drivers/net/dsa/lan9303_mdio.c
drivers/net/dsa/lantiq_gswip.c
drivers/net/dsa/microchip/ksz8795_spi.c
drivers/net/dsa/microchip/ksz8863_smi.c
drivers/net/dsa/microchip/ksz9477_i2c.c
drivers/net/dsa/microchip/ksz9477_spi.c
drivers/net/dsa/mt7530.c
drivers/net/dsa/mv88e6060.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/devlink.c
drivers/net/dsa/mv88e6xxx/devlink.h
drivers/net/dsa/ocelot/felix.c
drivers/net/dsa/ocelot/felix.h
drivers/net/dsa/ocelot/felix_vsc9959.c
drivers/net/dsa/ocelot/seville_vsc9953.c
drivers/net/dsa/qca/ar9331.c
drivers/net/dsa/qca8k.c
drivers/net/dsa/realtek-smi-core.c
drivers/net/dsa/sja1105/sja1105_clocking.c
drivers/net/dsa/sja1105/sja1105_devlink.c
drivers/net/dsa/sja1105/sja1105_flower.c
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/dsa/sja1105/sja1105_mdio.c
drivers/net/dsa/sja1105/sja1105_spi.c
drivers/net/dsa/sja1105/sja1105_static_config.c
drivers/net/dsa/sja1105/sja1105_static_config.h
drivers/net/dsa/sja1105/sja1105_vl.c
drivers/net/dsa/sja1105/sja1105_vl.h
drivers/net/dsa/vitesse-vsc73xx-core.c
drivers/net/dsa/vitesse-vsc73xx-platform.c
drivers/net/dsa/vitesse-vsc73xx-spi.c
drivers/net/dsa/vitesse-vsc73xx.h
drivers/net/dsa/xrs700x/xrs700x.c
drivers/net/dsa/xrs700x/xrs700x.h
drivers/net/dsa/xrs700x/xrs700x_i2c.c
drivers/net/dsa/xrs700x/xrs700x_mdio.c
drivers/net/ethernet/3com/3c515.c
drivers/net/ethernet/8390/ne.c
drivers/net/ethernet/amd/ni65.c
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/broadcom/bgmac-bcma.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/cadence/macb_pci.c
drivers/net/ethernet/freescale/enetc/enetc.c
drivers/net/ethernet/freescale/enetc/enetc_ierb.c
drivers/net/ethernet/freescale/enetc/enetc_ierb.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/i825xx/82596.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/Kconfig
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_idc.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/devlink.c
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/lag.c
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
drivers/net/ethernet/microsoft/mana/hw_channel.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_devlink.c
drivers/net/ethernet/mscc/ocelot_mrp.c
drivers/net/ethernet/mscc/ocelot_net.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_roce.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/rdc/r6040.c
drivers/net/ethernet/sfc/efx_channels.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/hamradio/6pack.c
drivers/net/hamradio/dmascc.c
drivers/net/ipa/ipa_table.c
drivers/net/pcs/pcs-xpcs-nxp.c
drivers/net/phy/dp83640_reg.h
drivers/net/phy/mdio_device.c
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/usb/hso.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wan/Makefile
drivers/net/xen-netback/netback.c
drivers/nfc/st-nci/spi.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/multipath.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/configfs.c
drivers/nvmem/Kconfig
drivers/of/device.c
drivers/of/property.c
drivers/pci/Kconfig
drivers/pci/pci-acpi.c
drivers/pci/quirks.c
drivers/pci/vpd.c
drivers/perf/arm_pmu.c
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/dell/Kconfig
drivers/platform/x86/gigabyte-wmi.c
drivers/platform/x86/intel/hid.c
drivers/platform/x86/intel/punit_ipc.c
drivers/platform/x86/lg-laptop.c
drivers/platform/x86/touchscreen_dmi.c
drivers/ptp/Kconfig
drivers/regulator/max14577-regulator.c
drivers/regulator/qcom-rpmh-regulator.c
drivers/rtc/rtc-cmos.c
drivers/s390/char/sclp_early.c
drivers/s390/cio/ccwgroup.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_queue.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/arm/Kconfig
drivers/scsi/arm/acornscsi.c
drivers/scsi/arm/fas216.c
drivers/scsi/arm/queue.c
drivers/scsi/elx/efct/efct_lio.c
drivers/scsi/elx/libefc/efc_device.c
drivers/scsi/elx/libefc/efc_fabric.c
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_ctl.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/ncr53c8xx.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/sd_zbc.c
drivers/scsi/ses.c
drivers/scsi/sr_ioctl.c
drivers/scsi/st.c
drivers/scsi/ufs/ufshcd-pci.c
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/scsi/ufs/ufshpb.c
drivers/spi/spi-rockchip.c
drivers/spi/spi-tegra20-slink.c
drivers/spi/spi.c
drivers/staging/greybus/uart.c
drivers/staging/media/hantro/hantro_drv.c
drivers/staging/media/sunxi/cedrus/cedrus_video.c
drivers/staging/r8188eu/os_dep/ioctl_linux.c
drivers/target/target_core_configfs.c
drivers/target/target_core_pr.c
drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
drivers/thermal/qcom/tsens.c
drivers/thermal/thermal_core.c
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/mvebu-uart.c
drivers/tty/synclink_gt.c
drivers/tty/tty_ldisc.c
drivers/usb/cdns3/cdns3-gadget.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-acm.h
drivers/usb/core/hcd.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc3/core.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/gadget/function/u_audio.c
drivers/usb/gadget/udc/r8a66597-udc.c
drivers/usb/host/bcma-hcd.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/xhci.c
drivers/usb/musb/tusb6010.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/option.c
drivers/usb/storage/unusual_devs.h
drivers/usb/storage/unusual_uas.h
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa_user/vduse_dev.c
drivers/vhost/net.c
drivers/vhost/vdpa.c
drivers/video/fbdev/Kconfig
drivers/virtio/virtio.c
drivers/watchdog/Kconfig
drivers/xen/Kconfig
drivers/xen/balloon.c
drivers/xen/gntdev.c
drivers/xen/swiotlb-xen.c
fs/afs/callback.c
fs/afs/cell.c
fs/afs/dir.c
fs/afs/dir_edit.c
fs/afs/file.c
fs/afs/fs_probe.c
fs/afs/fsclient.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/protocol_afs.h [new file with mode: 0644]
fs/afs/protocol_yfs.h
fs/afs/rotate.c
fs/afs/server.c
fs/afs/super.c
fs/afs/write.c
fs/btrfs/file-item.c
fs/btrfs/space-info.c
fs/btrfs/verity.c
fs/btrfs/volumes.c
fs/buffer.c
fs/ceph/caps.c
fs/cifs/cache.c
fs/cifs/cifs_debug.c
fs/cifs/cifs_fs_sb.h
fs/cifs/cifs_ioctl.h
fs/cifs/cifs_spnego.c
fs/cifs/cifs_spnego.h
fs/cifs/cifs_unicode.c
fs/cifs/cifsacl.c
fs/cifs/cifsacl.h
fs/cifs/cifsencrypt.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/dns_resolve.c
fs/cifs/dns_resolve.h
fs/cifs/export.c
fs/cifs/file.c
fs/cifs/fscache.c
fs/cifs/fscache.h
fs/cifs/inode.c
fs/cifs/ioctl.c
fs/cifs/link.c
fs/cifs/misc.c
fs/cifs/netmisc.c
fs/cifs/ntlmssp.h
fs/cifs/readdir.c
fs/cifs/rfc1002pdu.h
fs/cifs/sess.c
fs/cifs/smb2file.c
fs/cifs/smb2glob.h
fs/cifs/smb2inode.c
fs/cifs/smb2misc.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smb2proto.h
fs/cifs/smb2status.h
fs/cifs/smb2transport.c
fs/cifs/smberr.h
fs/cifs/transport.c
fs/cifs/winucase.c
fs/cifs/xattr.c
fs/erofs/inode.c
fs/erofs/zmap.c
fs/ext2/balloc.c
fs/inode.c
fs/io-wq.c
fs/io_uring.c
fs/ksmbd/misc.c
fs/ksmbd/misc.h
fs/ksmbd/server.c
fs/ksmbd/smb2pdu.c
fs/ksmbd/smb_common.c
fs/ksmbd/smb_common.h
fs/ksmbd/transport_rdma.c
fs/ksmbd/vfs.c
fs/ksmbd/vfs.h
fs/lockd/svcxdr.h
fs/nfsd/nfs4state.c
fs/ocfs2/dlmglue.c
fs/qnx4/dir.c
fs/smbfs_common/smbfsctl.h
fs/vboxsf/super.c
fs/verity/enable.c
fs/verity/open.c
include/acpi/acpi_io.h
include/asm-generic/io.h
include/asm-generic/iomap.h
include/asm-generic/mshyperv.h
include/asm-generic/pci_iomap.h
include/asm-generic/vmlinux.lds.h
include/kvm/arm_pmu.h
include/linux/buffer_head.h
include/linux/cgroup-defs.h
include/linux/cgroup.h
include/linux/compiler-clang.h
include/linux/compiler-gcc.h
include/linux/compiler.h
include/linux/compiler_attributes.h
include/linux/dsa/ocelot.h
include/linux/irqdomain.h
include/linux/kvm_host.h
include/linux/mdio.h
include/linux/memblock.h
include/linux/migrate.h
include/linux/mmap_lock.h
include/linux/nvmem-consumer.h
include/linux/overflow.h
include/linux/packing.h
include/linux/perf/arm_pmu.h
include/linux/pkeys.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/tracehook.h
include/linux/uio.h
include/linux/usb/hcd.h
include/net/dsa.h
include/net/sock.h
include/scsi/scsi_device.h
include/trace/events/afs.h
include/trace/events/erofs.h
include/uapi/linux/android/binder.h
include/uapi/linux/cifs/cifs_mount.h
include/uapi/linux/io_uring.h
include/xen/xen-ops.h
init/do_mounts.c
init/main.c
ipc/sem.c
kernel/bpf/disasm.c
kernel/bpf/disasm.h
kernel/bpf/stackmap.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup.c
kernel/dma/debug.c
kernel/dma/mapping.c
kernel/entry/common.c
kernel/events/core.c
kernel/irq/irqdomain.c
kernel/locking/rwbase_rt.c
kernel/printk/printk.c
kernel/rseq.c
kernel/time/posix-cpu-timers.c
kernel/trace/blktrace.c
lib/Kconfig.debug
lib/Kconfig.kasan
lib/bootconfig.c
lib/iov_iter.c
lib/packing.c
lib/pci_iomap.c
lib/zlib_inflate/inffast.c
mm/damon/dbgfs-test.h
mm/debug.c
mm/ksm.c
mm/memblock.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/shmem.c
mm/swap.c
mm/util.c
mm/workingset.c
net/caif/chnl_net.c
net/core/dev.c
net/core/netclassid_cgroup.c
net/core/netprio_cgroup.c
net/core/sock.c
net/dccp/minisocks.c
net/dsa/dsa.c
net/dsa/dsa2.c
net/dsa/dsa_priv.h
net/dsa/slave.c
net/dsa/tag_ocelot.c
net/dsa/tag_ocelot_8021q.c
net/ipv4/nexthop.c
net/ipv4/tcp_input.c
net/ipv4/udp_tunnel_nic.c
net/ipv6/ip6_fib.c
net/l2tp/l2tp_core.c
net/mctp/route.c
net/mptcp/protocol.c
net/packet/af_packet.c
net/smc/smc_clc.c
net/smc/smc_core.c
net/tipc/socket.c
net/unix/af_unix.c
scripts/Makefile.clang
scripts/Makefile.kasan
scripts/Makefile.modpost
scripts/checkkconfigsymbols.py
scripts/clang-tools/gen_compile_commands.py
scripts/min-tool-version.sh
scripts/sorttable.c
security/selinux/hooks.c
security/smack/smack_lsm.c
tools/arch/x86/include/uapi/asm/unistd_32.h [moved from tools/arch/x86/include/asm/unistd_32.h with 100% similarity]
tools/arch/x86/include/uapi/asm/unistd_64.h [moved from tools/arch/x86/include/asm/unistd_64.h with 83% similarity]
tools/arch/x86/lib/insn.c
tools/bootconfig/include/linux/memblock.h
tools/include/linux/compiler-gcc.h
tools/include/linux/overflow.h
tools/lib/perf/evsel.c
tools/perf/Documentation/jitdump-specification.txt
tools/perf/Documentation/perf-c2c.txt
tools/perf/Documentation/perf-intel-pt.txt
tools/perf/Documentation/perf-lock.txt
tools/perf/Documentation/perf-script-perl.txt
tools/perf/Documentation/perf-script-python.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Documentation/topdown.txt
tools/perf/arch/arm/util/auxtrace.c
tools/perf/arch/arm/util/cs-etm.c
tools/perf/arch/arm/util/perf_regs.c
tools/perf/arch/arm/util/pmu.c
tools/perf/arch/arm/util/unwind-libdw.c
tools/perf/arch/arm/util/unwind-libunwind.c
tools/perf/arch/x86/util/iostat.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/pmu-events/arch/powerpc/power8/other.json
tools/perf/tests/code-reading.c
tools/perf/tests/dwarf-unwind.c
tools/perf/ui/browser.c
tools/perf/ui/browser.h
tools/perf/ui/browsers/annotate.c
tools/perf/util/bpf-event.c
tools/perf/util/config.c
tools/perf/util/machine.c
tools/testing/selftests/arm64/signal/test_signals_utils.c
tools/testing/selftests/bpf/cgroup_helpers.c
tools/testing/selftests/bpf/cgroup_helpers.h
tools/testing/selftests/bpf/network_helpers.c
tools/testing/selftests/bpf/network_helpers.h
tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/task_pt_regs.c
tools/testing/selftests/bpf/progs/connect4_dropper.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_task_pt_regs.c
tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/access_tracking_perf_test.c
tools/testing/selftests/kvm/demand_paging_test.c
tools/testing/selftests/kvm/dirty_log_perf_test.c
tools/testing/selftests/kvm/include/test_util.h
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/kvm_page_table_test.c
tools/testing/selftests/kvm/lib/test_util.c
tools/testing/selftests/kvm/rseq_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/steal_time.c
tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
tools/testing/selftests/lib.mk
tools/testing/selftests/nci/nci_dev.c
tools/testing/selftests/net/af_unix/Makefile
tools/testing/selftests/net/af_unix/test_unix_oob.c
tools/testing/selftests/net/altnames.sh
tools/testing/selftests/powerpc/tm/tm-syscall-asm.S
tools/testing/selftests/powerpc/tm/tm-syscall.c
tools/usb/testusb.c
tools/vm/page-types.c
virt/kvm/kvm_main.c

index 35314b6..caa3c09 100644 (file)
@@ -259,7 +259,7 @@ Configuring the kernel
 Compiling the kernel
 --------------------
 
- - Make sure you have at least gcc 4.9 available.
+ - Make sure you have at least gcc 5.1 available.
    For more information, refer to :ref:`Documentation/process/changes.rst <changes>`.
 
    Please note that you can still run a.out user programs with this kernel.
index 6979b4a..9c0e875 100644 (file)
@@ -175,9 +175,10 @@ for IRQ numbers that are passed to struct device registrations.  In that
 case the Linux IRQ numbers cannot be dynamically assigned and the legacy
 mapping should be used.
 
-As the name implies, the *_legacy() functions are deprecated and only
+As the name implies, the \*_legacy() functions are deprecated and only
 exist to ease the support of ancient platforms. No new users should be
-added.
+added. Same goes for the \*_simple() functions when their use results
+in the legacy behaviour.
 
 The legacy map assumes a contiguous range of IRQ numbers has already
 been allocated for the controller and that the IRQ number can be
index b962fa6..d79d36a 100644 (file)
@@ -54,7 +54,7 @@ properties:
           - const: toradex,apalis_t30
           - const: nvidia,tegra30
       - items:
-          - const: toradex,apalis_t30-eval-v1.1
+          - const: toradex,apalis_t30-v1.1-eval
           - const: toradex,apalis_t30-eval
           - const: toradex,apalis_t30-v1.1
           - const: toradex,apalis_t30
index fbb59c9..78044c3 100644 (file)
@@ -9,7 +9,7 @@ function block.
 
 All DISP device tree nodes must be siblings to the central MMSYS_CONFIG node.
 For a description of the MMSYS_CONFIG binding, see
-Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt.
+Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
 
 DISP function blocks
 ====================
index 7f2578d..9eb4bb5 100644 (file)
@@ -19,7 +19,9 @@ properties:
       - const: allwinner,sun8i-v3s-emac
       - const: allwinner,sun50i-a64-emac
       - items:
-          - const: allwinner,sun50i-h6-emac
+          - enum:
+              - allwinner,sun20i-d1-emac
+              - allwinner,sun50i-h6-emac
           - const: allwinner,sun50i-a64-emac
 
   reg:
diff --git a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
new file mode 100644 (file)
index 0000000..b9ca8ef
--- /dev/null
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ufs/samsung,exynos-ufs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung SoC series UFS host controller Device Tree Bindings
+
+maintainers:
+  - Alim Akhtar <alim.akhtar@samsung.com>
+
+description: |
+  Each Samsung UFS host controller instance should have its own node.
+  This binding define Samsung specific binding other then what is used
+  in the common ufshcd bindings
+  [1] Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+
+properties:
+
+  compatible:
+    enum:
+      - samsung,exynos7-ufs
+
+  reg:
+    items:
+      - description: HCI register
+      - description: vendor specific register
+      - description: unipro register
+      - description: UFS protector register
+
+  reg-names:
+    items:
+      - const: hci
+      - const: vs_hci
+      - const: unipro
+      - const: ufsp
+
+  clocks:
+    items:
+      - description: ufs link core clock
+      - description: unipro main clock
+
+  clock-names:
+    items:
+      - const: core_clk
+      - const: sclk_unipro_main
+
+  interrupts:
+    maxItems: 1
+
+  phys:
+    maxItems: 1
+
+  phy-names:
+    const: ufs-phy
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - phys
+  - phy-names
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/exynos7-clk.h>
+
+    ufs: ufs@15570000 {
+       compatible = "samsung,exynos7-ufs";
+       reg = <0x15570000 0x100>,
+             <0x15570100 0x100>,
+             <0x15571000 0x200>,
+             <0x15572000 0x300>;
+       reg-names = "hci", "vs_hci", "unipro", "ufsp";
+       interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
+       clocks = <&clock_fsys1 ACLK_UFS20_LINK>,
+                <&clock_fsys1 SCLK_UFSUNIPRO20_USER>;
+       clock-names = "core_clk", "sclk_unipro_main";
+       pinctrl-names = "default";
+       pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
+       phys = <&ufs_phy>;
+       phy-names = "ufs-phy";
+    };
+...
index e7d9cbf..67b7a70 100644 (file)
@@ -851,7 +851,7 @@ NOTES:
 - 0x88A8 traffic will not be received unless VLAN stripping is disabled with
   the following command::
 
-    # ethool -K <ethX> rxvlan off
+    # ethtool -K <ethX> rxvlan off
 
 - 0x88A8/0x8100 double VLANs cannot be used with 0x8100 or 0x8100/0x8100 VLANS
   configured on the same port. 0x88a8/0x8100 traffic will not be received if
index 564caee..29b1bae 100644 (file)
@@ -296,7 +296,7 @@ not available.
 Device Tree bindings and board design
 =====================================
 
-This section references ``Documentation/devicetree/bindings/net/dsa/sja1105.txt``
+This section references ``Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml``
 and aims to showcase some potential switch caveats.
 
 RMII PHY role and out-of-band signaling
index d3a8557..e35ab74 100644 (file)
@@ -29,7 +29,7 @@ you probably needn't concern yourself with pcmciautils.
 ====================== ===============  ========================================
         Program        Minimal version       Command to check the version
 ====================== ===============  ========================================
-GNU C                  4.9              gcc --version
+GNU C                  5.1              gcc --version
 Clang/LLVM (optional)  10.0.1           clang --version
 GNU make               3.81             make --version
 binutils               2.23             ld -v
index 669a022..980eb20 100644 (file)
@@ -223,7 +223,7 @@ Linux内核5.x版本 <http://kernel.org/>
 编译内核
 ---------
 
- - 确保您至少有gcc 4.9可用。
+ - 确保您至少有gcc 5.1可用。
    有关更多信息,请参阅 :ref:`Documentation/process/changes.rst <changes>` 。
 
    请注意,您仍然可以使用此内核运行a.out用户程序。
index b752e50..6ce97ed 100644 (file)
@@ -226,7 +226,7 @@ Linux內核5.x版本 <http://kernel.org/>
 編譯內核
 ---------
 
- - 確保您至少有gcc 4.9可用。
+ - 確保您至少有gcc 5.1可用。
    有關更多信息,請參閱 :ref:`Documentation/process/changes.rst <changes>` 。
 
    請注意,您仍然可以使用此內核運行a.out用戶程序。
index eeb4c70..5b33791 100644 (file)
@@ -977,12 +977,12 @@ L:        platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     drivers/platform/x86/amd-pmc.*
 
-AMD POWERPLAY
+AMD POWERPLAY AND SWSMU
 M:     Evan Quan <evan.quan@amd.com>
 L:     amd-gfx@lists.freedesktop.org
 S:     Supported
 T:     git https://gitlab.freedesktop.org/agd5f/linux.git
-F:     drivers/gpu/drm/amd/pm/powerplay/
+F:     drivers/gpu/drm/amd/pm/
 
 AMD PTDMA DRIVER
 M:     Sanjay R Mehta <sanju.mehta@amd.com>
@@ -2804,9 +2804,8 @@ F:        arch/arm/mach-pxa/include/mach/vpac270.h
 F:     arch/arm/mach-pxa/vpac270.c
 
 ARM/VT8500 ARM ARCHITECTURE
-M:     Tony Prisk <linux@prisktech.co.nz>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:     Maintained
+S:     Orphan
 F:     Documentation/devicetree/bindings/i2c/i2c-wmt.txt
 F:     arch/arm/mach-vt8500/
 F:     drivers/clocksource/timer-vt8500.c
@@ -13255,9 +13254,9 @@ F:      Documentation/scsi/NinjaSCSI.rst
 F:     drivers/scsi/nsp32*
 
 NIOS2 ARCHITECTURE
-M:     Ley Foon Tan <ley.foon.tan@intel.com>
+M:     Dinh Nguyen <dinguyen@kernel.org>
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
 F:     arch/nios2/
 
 NITRO ENCLAVES (NE)
@@ -14342,7 +14341,8 @@ F:      Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml
 F:     drivers/pci/controller/pci-ixp4xx.c
 
 PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
-M:     Jonathan Derrick <jonathan.derrick@intel.com>
+M:     Nirmal Patel <nirmal.patel@linux.intel.com>
+R:     Jonathan Derrick <jonathan.derrick@linux.dev>
 L:     linux-pci@vger.kernel.org
 S:     Supported
 F:     drivers/pci/controller/vmd.c
@@ -16650,13 +16650,6 @@ M:     Lubomir Rintel <lkundrak@v3.sk>
 S:     Supported
 F:     drivers/char/pcmcia/scr24x_cs.c
 
-SCSI CDROM DRIVER
-M:     Jens Axboe <axboe@kernel.dk>
-L:     linux-scsi@vger.kernel.org
-S:     Maintained
-W:     http://www.kernel.dk
-F:     drivers/scsi/sr*
-
 SCSI RDMA PROTOCOL (SRP) INITIATOR
 M:     Bart Van Assche <bvanassche@acm.org>
 L:     linux-rdma@vger.kernel.org
@@ -16955,7 +16948,6 @@ F:      drivers/misc/sgi-xp/
 
 SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
 M:     Karsten Graul <kgraul@linux.ibm.com>
-M:     Guvenc Gulce <guvenc@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -17968,10 +17960,11 @@ F:    Documentation/admin-guide/svga.rst
 F:     arch/x86/boot/video*
 
 SWIOTLB SUBSYSTEM
-M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M:     Christoph Hellwig <hch@infradead.org>
 L:     iommu@lists.linux-foundation.org
 S:     Supported
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
+W:     http://git.infradead.org/users/hch/dma-mapping.git
+T:     git git://git.infradead.org/users/hch/dma-mapping.git
 F:     arch/*/kernel/pci-swiotlb.c
 F:     include/linux/swiotlb.h
 F:     kernel/dma/swiotlb.c
@@ -19288,13 +19281,12 @@ S:    Maintained
 F:     drivers/usb/misc/chaoskey.c
 
 USB CYPRESS C67X00 DRIVER
-M:     Peter Korsgaard <jacmet@sunsite.dk>
 L:     linux-usb@vger.kernel.org
-S:     Maintained
+S:     Orphan
 F:     drivers/usb/c67x00/
 
 USB DAVICOM DM9601 DRIVER
-M:     Peter Korsgaard <jacmet@sunsite.dk>
+M:     Peter Korsgaard <peter@korsgaard.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 W:     http://www.linux-usb.org/usbnet
@@ -20474,7 +20466,6 @@ F:      samples/bpf/xdpsock*
 F:     tools/lib/bpf/xsk*
 
 XEN BLOCK SUBSYSTEM
-M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 M:     Roger Pau Monné <roger.pau@citrix.com>
 L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
 S:     Supported
@@ -20522,7 +20513,7 @@ S:      Supported
 F:     drivers/net/xen-netback/*
 
 XEN PCI SUBSYSTEM
-M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M:     Juergen Gross <jgross@suse.com>
 L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
 S:     Supported
 F:     arch/x86/pci/*xen*
@@ -20545,7 +20536,8 @@ S:      Supported
 F:     sound/xen/*
 
 XEN SWIOTLB SUBSYSTEM
-M:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M:     Juergen Gross <jgross@suse.com>
+M:     Stefano Stabellini <sstabellini@kernel.org>
 L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
 L:     iommu@lists.linux-foundation.org
 S:     Supported
index 7cfe4ff..437ccc6 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*
@@ -849,12 +849,6 @@ endif
 
 DEBUG_CFLAGS   :=
 
-# Workaround for GCC versions < 5.0
-# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61801
-ifdef CONFIG_CC_IS_GCC
-DEBUG_CFLAGS   += $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
-endif
-
 ifdef CONFIG_DEBUG_INFO
 
 ifdef CONFIG_DEBUG_INFO_SPLIT
index 02e5b67..4e87783 100644 (file)
@@ -20,7 +20,7 @@ config ALPHA
        select NEED_SG_DMA_LENGTH
        select VIRT_TO_BUS
        select GENERIC_IRQ_PROBE
-       select GENERIC_PCI_IOMAP if PCI
+       select GENERIC_PCI_IOMAP
        select AUTO_IRQ_AFFINITY if SMP
        select GENERIC_IRQ_SHOW
        select ARCH_WANT_IPC_PARSE_VERSION
@@ -199,7 +199,6 @@ config ALPHA_EIGER
 
 config ALPHA_JENSEN
        bool "Jensen"
-       depends on BROKEN
        select HAVE_EISA
        help
          DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
index b34cc1f..c8ae46f 100644 (file)
@@ -16,3 +16,4 @@ extern void __divlu(void);
 extern void __remlu(void);
 extern void __divqu(void);
 extern void __remqu(void);
+extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long, unsigned long , unsigned long);
index 0fab5ac..c9cb554 100644 (file)
@@ -60,7 +60,7 @@ extern inline void set_hae(unsigned long new_hae)
  * Change virtual addresses to physical addresses and vv.
  */
 #ifdef USE_48_BIT_KSEG
-static inline unsigned long virt_to_phys(void *address)
+static inline unsigned long virt_to_phys(volatile void *address)
 {
        return (unsigned long)address - IDENT_ADDR;
 }
@@ -70,7 +70,7 @@ static inline void * phys_to_virt(unsigned long address)
        return (void *) (address + IDENT_ADDR);
 }
 #else
-static inline unsigned long virt_to_phys(void *address)
+static inline unsigned long virt_to_phys(volatile void *address)
 {
         unsigned long phys = (unsigned long)address;
 
@@ -106,7 +106,7 @@ static inline void * phys_to_virt(unsigned long address)
 extern unsigned long __direct_map_base;
 extern unsigned long __direct_map_size;
 
-static inline unsigned long __deprecated virt_to_bus(void *address)
+static inline unsigned long __deprecated virt_to_bus(volatile void *address)
 {
        unsigned long phys = virt_to_phys(address);
        unsigned long bus = phys + __direct_map_base;
index 9168951..1c41314 100644 (file)
@@ -111,18 +111,18 @@ __EXTERN_INLINE void jensen_set_hae(unsigned long addr)
  * convinced that I need one of the newer machines.
  */
 
-static inline unsigned int jensen_local_inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_local_inb(unsigned long addr)
 {
        return 0xff & *(vuip)((addr << 9) + EISA_VL82C106);
 }
 
-static inline void jensen_local_outb(u8 b, unsigned long addr)
+__EXTERN_INLINE void jensen_local_outb(u8 b, unsigned long addr)
 {
        *(vuip)((addr << 9) + EISA_VL82C106) = b;
        mb();
 }
 
-static inline unsigned int jensen_bus_inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_bus_inb(unsigned long addr)
 {
        long result;
 
@@ -131,7 +131,7 @@ static inline unsigned int jensen_bus_inb(unsigned long addr)
        return __kernel_extbl(result, addr & 3);
 }
 
-static inline void jensen_bus_outb(u8 b, unsigned long addr)
+__EXTERN_INLINE void jensen_bus_outb(u8 b, unsigned long addr)
 {
        jensen_set_hae(0);
        *(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;
diff --git a/arch/alpha/include/asm/setup.h b/arch/alpha/include/asm/setup.h
new file mode 100644 (file)
index 0000000..262aab9
--- /dev/null
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ALPHA_SETUP_H
+#define __ALPHA_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+/*
+ * We leave one page for the initial stack page, and one page for
+ * the initial process structure. Also, the console eats 3 MB for
+ * the initial bootloader (one of which we can reclaim later).
+ */
+#define BOOT_PCB       0x20000000
+#define BOOT_ADDR      0x20000000
+/* Remove when official MILO sources have ELF support: */
+#define BOOT_SIZE      (16*1024)
+
+#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
+#define KERNEL_START_PHYS      0x300000 /* Old bootloaders hardcoded this.  */
+#else
+#define KERNEL_START_PHYS      0x1000000 /* required: Wildfire/Titan/Marvel */
+#endif
+
+#define KERNEL_START   (PAGE_OFFSET+KERNEL_START_PHYS)
+#define SWAPPER_PGD    KERNEL_START
+#define INIT_STACK     (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
+#define EMPTY_PGT      (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
+#define EMPTY_PGE      (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
+#define ZERO_PGE       (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
+
+#define START_ADDR     (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
+
+/*
+ * This is setup by the secondary bootstrap loader.  Because
+ * the zero page is zeroed out as soon as the vm system is
+ * initialized, we need to copy things out into a more permanent
+ * place.
+ */
+#define PARAM                  ZERO_PGE
+#define COMMAND_LINE           ((char *)(absolute_pointer(PARAM + 0x0000)))
+#define INITRD_START           (*(unsigned long *) (PARAM+0x100))
+#define INITRD_SIZE            (*(unsigned long *) (PARAM+0x108))
+
+#endif
index 13b7ee4..f881ea5 100644 (file)
@@ -1,43 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __ALPHA_SETUP_H
-#define __ALPHA_SETUP_H
+#ifndef _UAPI__ALPHA_SETUP_H
+#define _UAPI__ALPHA_SETUP_H
 
 #define COMMAND_LINE_SIZE      256
 
-/*
- * We leave one page for the initial stack page, and one page for
- * the initial process structure. Also, the console eats 3 MB for
- * the initial bootloader (one of which we can reclaim later).
- */
-#define BOOT_PCB       0x20000000
-#define BOOT_ADDR      0x20000000
-/* Remove when official MILO sources have ELF support: */
-#define BOOT_SIZE      (16*1024)
-
-#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
-#define KERNEL_START_PHYS      0x300000 /* Old bootloaders hardcoded this.  */
-#else
-#define KERNEL_START_PHYS      0x1000000 /* required: Wildfire/Titan/Marvel */
-#endif
-
-#define KERNEL_START   (PAGE_OFFSET+KERNEL_START_PHYS)
-#define SWAPPER_PGD    KERNEL_START
-#define INIT_STACK     (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
-#define EMPTY_PGT      (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
-#define EMPTY_PGE      (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
-#define ZERO_PGE       (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
-
-#define START_ADDR     (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
-
-/*
- * This is setup by the secondary bootstrap loader.  Because
- * the zero page is zeroed out as soon as the vm system is
- * initialized, we need to copy things out into a more permanent
- * place.
- */
-#define PARAM                  ZERO_PGE
-#define COMMAND_LINE           ((char*)(PARAM + 0x0000))
-#define INITRD_START           (*(unsigned long *) (PARAM+0x100))
-#define INITRD_SIZE            (*(unsigned long *) (PARAM+0x108))
-
-#endif
+#endif /* _UAPI__ALPHA_SETUP_H */
index e5d870f..5c9c884 100644 (file)
@@ -7,6 +7,11 @@
  *
  * Code supporting the Jensen.
  */
+#define __EXTERN_INLINE
+#include <asm/io.h>
+#include <asm/jensen.h>
+#undef  __EXTERN_INLINE
+
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 
 #include <asm/ptrace.h>
 
-#define __EXTERN_INLINE inline
-#include <asm/io.h>
-#include <asm/jensen.h>
-#undef  __EXTERN_INLINE
-
 #include <asm/dma.h>
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
index 854d5e7..1cc74f7 100644 (file)
@@ -14,6 +14,7 @@ ev6-$(CONFIG_ALPHA_EV6) := ev6-
 ev67-$(CONFIG_ALPHA_EV67) := ev67-
 
 lib-y =        __divqu.o __remqu.o __divlu.o __remlu.o \
+       udiv-qrnnd.o \
        udelay.o \
        $(ev6-y)memset.o \
        $(ev6-y)memcpy.o \
similarity index 98%
rename from arch/alpha/math-emu/qrnnd.S
rename to arch/alpha/lib/udiv-qrnnd.S
index d6373ec..b887aa5 100644 (file)
@@ -25,6 +25,7 @@
  # along with GCC; see the file COPYING.  If not, write to the 
  # Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
  # MA 02111-1307, USA.
+#include <asm/export.h>
 
         .set noreorder
         .set noat
@@ -161,3 +162,4 @@ $Odd:
        ret     $31,($26),1
 
        .end    __udiv_qrnnd
+EXPORT_SYMBOL(__udiv_qrnnd)
index 6eda097..3206402 100644 (file)
@@ -7,4 +7,4 @@ ccflags-y := -w
 
 obj-$(CONFIG_MATHEMU) += math-emu.o
 
-math-emu-objs := math.o qrnnd.o
+math-emu-objs := math.o
index f7cef66..4212258 100644 (file)
@@ -403,5 +403,3 @@ alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask)
 egress:
        return si_code;
 }
-
-EXPORT_SYMBOL(__udiv_qrnnd);
index d0a800b..a41e27a 100644 (file)
@@ -628,7 +628,6 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
                                uprobe_notify_resume(regs);
                        } else {
                                tracehook_notify_resume(regs);
-                               rseq_handle_notify_resume(NULL, regs);
                        }
                }
                local_irq_disable();
index 077f2ec..5c7ae4c 100644 (file)
@@ -86,7 +86,7 @@ config ARM64
        select ARCH_SUPPORTS_LTO_CLANG_THIN
        select ARCH_SUPPORTS_CFI_CLANG
        select ARCH_SUPPORTS_ATOMIC_RMW
-       select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
+       select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
        select ARCH_SUPPORTS_NUMA_BALANCING
        select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
        select ARCH_WANT_DEFAULT_BPF_JIT
index a620ac0..db33300 100644 (file)
                                interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
                                phys = <&qusb_phy_0>, <&usb0_ssphy>;
                                phy-names = "usb2-phy", "usb3-phy";
-                               tx-fifo-resize;
                                snps,is-utmi-l1-suspend;
                                snps,hird-threshold = /bits/ 8 <0x0>;
                                snps,dis_u2_susphy_quirk;
                                interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
                                phys = <&qusb_phy_1>, <&usb1_ssphy>;
                                phy-names = "usb2-phy", "usb3-phy";
-                               tx-fifo-resize;
                                snps,is-utmi-l1-suspend;
                                snps,hird-threshold = /bits/ 8 <0x0>;
                                snps,dis_u2_susphy_quirk;
index 7535dc7..bd68e1b 100644 (file)
@@ -50,9 +50,6 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
 void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
 #define acpi_os_ioremap acpi_os_ioremap
 
-void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size);
-#define acpi_os_memmap acpi_os_memmap
-
 typedef u64 phys_cpuid_t;
 #define PHYS_CPUID_INVALID INVALID_HWID
 
index 89faca0..bfa5840 100644 (file)
@@ -525,6 +525,11 @@ alternative_endif
 #define EXPORT_SYMBOL_NOKASAN(name)    EXPORT_SYMBOL(name)
 #endif
 
+#ifdef CONFIG_KASAN_HW_TAGS
+#define EXPORT_SYMBOL_NOHWKASAN(name)
+#else
+#define EXPORT_SYMBOL_NOHWKASAN(name)  EXPORT_SYMBOL_NOKASAN(name)
+#endif
        /*
         * Emit a 64-bit absolute little endian symbol reference in a way that
         * ensures that it will be resolved at build time, even when building a
index 3f93b9e..0251165 100644 (file)
@@ -99,11 +99,17 @@ void mte_check_tfsr_el1(void);
 
 static inline void mte_check_tfsr_entry(void)
 {
+       if (!system_supports_mte())
+               return;
+
        mte_check_tfsr_el1();
 }
 
 static inline void mte_check_tfsr_exit(void)
 {
+       if (!system_supports_mte())
+               return;
+
        /*
         * The asynchronous faults are sync'ed automatically with
         * TFSR_EL1 on kernel entry but for exit an explicit dsb()
index 3a3264f..95f7686 100644 (file)
@@ -12,11 +12,13 @@ extern char *strrchr(const char *, int c);
 #define __HAVE_ARCH_STRCHR
 extern char *strchr(const char *, int c);
 
+#ifndef CONFIG_KASAN_HW_TAGS
 #define __HAVE_ARCH_STRCMP
 extern int strcmp(const char *, const char *);
 
 #define __HAVE_ARCH_STRNCMP
 extern int strncmp(const char *, const char *, __kernel_size_t);
+#endif
 
 #define __HAVE_ARCH_STRLEN
 extern __kernel_size_t strlen(const char *);
index 1c9c2f7..f385172 100644 (file)
@@ -273,8 +273,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
        return __pgprot(PROT_DEVICE_nGnRnE);
 }
 
-static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
-                                      acpi_size size, bool memory)
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 {
        efi_memory_desc_t *md, *region = NULL;
        pgprot_t prot;
@@ -300,11 +299,9 @@ static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
         * It is fine for AML to remap regions that are not represented in the
         * EFI memory map at all, as it only describes normal memory, and MMIO
         * regions that require a virtual mapping to make them accessible to
-        * the EFI runtime services. Determine the region default
-        * attributes by checking the requested memory semantics.
+        * the EFI runtime services.
         */
-       prot = memory ? __pgprot(PROT_NORMAL_NC) :
-                       __pgprot(PROT_DEVICE_nGnRnE);
+       prot = __pgprot(PROT_DEVICE_nGnRnE);
        if (region) {
                switch (region->type) {
                case EFI_LOADER_CODE:
@@ -364,16 +361,6 @@ static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
        return __ioremap(phys, size, prot);
 }
 
-void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
-{
-       return __acpi_os_ioremap(phys, size, false);
-}
-
-void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size)
-{
-       return __acpi_os_ioremap(phys, size, true);
-}
-
 /*
  * Claim Synchronous External Aborts as a firmware first notification.
  *
index f8a3067..6ec7036 100644 (file)
@@ -1526,9 +1526,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
        /*
         * For reasons that aren't entirely clear, enabling KPTI on Cavium
         * ThunderX leads to apparent I-cache corruption of kernel text, which
-        * ends as well as you might imagine. Don't even try.
+        * ends as well as you might imagine. Don't even try. We cannot rely
+        * on the cpus_have_*cap() helpers here to detect the CPU erratum
+        * because cpucap detection order may change. However, since we know
+        * affected CPUs are always in a homogeneous configuration, it is
+        * safe to rely on this_cpu_has_cap() here.
         */
-       if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
+       if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
                str = "ARM64_WORKAROUND_CAVIUM_27456";
                __kpti_forced = -1;
        }
index 5a294f2..ff49627 100644 (file)
@@ -513,7 +513,7 @@ size_t sve_state_size(struct task_struct const *task)
 void sve_alloc(struct task_struct *task)
 {
        if (task->thread.sve_state) {
-               memset(task->thread.sve_state, 0, sve_state_size(current));
+               memset(task->thread.sve_state, 0, sve_state_size(task));
                return;
        }
 
index 9d314a3..e5e801b 100644 (file)
@@ -142,12 +142,7 @@ void mte_enable_kernel_async(void)
 #ifdef CONFIG_KASAN_HW_TAGS
 void mte_check_tfsr_el1(void)
 {
-       u64 tfsr_el1;
-
-       if (!system_supports_mte())
-               return;
-
-       tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
+       u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
 
        if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
                /*
@@ -199,6 +194,9 @@ void mte_thread_init_user(void)
 
 void mte_thread_switch(struct task_struct *next)
 {
+       if (!system_supports_mte())
+               return;
+
        mte_update_sctlr_user(next);
 
        /*
index 19100fe..40adb8c 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/nospec.h>
-#include <linux/sched.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
 #include <linux/unistd.h>
@@ -58,7 +57,7 @@
 
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 #include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
+unsigned long __stack_chk_guard __ro_after_init;
 EXPORT_SYMBOL(__stack_chk_guard);
 #endif
 
index 9fe70b1..c287b94 100644 (file)
@@ -940,10 +940,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
                        if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
                                do_signal(regs);
 
-                       if (thread_flags & _TIF_NOTIFY_RESUME) {
+                       if (thread_flags & _TIF_NOTIFY_RESUME)
                                tracehook_notify_resume(regs);
-                               rseq_handle_notify_resume(NULL, regs);
-                       }
 
                        if (thread_flags & _TIF_FOREIGN_FPSTATE)
                                fpsimd_restore_current_state();
index 5df6193..8d741f7 100644 (file)
@@ -54,7 +54,7 @@ $(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE
 #    runtime. Because the hypervisor is part of the kernel binary, relocations
 #    produce a kernel VA. We enumerate relocations targeting hyp at build time
 #    and convert the kernel VAs at those positions to hyp VAs.
-$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel
+$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel FORCE
        $(call if_changed,hyprel)
 
 # 5) Compile hyp-reloc.S and link it into the existing partially linked object.
index f9bb3b1..c84fe24 100644 (file)
@@ -50,9 +50,6 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
 
 int kvm_perf_init(void)
 {
-       if (kvm_pmu_probe_pmuver() != ID_AA64DFR0_PMUVER_IMP_DEF && !is_protected_kvm_enabled())
-               static_branch_enable(&kvm_arm_pmu_available);
-
        return perf_register_guest_info_callbacks(&kvm_guest_cbs);
 }
 
index f5065f2..2af3c37 100644 (file)
@@ -740,7 +740,14 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
        kvm_pmu_create_perf_event(vcpu, select_idx);
 }
 
-int kvm_pmu_probe_pmuver(void)
+void kvm_host_pmu_init(struct arm_pmu *pmu)
+{
+       if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
+           !kvm_arm_support_pmu_v3() && !is_protected_kvm_enabled())
+               static_branch_enable(&kvm_arm_pmu_available);
+}
+
+static int kvm_pmu_probe_pmuver(void)
 {
        struct perf_event_attr attr = { };
        struct perf_event *event;
index d7bee21..83bcad7 100644 (file)
@@ -173,4 +173,4 @@ L(done):
        ret
 
 SYM_FUNC_END_PI(strcmp)
-EXPORT_SYMBOL_NOKASAN(strcmp)
+EXPORT_SYMBOL_NOHWKASAN(strcmp)
index 48d44f7..e42bcfc 100644 (file)
@@ -258,4 +258,4 @@ L(ret0):
        ret
 
 SYM_FUNC_END_PI(strncmp)
-EXPORT_SYMBOL_NOKASAN(strncmp)
+EXPORT_SYMBOL_NOHWKASAN(strncmp)
index 312f046..bc4238b 100644 (file)
@@ -260,8 +260,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
        if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
                do_signal(regs);
 
-       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+       if (thread_info_flags & _TIF_NOTIFY_RESUME)
                tracehook_notify_resume(regs);
-               rseq_handle_notify_resume(NULL, regs);
-       }
 }
index f1e9851..c9b2a75 100644 (file)
@@ -906,10 +906,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
        if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
                do_signal(regs);
 
-       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+       if (thread_info_flags & _TIF_NOTIFY_RESUME)
                tracehook_notify_resume(regs);
-               rseq_handle_notify_resume(NULL, regs);
-       }
 
        user_enter();
 }
index a8bc06e..ca1beb8 100644 (file)
@@ -3,9 +3,10 @@
 config EARLY_PRINTK
        bool "Activate early kernel debugging"
        default y
+       depends on TTY
        select SERIAL_CORE_CONSOLE
        help
-         Enable early printk on console
+         Enable early printk on console.
          This is useful for kernel debugging when your machine crashes very
          early before the console code is initialized.
          You should normally say N here, unless you want to debug such a crash.
index cf8d687..40bc8fb 100644 (file)
@@ -149,8 +149,6 @@ static void __init find_limits(unsigned long *min, unsigned long *max_low,
 
 void __init setup_arch(char **cmdline_p)
 {
-       int dram_start;
-
        console_verbose();
 
        memory_start = memblock_start_of_DRAM();
index d00313d..0561568 100644 (file)
@@ -184,7 +184,7 @@ extern int npmem_ranges;
 #include <asm-generic/getorder.h>
 #include <asm/pdc.h>
 
-#define PAGE0   ((struct zeropage *)__PAGE_OFFSET)
+#define PAGE0   ((struct zeropage *)absolute_pointer(__PAGE_OFFSET))
 
 /* DEFINITION OF THE ZERO-PAGE (PAG0) */
 /* based on work by Jason Eckhardt (jason@equator.com) */
index f03adb1..367f639 100644 (file)
@@ -513,12 +513,15 @@ void ioport_unmap(void __iomem *addr)
        }
 }
 
+#ifdef CONFIG_PCI
 void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 {
        if (!INDIRECT_ADDR(addr)) {
                iounmap(addr);
        }
 }
+EXPORT_SYMBOL(pci_iounmap);
+#endif
 
 EXPORT_SYMBOL(ioread8);
 EXPORT_SYMBOL(ioread16);
@@ -544,4 +547,3 @@ EXPORT_SYMBOL(iowrite16_rep);
 EXPORT_SYMBOL(iowrite32_rep);
 EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
-EXPORT_SYMBOL(pci_iounmap);
index 6900d0a..089ee3e 100644 (file)
@@ -35,7 +35,6 @@ endif
 BOOTCFLAGS    := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
                 -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
                 -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
-                -include $(srctree)/include/linux/compiler_attributes.h \
                 $(LINUXINCLUDE)
 
 ifdef CONFIG_PPC64_BOOT_WRAPPER
@@ -70,6 +69,7 @@ ifeq ($(call cc-option-yn, -fstack-protector),y)
 BOOTCFLAGS     += -fno-stack-protector
 endif
 
+BOOTCFLAGS     += -include $(srctree)/include/linux/compiler_attributes.h
 BOOTCFLAGS     += -I$(objtree)/$(obj) -I$(srctree)/$(obj)
 
 DTC_FLAGS      ?= -p 1024
index 0ce2368..dbfa5e1 100644 (file)
 #  define ASM_CONST(x)         __ASM_CONST(x)
 #endif
 
-/*
- * Inline assembly memory constraint
- *
- * GCC 4.9 doesn't properly handle pre update memory constraint "m<>"
- *
- */
-#if defined(GCC_VERSION) && GCC_VERSION < 50000
-#define UPD_CONSTR ""
-#else
 #define UPD_CONSTR "<>"
-#endif
 
 #endif /* _ASM_POWERPC_ASM_CONST_H */
index a73f3f7..de10a26 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/switch_to.h>
 #include <asm/syscall.h>
 #include <asm/time.h>
+#include <asm/tm.h>
 #include <asm/unistd.h>
 
 #if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
@@ -136,6 +137,48 @@ notrace long system_call_exception(long r3, long r4, long r5,
         */
        irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
 
+       /*
+        * If system call is called with TM active, set _TIF_RESTOREALL to
+        * prevent RFSCV being used to return to userspace, because POWER9
+        * TM implementation has problems with this instruction returning to
+        * transactional state. Final register values are not relevant because
+        * the transaction will be aborted upon return anyway. Or in the case
+        * of unsupported_scv SIGILL fault, the return state does not much
+        * matter because it's an edge case.
+        */
+       if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+                       unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
+               current_thread_info()->flags |= _TIF_RESTOREALL;
+
+       /*
+        * If the system call was made with a transaction active, doom it and
+        * return without performing the system call. Unless it was an
+        * unsupported scv vector, in which case it's treated like an illegal
+        * instruction.
+        */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
+           !trap_is_unsupported_scv(regs)) {
+               /* Enable TM in the kernel, and disable EE (for scv) */
+               hard_irq_disable();
+               mtmsr(mfmsr() | MSR_TM);
+
+               /* tabort, this dooms the transaction, nothing else */
+               asm volatile(".long 0x7c00071d | ((%0) << 16)"
+                               :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
+
+               /*
+                * Userspace will never see the return value. Execution will
+                * resume after the tbegin. of the aborted transaction with the
+                * checkpointed register state. A context switch could occur
+                * or signal delivered to the process before resuming the
+                * doomed transaction context, but that should all be handled
+                * as expected.
+                */
+               return -ENOSYS;
+       }
+#endif // CONFIG_PPC_TRANSACTIONAL_MEM
+
        local_irq_enable();
 
        if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
index d4212d2..ec950b0 100644 (file)
@@ -12,7 +12,6 @@
 #include <asm/mmu.h>
 #include <asm/ppc_asm.h>
 #include <asm/ptrace.h>
-#include <asm/tm.h>
 
        .section        ".toc","aw"
 SYS_CALL_TABLE:
@@ -55,12 +54,6 @@ COMPAT_SYS_CALL_TABLE:
        .globl system_call_vectored_\name
 system_call_vectored_\name:
 _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
-       extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
-       bne     tabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
        SCV_INTERRUPT_TO_KERNEL
        mr      r10,r1
        ld      r1,PACAKSAVE(r13)
@@ -247,12 +240,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_common_real)
        .globl system_call_common
 system_call_common:
 _ASM_NOKPROBE_SYMBOL(system_call_common)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
-       extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
-       bne     tabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
        mr      r10,r1
        ld      r1,PACAKSAVE(r13)
        std     r10,0(r1)
@@ -425,34 +412,6 @@ SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
 RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
 #endif
 
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-tabort_syscall:
-_ASM_NOKPROBE_SYMBOL(tabort_syscall)
-       /* Firstly we need to enable TM in the kernel */
-       mfmsr   r10
-       li      r9, 1
-       rldimi  r10, r9, MSR_TM_LG, 63-MSR_TM_LG
-       mtmsrd  r10, 0
-
-       /* tabort, this dooms the transaction, nothing else */
-       li      r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
-       TABORT(R9)
-
-       /*
-        * Return directly to userspace. We have corrupted user register state,
-        * but userspace will never see that register state. Execution will
-        * resume after the tbegin of the aborted transaction with the
-        * checkpointed register state.
-        */
-       li      r9, MSR_RI
-       andc    r10, r10, r9
-       mtmsrd  r10, 1
-       mtspr   SPRN_SRR0, r11
-       mtspr   SPRN_SRR1, r12
-       RFI_TO_USER
-       b       .       /* prevent speculative execution */
-#endif
-
        /*
         * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
         * touched, no exit work created, then this can be used.
index 47a683c..fd829f7 100644 (file)
@@ -249,6 +249,7 @@ void machine_check_queue_event(void)
 {
        int index;
        struct machine_check_event evt;
+       unsigned long msr;
 
        if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
                return;
@@ -262,8 +263,20 @@ void machine_check_queue_event(void)
        memcpy(&local_paca->mce_info->mce_event_queue[index],
               &evt, sizeof(evt));
 
-       /* Queue irq work to process this event later. */
-       irq_work_queue(&mce_event_process_work);
+       /*
+        * Queue irq work to process this event later. Before
+        * queuing the work enable translation for non radix LPAR,
+        * as irq_work_queue may try to access memory outside RMO
+        * region.
+        */
+       if (!radix_enabled() && firmware_has_feature(FW_FEATURE_LPAR)) {
+               msr = mfmsr();
+               mtmsr(msr | MSR_IR | MSR_DR);
+               irq_work_queue(&mce_event_process_work);
+               mtmsr(msr);
+       } else {
+               irq_work_queue(&mce_event_process_work);
+       }
 }
 
 void mce_common_process_ue(struct pt_regs *regs,
index e600764..b93b87d 100644 (file)
@@ -293,10 +293,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
                do_signal(current);
        }
 
-       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+       if (thread_info_flags & _TIF_NOTIFY_RESUME)
                tracehook_notify_resume(regs);
-               rseq_handle_notify_resume(NULL, regs);
-       }
 }
 
 static unsigned long get_tm_stackpointer(struct task_struct *tsk)
index 7507939..9048442 100644 (file)
@@ -2536,7 +2536,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
        /* The following code handles the fake_suspend = 1 case */
        mflr    r0
        std     r0, PPC_LR_STKOFF(r1)
-       stdu    r1, -PPC_MIN_STKFRM(r1)
+       stdu    r1, -TM_FRAME_SIZE(r1)
 
        /* Turn on TM. */
        mfmsr   r8
@@ -2551,10 +2551,42 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
        nop
 
+       /*
+        * It's possible that treclaim. may modify registers, if we have lost
+        * track of fake-suspend state in the guest due to it using rfscv.
+        * Save and restore registers in case this occurs.
+        */
+       mfspr   r3, SPRN_DSCR
+       mfspr   r4, SPRN_XER
+       mfspr   r5, SPRN_AMR
+       /* SPRN_TAR would need to be saved here if the kernel ever used it */
+       mfcr    r12
+       SAVE_NVGPRS(r1)
+       SAVE_GPR(2, r1)
+       SAVE_GPR(3, r1)
+       SAVE_GPR(4, r1)
+       SAVE_GPR(5, r1)
+       stw     r12, 8(r1)
+       std     r1, HSTATE_HOST_R1(r13)
+
        /* We have to treclaim here because that's the only way to do S->N */
        li      r3, TM_CAUSE_KVM_RESCHED
        TRECLAIM(R3)
 
+       GET_PACA(r13)
+       ld      r1, HSTATE_HOST_R1(r13)
+       REST_GPR(2, r1)
+       REST_GPR(3, r1)
+       REST_GPR(4, r1)
+       REST_GPR(5, r1)
+       lwz     r12, 8(r1)
+       REST_NVGPRS(r1)
+       mtspr   SPRN_DSCR, r3
+       mtspr   SPRN_XER, r4
+       mtspr   SPRN_AMR, r5
+       mtcr    r12
+       HMT_MEDIUM
+
        /*
         * We were in fake suspend, so we are not going to save the
         * register state as the guest checkpointed state (since
@@ -2582,7 +2614,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
        std     r5, VCPU_TFHAR(r9)
        std     r6, VCPU_TFIAR(r9)
 
-       addi    r1, r1, PPC_MIN_STKFRM
+       addi    r1, r1, TM_FRAME_SIZE
        ld      r0, PPC_LR_STKOFF(r1)
        mtlr    r0
        blr
index 5c1a157..244a727 100644 (file)
@@ -348,9 +348,9 @@ static int xics_host_map(struct irq_domain *domain, unsigned int virq,
        if (xics_ics->check(xics_ics, hwirq))
                return -EINVAL;
 
-       /* No chip data for the XICS domain */
+       /* Let the ICS be the chip data for the XICS domain. For ICS native */
        irq_domain_set_info(domain, virq, hwirq, xics_ics->chip,
-                           NULL, handle_fasteoi_irq, NULL, NULL);
+                           xics_ics, handle_fasteoi_irq, NULL, NULL);
 
        return 0;
 }
index c3f3fd5..301a542 100644 (file)
@@ -236,7 +236,7 @@ config ARCH_RV32I
 config ARCH_RV64I
        bool "RV64I"
        select 64BIT
-       select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
+       select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
        select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
        select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
index 2bd90c5..b86de61 100644 (file)
@@ -685,16 +685,6 @@ config STACK_GUARD
          The minimum size for the stack guard should be 256 for 31 bit and
          512 for 64 bit.
 
-config WARN_DYNAMIC_STACK
-       def_bool n
-       prompt "Emit compiler warnings for function with dynamic stack usage"
-       help
-         This option enables the compiler option -mwarn-dynamicstack. If the
-         compiler supports this options generates warnings for functions
-         that dynamically allocate stack space using alloca.
-
-         Say N if you are unsure.
-
 endmenu
 
 menu "I/O subsystem"
index a3cf33a..450b351 100644 (file)
@@ -85,13 +85,6 @@ cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
 endif
 endif
 
-ifdef CONFIG_WARN_DYNAMIC_STACK
-  ifneq ($(call cc-option,-mwarn-dynamicstack),)
-    KBUILD_CFLAGS += -mwarn-dynamicstack
-    KBUILD_CFLAGS_DECOMPRESSOR += -mwarn-dynamicstack
-  endif
-endif
-
 ifdef CONFIG_EXPOLINE
   ifneq ($(call cc-option,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),)
     CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
index 37b6115..6aad18e 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_BPF_LSM=y
 CONFIG_PREEMPT=y
+CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -503,6 +504,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_LITEX is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
@@ -661,7 +663,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
 CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
@@ -720,6 +721,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
 CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
@@ -774,7 +777,6 @@ CONFIG_RANDOM32_SELFTEST=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_DMA_API_DEBUG=y
-CONFIG_STRING_SELFTEST=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
@@ -853,12 +855,12 @@ CONFIG_FAIL_FUNCTION=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LKDTM=m
 CONFIG_TEST_MIN_HEAP=y
-CONFIG_TEST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_STRING_SELFTEST=y
 CONFIG_TEST_BITOPS=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_LIVEPATCH=m
index 56a1cc8..f08b161 100644 (file)
@@ -8,6 +8,7 @@ CONFIG_BPF_SYSCALL=y
 CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_BPF_LSM=y
+CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -494,6 +495,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_LITEX is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
@@ -648,7 +650,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
 CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
@@ -708,6 +709,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
 CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
index 36dbf50..aa995d9 100644 (file)
@@ -55,7 +55,7 @@ int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv,
                        int num_devices, const char *buf);
 
 extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
-extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
+int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv);
 
 extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
 extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);
index 16256e1..1072245 100644 (file)
@@ -419,13 +419,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
        kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
-       set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+       set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
-       clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+       clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
index 752a0ff..6a6dd5e 100644 (file)
@@ -4066,7 +4066,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
                kvm_s390_patch_guest_per_regs(vcpu);
        }
 
-       clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);
+       clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
 
        vcpu->arch.sie_block->icptcode = 0;
        cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
index ecd741e..52bc8fb 100644 (file)
@@ -79,7 +79,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
 
 static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
 {
-       return test_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+       return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
 }
 
 static inline int kvm_is_ucontrol(struct kvm *kvm)
index 8841926..840d859 100644 (file)
@@ -248,8 +248,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 
 #define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)            \
 ({                                                             \
-       /* Branch instruction needs 6 bytes */                  \
-       int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
+       int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2;      \
        _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
        REG_SET_SEEN(b1);                                       \
        REG_SET_SEEN(b2);                                       \
@@ -761,10 +760,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT4(0xb9080000, dst_reg, src_reg);
                break;
        case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
-               if (!imm)
-                       break;
-               /* alfi %dst,imm */
-               EMIT6_IMM(0xc20b0000, dst_reg, imm);
+               if (imm != 0) {
+                       /* alfi %dst,imm */
+                       EMIT6_IMM(0xc20b0000, dst_reg, imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
@@ -786,17 +785,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT4(0xb9090000, dst_reg, src_reg);
                break;
        case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
-               if (!imm)
-                       break;
-               /* alfi %dst,-imm */
-               EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+               if (imm != 0) {
+                       /* alfi %dst,-imm */
+                       EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
                if (!imm)
                        break;
-               /* agfi %dst,-imm */
-               EMIT6_IMM(0xc2080000, dst_reg, -imm);
+               if (imm == -0x80000000) {
+                       /* algfi %dst,0x80000000 */
+                       EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
+               } else {
+                       /* agfi %dst,-imm */
+                       EMIT6_IMM(0xc2080000, dst_reg, -imm);
+               }
                break;
        /*
         * BPF_MUL
@@ -811,10 +815,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT4(0xb90c0000, dst_reg, src_reg);
                break;
        case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
-               if (imm == 1)
-                       break;
-               /* msfi %r5,imm */
-               EMIT6_IMM(0xc2010000, dst_reg, imm);
+               if (imm != 1) {
+                       /* msfi %r5,imm */
+                       EMIT6_IMM(0xc2010000, dst_reg, imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
@@ -867,6 +871,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                        if (BPF_OP(insn->code) == BPF_MOD)
                                /* lhgi %dst,0 */
                                EMIT4_IMM(0xa7090000, dst_reg, 0);
+                       else
+                               EMIT_ZERO(dst_reg);
                        break;
                }
                /* lhi %w0,0 */
@@ -999,10 +1005,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT4(0xb9820000, dst_reg, src_reg);
                break;
        case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
-               if (!imm)
-                       break;
-               /* xilf %dst,imm */
-               EMIT6_IMM(0xc0070000, dst_reg, imm);
+               if (imm != 0) {
+                       /* xilf %dst,imm */
+                       EMIT6_IMM(0xc0070000, dst_reg, imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
@@ -1033,10 +1039,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
                break;
        case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
-               if (imm == 0)
-                       break;
-               /* sll %dst,imm(%r0) */
-               EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+               if (imm != 0) {
+                       /* sll %dst,imm(%r0) */
+                       EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
@@ -1058,10 +1064,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
                break;
        case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
-               if (imm == 0)
-                       break;
-               /* srl %dst,imm(%r0) */
-               EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+               if (imm != 0) {
+                       /* srl %dst,imm(%r0) */
+                       EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
@@ -1083,10 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
                break;
        case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
-               if (imm == 0)
-                       break;
-               /* sra %dst,imm(%r0) */
-               EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+               if (imm != 0) {
+                       /* sra %dst,imm(%r0) */
+                       EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+               }
                EMIT_ZERO(dst_reg);
                break;
        case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
index ae683aa..c5b35ea 100644 (file)
@@ -159,7 +159,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
 
        mmap_read_lock(current->mm);
        ret = -EINVAL;
-       vma = find_vma(current->mm, mmio_addr);
+       vma = vma_lookup(current->mm, mmio_addr);
        if (!vma)
                goto out_unlock_mmap;
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
@@ -298,7 +298,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
 
        mmap_read_lock(current->mm);
        ret = -EINVAL;
-       vma = find_vma(current->mm, mmio_addr);
+       vma = vma_lookup(current->mm, mmio_addr);
        if (!vma)
                goto out_unlock_mmap;
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
index 58592df..c081e7e 100644 (file)
@@ -80,30 +80,30 @@ $(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
 $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
        $(call if_changed,lzo)
 
-$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
        $(call if_changed,uimage,bzip2)
 
-$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
        $(call if_changed,uimage,gzip)
 
-$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
        $(call if_changed,uimage,lzma)
 
-$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz
+$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz FORCE
        $(call if_changed,uimage,xz)
 
-$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
        $(call if_changed,uimage,lzo)
 
-$(obj)/uImage.bin: $(obj)/vmlinux.bin
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
        $(call if_changed,uimage,none)
 
 OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec
-$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux
+$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux FORCE
        $(call if_changed,objcopy)
 
 OBJCOPYFLAGS_uImage.srec := -I binary -O srec
-$(obj)/uImage.srec: $(obj)/uImage
+$(obj)/uImage.srec: $(obj)/uImage FORCE
        $(call if_changed,objcopy)
 
 $(obj)/uImage: $(obj)/uImage.$(suffix-y)
index 56bf35c..cdced80 100644 (file)
@@ -34,7 +34,7 @@ typedef struct { unsigned long long pmd; } pmd_t;
 
 static inline pmd_t *pud_pgtable(pud_t pud)
 {
-       return (pmd_t *)pud_val(pud);
+       return (pmd_t *)(unsigned long)pud_val(pud);
 }
 
 /* only used by the stubbed out hugetlb gup code, should never be called */
index 8e1d72a..7ceae24 100644 (file)
@@ -356,7 +356,9 @@ err_nomem:
 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs)
 {
-       if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size)))
+       size = PAGE_ALIGN(size);
+
+       if (!sparc_dma_free_resource(cpu_addr, size))
                return;
 
        dma_make_coherent(dma_addr, size);
index 8e645dd..30f171b 100644 (file)
@@ -39,6 +39,7 @@ struct mdesc_hdr {
        u32     node_sz; /* node block size */
        u32     name_sz; /* name block size */
        u32     data_sz; /* data block size */
+       char    data[];
 } __attribute__((aligned(16)));
 
 struct mdesc_elem {
@@ -612,7 +613,7 @@ EXPORT_SYMBOL(mdesc_get_node_info);
 
 static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
 {
-       return (struct mdesc_elem *) (mdesc + 1);
+       return (struct mdesc_elem *) mdesc->data;
 }
 
 static void *name_block(struct mdesc_hdr *mdesc)
index c9da9f1..f3a8cd4 100644 (file)
@@ -19,8 +19,10 @@ void ioport_unmap(void __iomem *addr)
 EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
 
+#ifdef CONFIG_PCI
 void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 {
        /* nothing to do */
 }
 EXPORT_SYMBOL(pci_iounmap);
+#endif
index 4e001bb..ab83c22 100644 (file)
@@ -339,6 +339,11 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK
 config ARCH_HIBERNATION_POSSIBLE
        def_bool y
 
+config ARCH_NR_GPIO
+       int
+       default 1024 if X86_64
+       default 512
+
 config ARCH_SUSPEND_POSSIBLE
        def_bool y
 
@@ -2605,7 +2610,6 @@ config PCI_OLPC
 config PCI_XEN
        def_bool y
        depends on PCI && XEN
-       select SWIOTLB_XEN
 
 config MMCONF_FAM10H
        def_bool y
index e7355f8..94834c4 100644 (file)
@@ -4,6 +4,12 @@
 
 tune           = $(call cc-option,-mtune=$(1),$(2))
 
+ifdef CONFIG_CC_IS_CLANG
+align          := -falign-functions=0 $(call cc-option,-falign-jumps=0) $(call cc-option,-falign-loops=0)
+else
+align          := -falign-functions=0 -falign-jumps=0 -falign-loops=0
+endif
+
 cflags-$(CONFIG_M486SX)                += -march=i486
 cflags-$(CONFIG_M486)          += -march=i486
 cflags-$(CONFIG_M586)          += -march=i586
@@ -19,11 +25,11 @@ cflags-$(CONFIG_MK6)                += -march=k6
 # They make zero difference whatsosever to performance at this time.
 cflags-$(CONFIG_MK7)           += -march=athlon
 cflags-$(CONFIG_MK8)           += $(call cc-option,-march=k8,-march=athlon)
-cflags-$(CONFIG_MCRUSOE)       += -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
-cflags-$(CONFIG_MEFFICEON)     += -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MCRUSOE)       += -march=i686 $(align)
+cflags-$(CONFIG_MEFFICEON)     += -march=i686 $(call tune,pentium3) $(align)
 cflags-$(CONFIG_MWINCHIPC6)    += $(call cc-option,-march=winchip-c6,-march=i586)
 cflags-$(CONFIG_MWINCHIP3D)    += $(call cc-option,-march=winchip2,-march=i586)
-cflags-$(CONFIG_MCYRIXIII)     += $(call cc-option,-march=c3,-march=i486) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MCYRIXIII)     += $(call cc-option,-march=c3,-march=i486) $(align)
 cflags-$(CONFIG_MVIAC3_2)      += $(call cc-option,-march=c3-2,-march=i686)
 cflags-$(CONFIG_MVIAC7)                += -march=i686
 cflags-$(CONFIG_MCORE2)                += -march=i686 $(call tune,core2)
index 90e682a..32a1ad3 100644 (file)
@@ -99,7 +99,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val)
 /*
  * IPI implementation on Hyper-V.
  */
-static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
+static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
+               bool exclude_self)
 {
        struct hv_send_ipi_ex **arg;
        struct hv_send_ipi_ex *ipi_arg;
@@ -123,7 +124,10 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
 
        if (!cpumask_equal(mask, cpu_present_mask)) {
                ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
-               nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
+               if (exclude_self)
+                       nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask);
+               else
+                       nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
        }
        if (nr_bank < 0)
                goto ipi_mask_ex_done;
@@ -138,15 +142,25 @@ ipi_mask_ex_done:
        return hv_result_success(status);
 }
 
-static bool __send_ipi_mask(const struct cpumask *mask, int vector)
+static bool __send_ipi_mask(const struct cpumask *mask, int vector,
+               bool exclude_self)
 {
-       int cur_cpu, vcpu;
+       int cur_cpu, vcpu, this_cpu = smp_processor_id();
        struct hv_send_ipi ipi_arg;
        u64 status;
+       unsigned int weight;
 
        trace_hyperv_send_ipi_mask(mask, vector);
 
-       if (cpumask_empty(mask))
+       weight = cpumask_weight(mask);
+
+       /*
+        * Do nothing if
+        *   1. the mask is empty
+        *   2. the mask only contains self when exclude_self is true
+        */
+       if (weight == 0 ||
+           (exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask)))
                return true;
 
        if (!hv_hypercall_pg)
@@ -172,6 +186,8 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
        ipi_arg.cpu_mask = 0;
 
        for_each_cpu(cur_cpu, mask) {
+               if (exclude_self && cur_cpu == this_cpu)
+                       continue;
                vcpu = hv_cpu_number_to_vp_number(cur_cpu);
                if (vcpu == VP_INVAL)
                        return false;
@@ -191,7 +207,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
        return hv_result_success(status);
 
 do_ex_hypercall:
-       return __send_ipi_mask_ex(mask, vector);
+       return __send_ipi_mask_ex(mask, vector, exclude_self);
 }
 
 static bool __send_ipi_one(int cpu, int vector)
@@ -208,7 +224,7 @@ static bool __send_ipi_one(int cpu, int vector)
                return false;
 
        if (vp >= 64)
-               return __send_ipi_mask_ex(cpumask_of(cpu), vector);
+               return __send_ipi_mask_ex(cpumask_of(cpu), vector, false);
 
        status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
        return hv_result_success(status);
@@ -222,20 +238,13 @@ static void hv_send_ipi(int cpu, int vector)
 
 static void hv_send_ipi_mask(const struct cpumask *mask, int vector)
 {
-       if (!__send_ipi_mask(mask, vector))
+       if (!__send_ipi_mask(mask, vector, false))
                orig_apic.send_IPI_mask(mask, vector);
 }
 
 static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 {
-       unsigned int this_cpu = smp_processor_id();
-       struct cpumask new_mask;
-       const struct cpumask *local_mask;
-
-       cpumask_copy(&new_mask, mask);
-       cpumask_clear_cpu(this_cpu, &new_mask);
-       local_mask = &new_mask;
-       if (!__send_ipi_mask(local_mask, vector))
+       if (!__send_ipi_mask(mask, vector, true))
                orig_apic.send_IPI_mask_allbutself(mask, vector);
 }
 
@@ -246,7 +255,7 @@ static void hv_send_ipi_allbutself(int vector)
 
 static void hv_send_ipi_all(int vector)
 {
-       if (!__send_ipi_mask(cpu_online_mask, vector))
+       if (!__send_ipi_mask(cpu_online_mask, vector, false))
                orig_apic.send_IPI_all(vector);
 }
 
index 87bd602..6a5f3ac 100644 (file)
@@ -46,7 +46,7 @@ struct kvm_page_track_notifier_node {
                            struct kvm_page_track_notifier_node *node);
 };
 
-void kvm_page_track_init(struct kvm *kvm);
+int kvm_page_track_init(struct kvm *kvm);
 void kvm_page_track_cleanup(struct kvm *kvm);
 
 void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);
index 5c7bcaa..1d5f14a 100644 (file)
@@ -2,8 +2,6 @@
 #ifndef _ASM_X86_PKEYS_H
 #define _ASM_X86_PKEYS_H
 
-#define ARCH_DEFAULT_PKEY      0
-
 /*
  * If more than 16 keys are ever supported, a thorough audit
  * will be necessary to ensure that the types that store key
index f3fbb84..68c257a 100644 (file)
@@ -275,7 +275,7 @@ static inline int enqcmds(void __iomem *dst, const void *src)
 {
        const struct { char _[64]; } *__src = src;
        struct { char _[64]; } __iomem *__dst = dst;
-       int zf;
+       bool zf;
 
        /*
         * ENQCMDS %(rdx), rax
index c9fa7be..5c95d24 100644 (file)
@@ -301,8 +301,8 @@ do {                                                                        \
        unsigned int __gu_low, __gu_high;                               \
        const unsigned int __user *__gu_ptr;                            \
        __gu_ptr = (const void __user *)(ptr);                          \
-       __get_user_asm(__gu_low, ptr, "l", "=r", label);                \
-       __get_user_asm(__gu_high, ptr+1, "l", "=r", label);             \
+       __get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);           \
+       __get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);        \
        (x) = ((unsigned long long)__gu_high << 32) | __gu_low;         \
 } while (0)
 #else
index 6b56d0d..66b4ddd 100644 (file)
@@ -3,14 +3,10 @@
 #define _ASM_X86_SWIOTLB_XEN_H
 
 #ifdef CONFIG_SWIOTLB_XEN
-extern int xen_swiotlb;
 extern int __init pci_xen_swiotlb_detect(void);
-extern void __init pci_xen_swiotlb_init(void);
 extern int pci_xen_swiotlb_init_late(void);
 #else
-#define xen_swiotlb (0)
-static inline int __init pci_xen_swiotlb_detect(void) { return 0; }
-static inline void __init pci_xen_swiotlb_init(void) { }
+#define pci_xen_swiotlb_detect NULL
 static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
 #endif
 
index 8cb7816..193204a 100644 (file)
@@ -1253,6 +1253,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
 
 static void kill_me_now(struct callback_head *ch)
 {
+       struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
+
+       p->mce_count = 0;
        force_sig(SIGBUS);
 }
 
@@ -1262,6 +1265,7 @@ static void kill_me_maybe(struct callback_head *cb)
        int flags = MF_ACTION_REQUIRED;
        int ret;
 
+       p->mce_count = 0;
        pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
 
        if (!p->mce_ripv)
@@ -1290,17 +1294,34 @@ static void kill_me_maybe(struct callback_head *cb)
        }
 }
 
-static void queue_task_work(struct mce *m, int kill_current_task)
+static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
 {
-       current->mce_addr = m->addr;
-       current->mce_kflags = m->kflags;
-       current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
-       current->mce_whole_page = whole_page(m);
+       int count = ++current->mce_count;
 
-       if (kill_current_task)
-               current->mce_kill_me.func = kill_me_now;
-       else
-               current->mce_kill_me.func = kill_me_maybe;
+       /* First call, save all the details */
+       if (count == 1) {
+               current->mce_addr = m->addr;
+               current->mce_kflags = m->kflags;
+               current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
+               current->mce_whole_page = whole_page(m);
+
+               if (kill_current_task)
+                       current->mce_kill_me.func = kill_me_now;
+               else
+                       current->mce_kill_me.func = kill_me_maybe;
+       }
+
+       /* Ten is likely overkill. Don't expect more than two faults before task_work() */
+       if (count > 10)
+               mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
+
+       /* Second or later call, make sure page address matches the one from first call */
+       if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
+               mce_panic("Consecutive machine checks to different user pages", m, msg);
+
+       /* Do not call task_work_add() more than once */
+       if (count > 1)
+               return;
 
        task_work_add(current, &current->mce_kill_me, TWA_RESUME);
 }
@@ -1438,7 +1459,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
                /* If this triggers there is no way to recover. Die hard. */
                BUG_ON(!on_thread_stack() || !user_mode(regs));
 
-               queue_task_work(&m, kill_current_task);
+               queue_task_work(&m, msg, kill_current_task);
 
        } else {
                /*
@@ -1456,7 +1477,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
                }
 
                if (m.kflags & MCE_IN_KERNEL_COPYIN)
-                       queue_task_work(&m, kill_current_task);
+                       queue_task_work(&m, msg, kill_current_task);
        }
 out:
        mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
index 79f1641..40ed44e 100644 (file)
@@ -830,6 +830,20 @@ void __init setup_arch(char **cmdline_p)
 
        x86_init.oem.arch_setup();
 
+       /*
+        * Do some memory reservations *before* memory is added to memblock, so
+        * memblock allocations won't overwrite it.
+        *
+        * After this point, everything still needed from the boot loader or
+        * firmware or kernel text should be early reserved or marked not RAM in
+        * e820. All other memory is free game.
+        *
+        * This call needs to happen before e820__memory_setup() which calls the
+        * xen_memory_setup() on Xen dom0 which relies on the fact that those
+        * early reservations have happened already.
+        */
+       early_reserve_memory();
+
        iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
        e820__memory_setup();
        parse_setup_data();
@@ -876,18 +890,6 @@ void __init setup_arch(char **cmdline_p)
 
        parse_early_param();
 
-       /*
-        * Do some memory reservations *before* memory is added to
-        * memblock, so memblock allocations won't overwrite it.
-        * Do it after early param, so we could get (unlikely) panic from
-        * serial.
-        *
-        * After this point everything still needed from the boot loader or
-        * firmware or kernel text should be early reserved or marked not
-        * RAM in e820. All other memory is free game.
-        */
-       early_reserve_memory();
-
 #ifdef CONFIG_MEMORY_HOTPLUG
        /*
         * Memory used by the kernel cannot be hot-removed because Linux
index 78a32b9..5afd985 100644 (file)
@@ -135,7 +135,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 
 static void __init pcpu_fc_free(void *ptr, size_t size)
 {
-       memblock_free(__pa(ptr), size);
+       memblock_free_ptr(ptr, size);
 }
 
 static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
index 2837110..c589ac8 100644 (file)
@@ -4206,7 +4206,7 @@ static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
        u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
 
        if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
-               return emulate_ud(ctxt);
+               return emulate_gp(ctxt, 0);
 
        return X86EMUL_CONTINUE;
 }
index 232a86a..d5124b5 100644 (file)
@@ -939,7 +939,7 @@ static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_init(&hv_vcpu->stimer[i], i);
 
-       hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
+       hv_vcpu->vp_index = vcpu->vcpu_idx;
 
        return 0;
 }
@@ -1444,7 +1444,6 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
        switch (msr) {
        case HV_X64_MSR_VP_INDEX: {
                struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
-               int vcpu_idx = kvm_vcpu_get_idx(vcpu);
                u32 new_vp_index = (u32)data;
 
                if (!host || new_vp_index >= KVM_MAX_VCPUS)
@@ -1459,9 +1458,9 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
                 * VP index is changing, adjust num_mismatched_vp_indexes if
                 * it now matches or no longer matches vcpu_idx.
                 */
-               if (hv_vcpu->vp_index == vcpu_idx)
+               if (hv_vcpu->vp_index == vcpu->vcpu_idx)
                        atomic_inc(&hv->num_mismatched_vp_indexes);
-               else if (new_vp_index == vcpu_idx)
+               else if (new_vp_index == vcpu->vcpu_idx)
                        atomic_dec(&hv->num_mismatched_vp_indexes);
 
                hv_vcpu->vp_index = new_vp_index;
index 730da85..ed1c4e5 100644 (file)
@@ -83,7 +83,7 @@ static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
 {
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 
-       return hv_vcpu ? hv_vcpu->vp_index : kvm_vcpu_get_idx(vcpu);
+       return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
 }
 
 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
index ff005fe..8c065da 100644 (file)
@@ -319,8 +319,8 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
        unsigned index;
        bool mask_before, mask_after;
        union kvm_ioapic_redirect_entry *e;
-       unsigned long vcpu_bitmap;
        int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
+       DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
 
        switch (ioapic->ioregsel) {
        case IOAPIC_REG_VERSION:
@@ -384,9 +384,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
                        irq.shorthand = APIC_DEST_NOSHORT;
                        irq.dest_id = e->fields.dest_id;
                        irq.msi_redir_hint = false;
-                       bitmap_zero(&vcpu_bitmap, 16);
+                       bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
                        kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
-                                                &vcpu_bitmap);
+                                                vcpu_bitmap);
                        if (old_dest_mode != e->fields.dest_mode ||
                            old_dest_id != e->fields.dest_id) {
                                /*
@@ -399,10 +399,10 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
                                    kvm_lapic_irq_dest_mode(
                                        !!e->fields.dest_mode);
                                kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
-                                                        &vcpu_bitmap);
+                                                        vcpu_bitmap);
                        }
                        kvm_make_scan_ioapic_request_mask(ioapic->kvm,
-                                                         &vcpu_bitmap);
+                                                         vcpu_bitmap);
                } else {
                        kvm_make_scan_ioapic_request(ioapic->kvm);
                }
index 2d7e611..1a64ba5 100644 (file)
@@ -2027,8 +2027,8 @@ static void mmu_pages_clear_parents(struct mmu_page_path *parents)
        } while (!sp->unsync_children);
 }
 
-static void mmu_sync_children(struct kvm_vcpu *vcpu,
-                             struct kvm_mmu_page *parent)
+static int mmu_sync_children(struct kvm_vcpu *vcpu,
+                            struct kvm_mmu_page *parent, bool can_yield)
 {
        int i;
        struct kvm_mmu_page *sp;
@@ -2055,12 +2055,18 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
                }
                if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
                        kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
+                       if (!can_yield) {
+                               kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+                               return -EINTR;
+                       }
+
                        cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
                        flush = false;
                }
        }
 
        kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
+       return 0;
 }
 
 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
@@ -2146,9 +2152,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                        kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
                }
 
-               if (sp->unsync_children)
-                       kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
-
                __clear_sp_write_flooding_count(sp);
 
 trace_get_page:
@@ -3684,7 +3687,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
                write_lock(&vcpu->kvm->mmu_lock);
                kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
 
-               mmu_sync_children(vcpu, sp);
+               mmu_sync_children(vcpu, sp, true);
 
                kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
                write_unlock(&vcpu->kvm->mmu_lock);
@@ -3700,7 +3703,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
                if (IS_VALID_PAE_ROOT(root)) {
                        root &= PT64_BASE_ADDR_MASK;
                        sp = to_shadow_page(root);
-                       mmu_sync_children(vcpu, sp);
+                       mmu_sync_children(vcpu, sp, true);
                }
        }
 
index 269f11f..21427e8 100644 (file)
@@ -164,13 +164,13 @@ void kvm_page_track_cleanup(struct kvm *kvm)
        cleanup_srcu_struct(&head->track_srcu);
 }
 
-void kvm_page_track_init(struct kvm *kvm)
+int kvm_page_track_init(struct kvm *kvm)
 {
        struct kvm_page_track_notifier_head *head;
 
        head = &kvm->arch.track_notifier_head;
-       init_srcu_struct(&head->track_srcu);
        INIT_HLIST_HEAD(&head->track_notifier_list);
+       return init_srcu_struct(&head->track_srcu);
 }
 
 /*
index 7d03e9b..913d52a 100644 (file)
@@ -707,8 +707,27 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
                if (!is_shadow_present_pte(*it.sptep)) {
                        table_gfn = gw->table_gfn[it.level - 2];
                        access = gw->pt_access[it.level - 2];
-                       sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
-                                             false, access);
+                       sp = kvm_mmu_get_page(vcpu, table_gfn, addr,
+                                             it.level-1, false, access);
+                       /*
+                        * We must synchronize the pagetable before linking it
+                        * because the guest doesn't need to flush tlb when
+                        * the gpte is changed from non-present to present.
+                        * Otherwise, the guest may use the wrong mapping.
+                        *
+                        * For PG_LEVEL_4K, kvm_mmu_get_page() has already
+                        * synchronized it transiently via kvm_sync_page().
+                        *
+                        * For higher level pagetable, we synchronize it via
+                        * the slower mmu_sync_children().  If it needs to
+                        * break, some progress has been made; return
+                        * RET_PF_RETRY and retry on the next #PF.
+                        * KVM_REQ_MMU_SYNC is not necessary but it
+                        * expedites the process.
+                        */
+                       if (sp->unsync_children &&
+                           mmu_sync_children(vcpu, sp, false))
+                               return RET_PF_RETRY;
                }
 
                /*
@@ -1047,14 +1066,6 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
  * Using the cached information from sp->gfns is safe because:
  * - The spte has a reference to the struct page, so the pfn for a given gfn
  *   can't change unless all sptes pointing to it are nuked first.
- *
- * Note:
- *   We should flush all tlbs if spte is dropped even though guest is
- *   responsible for it. Since if we don't, kvm_mmu_notifier_invalidate_page
- *   and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't
- *   used by guest then tlbs are not flushed, so guest is allowed to access the
- *   freed pages.
- *   And we increase kvm->tlbs_dirty to delay tlbs flush in this case.
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
@@ -1107,13 +1118,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                        return 0;
 
                if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-                       /*
-                        * Update spte before increasing tlbs_dirty to make
-                        * sure no tlb flush is lost after spte is zapped; see
-                        * the comments in kvm_flush_remote_tlbs().
-                        */
-                       smp_wmb();
-                       vcpu->kvm->tlbs_dirty++;
+                       set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
                        continue;
                }
 
@@ -1128,12 +1133,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
                if (gfn != sp->gfns[i]) {
                        drop_spte(vcpu->kvm, &sp->spt[i]);
-                       /*
-                        * The same as above where we are doing
-                        * prefetch_invalid_gpte().
-                        */
-                       smp_wmb();
-                       vcpu->kvm->tlbs_dirty++;
+                       set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
                        continue;
                }
 
index 2545d0c..510b833 100644 (file)
@@ -545,7 +545,6 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
                (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
                (svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);
 
-       svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
        svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
        svm->vmcb->control.int_state           = svm->nested.ctl.int_state;
        svm->vmcb->control.event_inj           = svm->nested.ctl.event_inj;
@@ -579,7 +578,7 @@ static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to
 }
 
 int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
-                        struct vmcb *vmcb12)
+                        struct vmcb *vmcb12, bool from_vmrun)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        int ret;
@@ -609,13 +608,16 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
        nested_vmcb02_prepare_save(svm, vmcb12);
 
        ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
-                                 nested_npt_enabled(svm), true);
+                                 nested_npt_enabled(svm), from_vmrun);
        if (ret)
                return ret;
 
        if (!npt_enabled)
                vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
 
+       if (!from_vmrun)
+               kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+
        svm_set_gif(svm, true);
 
        return 0;
@@ -681,7 +683,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
 
        svm->nested.nested_run_pending = 1;
 
-       if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
+       if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
                goto out_exit_err;
 
        if (nested_svm_vmrun_msrpm(svm))
index 75e0b21..c36b5fe 100644 (file)
@@ -595,43 +595,50 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
        return 0;
 }
 
-static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
+static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
+                                   int *error)
 {
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_update_vmsa vmsa;
+       struct vcpu_svm *svm = to_svm(vcpu);
+       int ret;
+
+       /* Perform some pre-encryption checks against the VMSA */
+       ret = sev_es_sync_vmsa(svm);
+       if (ret)
+               return ret;
+
+       /*
+        * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
+        * the VMSA memory content (i.e it will write the same memory region
+        * with the guest's key), so invalidate it first.
+        */
+       clflush_cache_range(svm->vmsa, PAGE_SIZE);
+
+       vmsa.reserved = 0;
+       vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
+       vmsa.address = __sme_pa(svm->vmsa);
+       vmsa.len = PAGE_SIZE;
+       return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
+}
+
+static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
        struct kvm_vcpu *vcpu;
        int i, ret;
 
        if (!sev_es_guest(kvm))
                return -ENOTTY;
 
-       vmsa.reserved = 0;
-
        kvm_for_each_vcpu(i, vcpu, kvm) {
-               struct vcpu_svm *svm = to_svm(vcpu);
-
-               /* Perform some pre-encryption checks against the VMSA */
-               ret = sev_es_sync_vmsa(svm);
+               ret = mutex_lock_killable(&vcpu->mutex);
                if (ret)
                        return ret;
 
-               /*
-                * The LAUNCH_UPDATE_VMSA command will perform in-place
-                * encryption of the VMSA memory content (i.e it will write
-                * the same memory region with the guest's key), so invalidate
-                * it first.
-                */
-               clflush_cache_range(svm->vmsa, PAGE_SIZE);
+               ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);
 
-               vmsa.handle = sev->handle;
-               vmsa.address = __sme_pa(svm->vmsa);
-               vmsa.len = PAGE_SIZE;
-               ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa,
-                                   &argp->error);
+               mutex_unlock(&vcpu->mutex);
                if (ret)
                        return ret;
-
-               svm->vcpu.arch.guest_state_protected = true;
        }
 
        return 0;
@@ -1397,8 +1404,10 @@ static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
 
        /* Bind ASID to this guest */
        ret = sev_bind_asid(kvm, start.handle, error);
-       if (ret)
+       if (ret) {
+               sev_decommission(start.handle);
                goto e_free_session;
+       }
 
        params.handle = start.handle;
        if (copy_to_user((void __user *)(uintptr_t)argp->data,
@@ -1464,7 +1473,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
 
        /* Pin guest memory */
        guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
-                                   PAGE_SIZE, &n, 0);
+                                   PAGE_SIZE, &n, 1);
        if (IS_ERR(guest_page)) {
                ret = PTR_ERR(guest_page);
                goto e_free_trans;
@@ -1501,6 +1510,20 @@ static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
        return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
 }
 
+static bool cmd_allowed_from_miror(u32 cmd_id)
+{
+       /*
+        * Allow mirrors VM to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
+        * active mirror VMs. Also allow the debugging and status commands.
+        */
+       if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
+           cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
+           cmd_id == KVM_SEV_DBG_ENCRYPT)
+               return true;
+
+       return false;
+}
+
 int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 {
        struct kvm_sev_cmd sev_cmd;
@@ -1517,8 +1540,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 
        mutex_lock(&kvm->lock);
 
-       /* enc_context_owner handles all memory enc operations */
-       if (is_mirroring_enc_context(kvm)) {
+       /* Only the enc_context_owner handles some memory enc operations. */
+       if (is_mirroring_enc_context(kvm) &&
+           !cmd_allowed_from_miror(sev_cmd.id)) {
                r = -EINVAL;
                goto out;
        }
@@ -1715,8 +1739,7 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
 {
        struct file *source_kvm_file;
        struct kvm *source_kvm;
-       struct kvm_sev_info *mirror_sev;
-       unsigned int asid;
+       struct kvm_sev_info source_sev, *mirror_sev;
        int ret;
 
        source_kvm_file = fget(source_fd);
@@ -1739,7 +1762,8 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
                goto e_source_unlock;
        }
 
-       asid = to_kvm_svm(source_kvm)->sev_info.asid;
+       memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
+              sizeof(source_sev));
 
        /*
         * The mirror kvm holds an enc_context_owner ref so its asid can't
@@ -1759,8 +1783,16 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
        /* Set enc_context_owner and copy its encryption context over */
        mirror_sev = &to_kvm_svm(kvm)->sev_info;
        mirror_sev->enc_context_owner = source_kvm;
-       mirror_sev->asid = asid;
        mirror_sev->active = true;
+       mirror_sev->asid = source_sev.asid;
+       mirror_sev->fd = source_sev.fd;
+       mirror_sev->es_active = source_sev.es_active;
+       mirror_sev->handle = source_sev.handle;
+       /*
+        * Do not copy ap_jump_table. Since the mirror does not share the same
+        * KVM contexts as the original, and they may have different
+        * memory-views.
+        */
 
        mutex_unlock(&kvm->lock);
        return 0;
index 05e8d4d..9896850 100644 (file)
@@ -1566,6 +1566,8 @@ static void svm_clear_vintr(struct vcpu_svm *svm)
 
                svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
                        V_IRQ_INJECTION_BITS_MASK;
+
+               svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
        }
 
        vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
@@ -2222,6 +2224,10 @@ static int gp_interception(struct kvm_vcpu *vcpu)
        if (error_code)
                goto reinject;
 
+       /* All SVM instructions expect page aligned RAX */
+       if (svm->vmcb->save.rax & ~PAGE_MASK)
+               goto reinject;
+
        /* Decode the instruction for usage later */
        if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
                goto reinject;
@@ -4285,43 +4291,44 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
        struct kvm_host_map map_save;
        int ret;
 
-       if (is_guest_mode(vcpu)) {
-               /* FED8h - SVM Guest */
-               put_smstate(u64, smstate, 0x7ed8, 1);
-               /* FEE0h - SVM Guest VMCB Physical Address */
-               put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
+       if (!is_guest_mode(vcpu))
+               return 0;
 
-               svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
-               svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
-               svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+       /* FED8h - SVM Guest */
+       put_smstate(u64, smstate, 0x7ed8, 1);
+       /* FEE0h - SVM Guest VMCB Physical Address */
+       put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
 
-               ret = nested_svm_vmexit(svm);
-               if (ret)
-                       return ret;
+       svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+       svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+       svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
 
-               /*
-                * KVM uses VMCB01 to store L1 host state while L2 runs but
-                * VMCB01 is going to be used during SMM and thus the state will
-                * be lost. Temporary save non-VMLOAD/VMSAVE state to the host save
-                * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
-                * format of the area is identical to guest save area offsetted
-                * by 0x400 (matches the offset of 'struct vmcb_save_area'
-                * within 'struct vmcb'). Note: HSAVE area may also be used by
-                * L1 hypervisor to save additional host context (e.g. KVM does
-                * that, see svm_prepare_guest_switch()) which must be
-                * preserved.
-                */
-               if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
-                                &map_save) == -EINVAL)
-                       return 1;
+       ret = nested_svm_vmexit(svm);
+       if (ret)
+               return ret;
 
-               BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
+       /*
+        * KVM uses VMCB01 to store L1 host state while L2 runs but
+        * VMCB01 is going to be used during SMM and thus the state will
+        * be lost. Temporary save non-VMLOAD/VMSAVE state to the host save
+        * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
+        * format of the area is identical to guest save area offsetted
+        * by 0x400 (matches the offset of 'struct vmcb_save_area'
+        * within 'struct vmcb'). Note: HSAVE area may also be used by
+        * L1 hypervisor to save additional host context (e.g. KVM does
+        * that, see svm_prepare_guest_switch()) which must be
+        * preserved.
+        */
+       if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
+                        &map_save) == -EINVAL)
+               return 1;
 
-               svm_copy_vmrun_state(map_save.hva + 0x400,
-                                    &svm->vmcb01.ptr->save);
+       BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
 
-               kvm_vcpu_unmap(vcpu, &map_save, true);
-       }
+       svm_copy_vmrun_state(map_save.hva + 0x400,
+                            &svm->vmcb01.ptr->save);
+
+       kvm_vcpu_unmap(vcpu, &map_save, true);
        return 0;
 }
 
@@ -4329,50 +4336,54 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct kvm_host_map map, map_save;
-       int ret = 0;
+       u64 saved_efer, vmcb12_gpa;
+       struct vmcb *vmcb12;
+       int ret;
 
-       if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
-               u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
-               u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
-               u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
-               struct vmcb *vmcb12;
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
+               return 0;
 
-               if (guest) {
-                       if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
-                               return 1;
+       /* Non-zero if SMI arrived while vCPU was in guest mode. */
+       if (!GET_SMSTATE(u64, smstate, 0x7ed8))
+               return 0;
 
-                       if (!(saved_efer & EFER_SVME))
-                               return 1;
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+               return 1;
 
-                       if (kvm_vcpu_map(vcpu,
-                                        gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
-                               return 1;
+       saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
+       if (!(saved_efer & EFER_SVME))
+               return 1;
 
-                       if (svm_allocate_nested(svm))
-                               return 1;
+       vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
+       if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
+               return 1;
 
-                       vmcb12 = map.hva;
+       ret = 1;
+       if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)
+               goto unmap_map;
 
-                       nested_load_control_from_vmcb12(svm, &vmcb12->control);
+       if (svm_allocate_nested(svm))
+               goto unmap_save;
 
-                       ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
-                       kvm_vcpu_unmap(vcpu, &map, true);
+       /*
+        * Restore L1 host state from L1 HSAVE area as VMCB01 was
+        * used during SMM (see svm_enter_smm())
+        */
 
-                       /*
-                        * Restore L1 host state from L1 HSAVE area as VMCB01 was
-                        * used during SMM (see svm_enter_smm())
-                        */
-                       if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
-                                        &map_save) == -EINVAL)
-                               return 1;
+       svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
 
-                       svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
-                                            map_save.hva + 0x400);
+       /*
+        * Enter the nested guest now
+        */
 
-                       kvm_vcpu_unmap(vcpu, &map_save, true);
-               }
-       }
+       vmcb12 = map.hva;
+       nested_load_control_from_vmcb12(svm, &vmcb12->control);
+       ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
 
+unmap_save:
+       kvm_vcpu_unmap(vcpu, &map_save, true);
+unmap_map:
+       kvm_vcpu_unmap(vcpu, &map, true);
        return ret;
 }
 
index 524d943..128a54b 100644 (file)
@@ -459,7 +459,8 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
        return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
 }
 
-int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
+int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
+                        u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
 void svm_leave_nested(struct vcpu_svm *svm);
 void svm_free_nested(struct vcpu_svm *svm);
 int svm_allocate_nested(struct vcpu_svm *svm);
index 0dab1b7..ba6f99f 100644 (file)
@@ -353,14 +353,20 @@ void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata)
        switch (msr_index) {
        case MSR_IA32_VMX_EXIT_CTLS:
        case MSR_IA32_VMX_TRUE_EXIT_CTLS:
-               ctl_high &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+               ctl_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
                break;
        case MSR_IA32_VMX_ENTRY_CTLS:
        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
-               ctl_high &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+               ctl_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
                break;
        case MSR_IA32_VMX_PROCBASED_CTLS2:
-               ctl_high &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+               ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
+               break;
+       case MSR_IA32_VMX_PINBASED_CTLS:
+               ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
+               break;
+       case MSR_IA32_VMX_VMFUNC:
+               ctl_low &= ~EVMCS1_UNSUPPORTED_VMFUNC;
                break;
        }
 
index ccb03d6..eedcebf 100644 (file)
@@ -2583,8 +2583,13 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
         * Guest state is invalid and unrestricted guest is disabled,
         * which means L1 attempted VMEntry to L2 with invalid state.
         * Fail the VMEntry.
+        *
+        * However when force loading the guest state (SMM exit or
+        * loading nested state after migration, it is possible to
+        * have invalid guest state now, which will be later fixed by
+        * restoring L2 register state
         */
-       if (CC(!vmx_guest_state_valid(vcpu))) {
+       if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) {
                *entry_failure_code = ENTRY_FAIL_DEFAULT;
                return -EINVAL;
        }
@@ -4351,6 +4356,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
        if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
                                vmcs12->vm_exit_msr_load_count))
                nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
+
+       to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
 }
 
 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
@@ -4899,14 +4906,7 @@ out_vmcs02:
        return -ENOMEM;
 }
 
-/*
- * Emulate the VMXON instruction.
- * Currently, we just remember that VMX is active, and do not save or even
- * inspect the argument to VMXON (the so-called "VMXON pointer") because we
- * do not currently need to store anything in that guest-allocated memory
- * region. Consequently, VMCLEAR and VMPTRLD also do not verify that the their
- * argument is different from the VMXON pointer (which the spec says they do).
- */
+/* Emulate the VMXON instruction. */
 static int handle_vmon(struct kvm_vcpu *vcpu)
 {
        int ret;
@@ -5903,6 +5903,12 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
        case EXIT_REASON_VMFUNC:
                /* VM functions are emulated through L2->L0 vmexits. */
                return true;
+       case EXIT_REASON_BUS_LOCK:
+               /*
+                * At present, bus lock VM exit is never exposed to L1.
+                * Handle L2's bus locks in L0 directly.
+                */
+               return true;
        default:
                break;
        }
index 0c2c0d5..9ecfcf1 100644 (file)
@@ -1323,7 +1323,7 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
        vmx_prepare_switch_to_host(to_vmx(vcpu));
 }
 
-static bool emulation_required(struct kvm_vcpu *vcpu)
+bool vmx_emulation_required(struct kvm_vcpu *vcpu)
 {
        return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
 }
@@ -1367,7 +1367,7 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
        vmcs_writel(GUEST_RFLAGS, rflags);
 
        if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
-               vmx->emulation_required = emulation_required(vcpu);
+               vmx->emulation_required = vmx_emulation_required(vcpu);
 }
 
 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
@@ -1837,10 +1837,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                                    &msr_info->data))
                        return 1;
                /*
-                * Enlightened VMCS v1 doesn't have certain fields, but buggy
-                * Hyper-V versions are still trying to use corresponding
-                * features when they are exposed. Filter out the essential
-                * minimum.
+                * Enlightened VMCS v1 doesn't have certain VMCS fields but
+                * instead of just ignoring the features, different Hyper-V
+                * versions are either trying to use them and fail or do some
+                * sanity checking and refuse to boot. Filter all unsupported
+                * features out.
                 */
                if (!msr_info->host_initiated &&
                    vmx->nested.enlightened_vmcs_enabled)
@@ -3077,7 +3078,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        }
 
        /* depends on vcpu->arch.cr0 to be set to a new value */
-       vmx->emulation_required = emulation_required(vcpu);
+       vmx->emulation_required = vmx_emulation_required(vcpu);
 }
 
 static int vmx_get_max_tdp_level(void)
@@ -3330,7 +3331,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int
 {
        __vmx_set_segment(vcpu, var, seg);
 
-       to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
+       to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
 }
 
 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@@ -6621,10 +6622,24 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
                     vmx->loaded_vmcs->soft_vnmi_blocked))
                vmx->loaded_vmcs->entry_time = ktime_get();
 
-       /* Don't enter VMX if guest state is invalid, let the exit handler
-          start emulation until we arrive back to a valid state */
-       if (vmx->emulation_required)
+       /*
+        * Don't enter VMX if guest state is invalid, let the exit handler
+        * start emulation until we arrive back to a valid state.  Synthesize a
+        * consistency check VM-Exit due to invalid guest state and bail.
+        */
+       if (unlikely(vmx->emulation_required)) {
+
+               /* We don't emulate invalid state of a nested guest */
+               vmx->fail = is_guest_mode(vcpu);
+
+               vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
+               vmx->exit_reason.failed_vmentry = 1;
+               kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
+               vmx->exit_qualification = ENTRY_FAIL_DEFAULT;
+               kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
+               vmx->exit_intr_info = 0;
                return EXIT_FASTPATH_NONE;
+       }
 
        trace_kvm_entry(vcpu);
 
index 4858c5f..592217f 100644 (file)
@@ -248,12 +248,8 @@ struct vcpu_vmx {
         * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
         * of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to
         * be loaded into hardware if those conditions aren't met.
-        * nr_active_uret_msrs tracks the number of MSRs that need to be loaded
-        * into hardware when running the guest.  guest_uret_msrs[] is resorted
-        * whenever the number of "active" uret MSRs is modified.
         */
        struct vmx_uret_msr   guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
-       int                   nr_active_uret_msrs;
        bool                  guest_uret_msrs_loaded;
 #ifdef CONFIG_X86_64
        u64                   msr_host_kernel_gs_base;
@@ -359,6 +355,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
 void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
                        unsigned long fs_base, unsigned long gs_base);
 int vmx_get_cpl(struct kvm_vcpu *vcpu);
+bool vmx_emulation_required(struct kvm_vcpu *vcpu);
 unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
 void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
index 28ef141..aabd3a2 100644 (file)
@@ -1332,6 +1332,13 @@ static const u32 msrs_to_save_all[] = {
        MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
        MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
        MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
+
+       MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
+       MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
+       MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
+       MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
+       MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
+       MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
 };
 
 static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
@@ -2969,7 +2976,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                                       offsetof(struct compat_vcpu_info, time));
        if (vcpu->xen.vcpu_time_info_set)
                kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
-       if (v == kvm_get_vcpu(v->kvm, 0))
+       if (!v->vcpu_idx)
                kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
        return 0;
 }
@@ -7658,6 +7665,13 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
 
                /* Process a latched INIT or SMI, if any.  */
                kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+               /*
+                * Even if KVM_SET_SREGS2 loaded PDPTRs out of band,
+                * on SMM exit we still need to reload them from
+                * guest memory
+                */
+               vcpu->arch.pdptrs_from_userspace = false;
        }
 
        kvm_mmu_reset_context(vcpu);
@@ -10652,6 +10666,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        int r;
 
        vcpu->arch.last_vmentry_cpu = -1;
+       vcpu->arch.regs_avail = ~0;
+       vcpu->arch.regs_dirty = ~0;
 
        if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -10893,6 +10909,9 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
        kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
        kvm_rip_write(vcpu, 0xfff0);
 
+       vcpu->arch.cr3 = 0;
+       kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
+
        /*
         * CR0.CD/NW are set on RESET, preserved on INIT.  Note, some versions
         * of Intel's SDM list CD/NW as being set on INIT, but they contradict
@@ -11139,9 +11158,15 @@ void kvm_arch_free_vm(struct kvm *kvm)
 
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
+       int ret;
+
        if (type)
                return -EINVAL;
 
+       ret = kvm_page_track_init(kvm);
+       if (ret)
+               return ret;
+
        INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
        INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
@@ -11174,7 +11199,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        kvm_apicv_init(kvm);
        kvm_hv_init_vm(kvm);
-       kvm_page_track_init(kvm);
        kvm_mmu_init_vm(kvm);
        kvm_xen_init_vm(kvm);
 
index 058f19b..c565def 100644 (file)
        ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
 
 #define __get_next(t, insn)    \
-       ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
+       ({ t r; memcpy(&r, insn->next_byte, sizeof(t)); insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
 
 #define __peek_nbyte_next(t, insn, n)  \
-       ({ t r = *(t*)((insn)->next_byte + n); leXX_to_cpu(t, r); })
+       ({ t r; memcpy(&r, (insn)->next_byte + n, sizeof(t)); leXX_to_cpu(t, r); })
 
 #define get_next(t, insn)      \
        ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
index b2eefde..84a2c8c 100644 (file)
@@ -710,7 +710,8 @@ oops:
 
 static noinline void
 kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
-                        unsigned long address, int signal, int si_code)
+                        unsigned long address, int signal, int si_code,
+                        u32 pkey)
 {
        WARN_ON_ONCE(user_mode(regs));
 
@@ -735,8 +736,12 @@ kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
 
                        set_signal_archinfo(address, error_code);
 
-                       /* XXX: hwpoison faults will set the wrong code. */
-                       force_sig_fault(signal, si_code, (void __user *)address);
+                       if (si_code == SEGV_PKUERR) {
+                               force_sig_pkuerr((void __user *)address, pkey);
+                       } else {
+                               /* XXX: hwpoison faults will set the wrong code. */
+                               force_sig_fault(signal, si_code, (void __user *)address);
+                       }
                }
 
                /*
@@ -798,7 +803,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
        struct task_struct *tsk = current;
 
        if (!user_mode(regs)) {
-               kernelmode_fixup_or_oops(regs, error_code, address, pkey, si_code);
+               kernelmode_fixup_or_oops(regs, error_code, address,
+                                        SIGSEGV, si_code, pkey);
                return;
        }
 
@@ -930,7 +936,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 {
        /* Kernel mode? Handle exceptions or die: */
        if (!user_mode(regs)) {
-               kernelmode_fixup_or_oops(regs, error_code, address, SIGBUS, BUS_ADRERR);
+               kernelmode_fixup_or_oops(regs, error_code, address,
+                                        SIGBUS, BUS_ADRERR, ARCH_DEFAULT_PKEY);
                return;
        }
 
@@ -1396,7 +1403,8 @@ good_area:
                 */
                if (!user_mode(regs))
                        kernelmode_fixup_or_oops(regs, error_code, address,
-                                                SIGBUS, BUS_ADRERR);
+                                                SIGBUS, BUS_ADRERR,
+                                                ARCH_DEFAULT_PKEY);
                return;
        }
 
@@ -1416,7 +1424,8 @@ good_area:
                return;
 
        if (fatal_signal_pending(current) && !user_mode(regs)) {
-               kernelmode_fixup_or_oops(regs, error_code, address, 0, 0);
+               kernelmode_fixup_or_oops(regs, error_code, address,
+                                        0, 0, ARCH_DEFAULT_PKEY);
                return;
        }
 
@@ -1424,7 +1433,8 @@ good_area:
                /* Kernel mode? Handle exceptions or die: */
                if (!user_mode(regs)) {
                        kernelmode_fixup_or_oops(regs, error_code, address,
-                                                SIGSEGV, SEGV_MAPERR);
+                                                SIGSEGV, SEGV_MAPERR,
+                                                ARCH_DEFAULT_PKEY);
                        return;
                }
 
index a6e1176..3609822 100644 (file)
@@ -1432,18 +1432,18 @@ int kern_addr_valid(unsigned long addr)
                return 0;
 
        p4d = p4d_offset(pgd, addr);
-       if (p4d_none(*p4d))
+       if (!p4d_present(*p4d))
                return 0;
 
        pud = pud_offset(p4d, addr);
-       if (pud_none(*pud))
+       if (!pud_present(*pud))
                return 0;
 
        if (pud_large(*pud))
                return pfn_valid(pud_pfn(*pud));
 
        pmd = pmd_offset(pud, addr);
-       if (pmd_none(*pmd))
+       if (!pmd_present(*pmd))
                return 0;
 
        if (pmd_large(*pmd))
index 1a50434..ef88537 100644 (file)
@@ -49,8 +49,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
                        p = early_alloc(PMD_SIZE, nid, false);
                        if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
                                return;
-                       else if (p)
-                               memblock_free(__pa(p), PMD_SIZE);
+                       memblock_free_ptr(p, PMD_SIZE);
                }
 
                p = early_alloc(PAGE_SIZE, nid, true);
@@ -86,8 +85,7 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
                        p = early_alloc(PUD_SIZE, nid, false);
                        if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
                                return;
-                       else if (p)
-                               memblock_free(__pa(p), PUD_SIZE);
+                       memblock_free_ptr(p, PUD_SIZE);
                }
 
                p = early_alloc(PAGE_SIZE, nid, true);
index a1b5c71..1e9b93b 100644 (file)
@@ -355,7 +355,7 @@ void __init numa_reset_distance(void)
 
        /* numa_distance could be 1LU marking allocation failure, test cnt */
        if (numa_distance_cnt)
-               memblock_free(__pa(numa_distance), size);
+               memblock_free_ptr(numa_distance, size);
        numa_distance_cnt = 0;
        numa_distance = NULL;   /* enable table creation */
 }
index 737491b..e801e30 100644 (file)
@@ -517,8 +517,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
        }
 
        /* free the copied physical distance table */
-       if (phys_dist)
-               memblock_free(__pa(phys_dist), phys_size);
+       memblock_free_ptr(phys_dist, phys_size);
        return;
 
 no_emu:
index 3112ca7..4ba2a3e 100644 (file)
@@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
        int err = 0;
 
        start = sanitize_phys(start);
-       end = sanitize_phys(end);
+
+       /*
+        * The end address passed into this function is exclusive, but
+        * sanitize_phys() expects an inclusive address.
+        */
+       end = sanitize_phys(end - 1) + 1;
        if (start >= end) {
                WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
                                start, end - 1, cattr_name(req_type));
index 753f637..6e0d075 100644 (file)
@@ -755,8 +755,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
        preempt_enable();
 }
 
-static void xen_convert_trap_info(const struct desc_ptr *desc,
-                                 struct trap_info *traps)
+static unsigned xen_convert_trap_info(const struct desc_ptr *desc,
+                                     struct trap_info *traps, bool full)
 {
        unsigned in, out, count;
 
@@ -766,17 +766,18 @@ static void xen_convert_trap_info(const struct desc_ptr *desc,
        for (in = out = 0; in < count; in++) {
                gate_desc *entry = (gate_desc *)(desc->address) + in;
 
-               if (cvt_gate_to_trap(in, entry, &traps[out]))
+               if (cvt_gate_to_trap(in, entry, &traps[out]) || full)
                        out++;
        }
-       traps[out].address = 0;
+
+       return out;
 }
 
 void xen_copy_trap_info(struct trap_info *traps)
 {
        const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);
 
-       xen_convert_trap_info(desc, traps);
+       xen_convert_trap_info(desc, traps, true);
 }
 
 /* Load a new IDT into Xen.  In principle this can be per-CPU, so we
@@ -786,6 +787,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
 {
        static DEFINE_SPINLOCK(lock);
        static struct trap_info traps[257];
+       unsigned out;
 
        trace_xen_cpu_load_idt(desc);
 
@@ -793,7 +795,8 @@ static void xen_load_idt(const struct desc_ptr *desc)
 
        memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
 
-       xen_convert_trap_info(desc, traps);
+       out = xen_convert_trap_info(desc, traps, false);
+       memset(&traps[out], 0, sizeof(traps[0]));
 
        xen_mc_flush();
        if (HYPERVISOR_set_trap_table(traps))
@@ -1214,6 +1217,11 @@ static void __init xen_dom0_set_legacy_features(void)
        x86_platform.legacy.rtc = 1;
 }
 
+static void __init xen_domu_set_legacy_features(void)
+{
+       x86_platform.legacy.rtc = 0;
+}
+
 /* First C function to be called on Xen boot */
 asmlinkage __visible void __init xen_start_kernel(void)
 {
@@ -1359,6 +1367,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
                add_preferred_console("xenboot", 0, NULL);
                if (pci_xen)
                        x86_init.pci.arch_init = pci_xen_init;
+               x86_platform.set_legacy_features =
+                               xen_domu_set_legacy_features;
        } else {
                const struct dom0_vga_console_info *info =
                        (void *)((char *)xen_start_info +
index 1df5f01..8d75193 100644 (file)
@@ -1518,14 +1518,17 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
        if (pinned) {
                struct page *page = pfn_to_page(pfn);
 
-               if (static_branch_likely(&xen_struct_pages_ready))
+               pinned = false;
+               if (static_branch_likely(&xen_struct_pages_ready)) {
+                       pinned = PagePinned(page);
                        SetPagePinned(page);
+               }
 
                xen_mc_batch();
 
                __set_pfn_prot(pfn, PAGE_KERNEL_RO);
 
-               if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+               if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
                        __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 
                xen_mc_issue(PARAVIRT_LAZY_MMU);
index 54f9aa7..46df59a 100644 (file)
@@ -18,7 +18,7 @@
 #endif
 #include <linux/export.h>
 
-int xen_swiotlb __read_mostly;
+static int xen_swiotlb __read_mostly;
 
 /*
  * pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary
@@ -56,7 +56,7 @@ int __init pci_xen_swiotlb_detect(void)
        return xen_swiotlb;
 }
 
-void __init pci_xen_swiotlb_init(void)
+static void __init pci_xen_swiotlb_init(void)
 {
        if (xen_swiotlb) {
                xen_swiotlb_init_early();
index 96afadf..7ed56c6 100644 (file)
@@ -290,8 +290,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 
        gdt = get_cpu_gdt_rw(cpu);
 
-       memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
-
        /*
         * Bring up the CPU in cpu_bringup_and_idle() with the stack
         * pointing just below where pt_regs would be if it were a normal
@@ -308,8 +306,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 
        xen_copy_trap_info(ctxt->trap_ctxt);
 
-       ctxt->ldt_ents = 0;
-
        BUG_ON((unsigned long)gdt & ~PAGE_MASK);
 
        gdt_mfn = arbitrary_virt_to_mfn(gdt);
index 5df3dd2..a6fb6a0 100644 (file)
@@ -1466,7 +1466,7 @@ again:
        if (!bio_integrity_endio(bio))
                return;
 
-       if (bio->bi_bdev)
+       if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED))
                rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);
 
        if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
index 3c88a79..38b9f76 100644 (file)
@@ -1182,10 +1182,6 @@ int blkcg_init_queue(struct request_queue *q)
        if (preloaded)
                radix_tree_preload_end();
 
-       ret = blk_iolatency_init(q);
-       if (ret)
-               goto err_destroy_all;
-
        ret = blk_ioprio_init(q);
        if (ret)
                goto err_destroy_all;
@@ -1194,6 +1190,12 @@ int blkcg_init_queue(struct request_queue *q)
        if (ret)
                goto err_destroy_all;
 
+       ret = blk_iolatency_init(q);
+       if (ret) {
+               blk_throtl_exit(q);
+               goto err_destroy_all;
+       }
+
        return 0;
 
 err_destroy_all:
@@ -1364,10 +1366,14 @@ enomem:
        /* alloc failed, nothing's initialized yet, free everything */
        spin_lock_irq(&q->queue_lock);
        list_for_each_entry(blkg, &q->blkg_list, q_node) {
+               struct blkcg *blkcg = blkg->blkcg;
+
+               spin_lock(&blkcg->lock);
                if (blkg->pd[pol->plid]) {
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }
+               spin_unlock(&blkcg->lock);
        }
        spin_unlock_irq(&q->queue_lock);
        ret = -ENOMEM;
@@ -1399,12 +1405,16 @@ void blkcg_deactivate_policy(struct request_queue *q,
        __clear_bit(pol->plid, q->blkcg_pols);
 
        list_for_each_entry(blkg, &q->blkg_list, q_node) {
+               struct blkcg *blkcg = blkg->blkcg;
+
+               spin_lock(&blkcg->lock);
                if (blkg->pd[pol->plid]) {
                        if (pol->pd_offline_fn)
                                pol->pd_offline_fn(blkg->pd[pol->plid]);
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }
+               spin_unlock(&blkcg->lock);
        }
 
        spin_unlock_irq(&q->queue_lock);
index 69a1217..16d5d53 100644 (file)
@@ -426,8 +426,15 @@ EXPORT_SYMBOL(blk_integrity_register);
  */
 void blk_integrity_unregister(struct gendisk *disk)
 {
+       struct blk_integrity *bi = &disk->queue->integrity;
+
+       if (!bi->profile)
+               return;
+
+       /* ensure all bios are off the integrity workqueue */
+       blk_flush_integrity();
        blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
-       memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
+       memset(bi, 0, sizeof(*bi));
 }
 EXPORT_SYMBOL(blk_integrity_unregister);
 
index 86f8734..ff5caeb 100644 (file)
@@ -208,7 +208,7 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
 
        spin_lock_irqsave(&tags->lock, flags);
        rq = tags->rqs[bitnr];
-       if (!rq || !refcount_inc_not_zero(&rq->ref))
+       if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
                rq = NULL;
        spin_unlock_irqrestore(&tags->lock, flags);
        return rq;
index 3510951..882f56b 100644 (file)
@@ -165,13 +165,20 @@ static const struct file_operations bsg_fops = {
        .llseek         =       default_llseek,
 };
 
+static void bsg_device_release(struct device *dev)
+{
+       struct bsg_device *bd = container_of(dev, struct bsg_device, device);
+
+       ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
+       kfree(bd);
+}
+
 void bsg_unregister_queue(struct bsg_device *bd)
 {
        if (bd->queue->kobj.sd)
                sysfs_remove_link(&bd->queue->kobj, "bsg");
        cdev_device_del(&bd->cdev, &bd->device);
-       ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
-       kfree(bd);
+       put_device(&bd->device);
 }
 EXPORT_SYMBOL_GPL(bsg_unregister_queue);
 
@@ -193,11 +200,13 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
        if (ret < 0) {
                if (ret == -ENOSPC)
                        dev_err(parent, "bsg: too many bsg devices\n");
-               goto out_kfree;
+               kfree(bd);
+               return ERR_PTR(ret);
        }
        bd->device.devt = MKDEV(bsg_major, ret);
        bd->device.class = bsg_class;
        bd->device.parent = parent;
+       bd->device.release = bsg_device_release;
        dev_set_name(&bd->device, "%s", name);
        device_initialize(&bd->device);
 
@@ -205,7 +214,7 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
        bd->cdev.owner = THIS_MODULE;
        ret = cdev_device_add(&bd->cdev, &bd->device);
        if (ret)
-               goto out_ida_remove;
+               goto out_put_device;
 
        if (q->kobj.sd) {
                ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg");
@@ -217,10 +226,8 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
 
 out_device_del:
        cdev_device_del(&bd->cdev, &bd->device);
-out_ida_remove:
-       ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
-out_kfree:
-       kfree(bd);
+out_put_device:
+       put_device(&bd->device);
        return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(bsg_register_queue);
index ffce6f6..1e970c2 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/falloc.h>
 #include <linux/suspend.h>
+#include <linux/fs.h>
 #include "blk.h"
 
 static struct inode *bdev_file_inode(struct file *file)
@@ -553,7 +554,8 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 static long blkdev_fallocate(struct file *file, int mode, loff_t start,
                             loff_t len)
 {
-       struct block_device *bdev = I_BDEV(bdev_file_inode(file));
+       struct inode *inode = bdev_file_inode(file);
+       struct block_device *bdev = I_BDEV(inode);
        loff_t end = start + len - 1;
        loff_t isize;
        int error;
@@ -580,10 +582,12 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
        if ((start | len) & (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;
 
+       filemap_invalidate_lock(inode->i_mapping);
+
        /* Invalidate the page cache, including dirty pages. */
        error = truncate_bdev_range(bdev, file->f_mode, start, end);
        if (error)
-               return error;
+               goto fail;
 
        switch (mode) {
        case FALLOC_FL_ZERO_RANGE:
@@ -600,17 +604,12 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
                                             GFP_KERNEL, 0);
                break;
        default:
-               return -EOPNOTSUPP;
+               error = -EOPNOTSUPP;
        }
-       if (error)
-               return error;
 
-       /*
-        * Invalidate the page cache again; if someone wandered in and dirtied
-        * a page, we just discard it - userspace has no way of knowing whether
-        * the write happened before or after discard completing...
-        */
-       return truncate_bdev_range(bdev, file->f_mode, start, end);
+ fail:
+       filemap_invalidate_unlock(inode->i_mapping);
+       return error;
 }
 
 const struct file_operations def_blk_fops = {
index a43f152..45c5c0e 100644 (file)
@@ -284,8 +284,7 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
 #define should_use_kmap(pfn)   page_is_ram(pfn)
 #endif
 
-static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz,
-                             bool memory)
+static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
 {
        unsigned long pfn;
 
@@ -295,8 +294,7 @@ static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz,
                        return NULL;
                return (void __iomem __force *)kmap(pfn_to_page(pfn));
        } else
-               return memory ? acpi_os_memmap(pg_off, pg_sz) :
-                               acpi_os_ioremap(pg_off, pg_sz);
+               return acpi_os_ioremap(pg_off, pg_sz);
 }
 
 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
@@ -311,10 +309,9 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
 }
 
 /**
- * __acpi_os_map_iomem - Get a virtual address for a given physical address range.
+ * acpi_os_map_iomem - Get a virtual address for a given physical address range.
  * @phys: Start of the physical address range to map.
  * @size: Size of the physical address range to map.
- * @memory: true if remapping memory, false if IO
  *
  * Look up the given physical address range in the list of existing ACPI memory
  * mappings.  If found, get a reference to it and return a pointer to it (its
@@ -324,8 +321,8 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
  * During early init (when acpi_permanent_mmap has not been set yet) this
  * routine simply calls __acpi_map_table() to get the job done.
  */
-static void __iomem __ref
-*__acpi_os_map_iomem(acpi_physical_address phys, acpi_size size, bool memory)
+void __iomem __ref
+*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
 {
        struct acpi_ioremap *map;
        void __iomem *virt;
@@ -356,7 +353,7 @@ static void __iomem __ref
 
        pg_off = round_down(phys, PAGE_SIZE);
        pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
-       virt = acpi_map(phys, size, memory);
+       virt = acpi_map(phys, size);
        if (!virt) {
                mutex_unlock(&acpi_ioremap_lock);
                kfree(map);
@@ -375,17 +372,11 @@ out:
        mutex_unlock(&acpi_ioremap_lock);
        return map->virt + (phys - map->phys);
 }
-
-void __iomem *__ref
-acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
-{
-       return __acpi_os_map_iomem(phys, size, false);
-}
 EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
 
 void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 {
-       return (void *)__acpi_os_map_iomem(phys, size, true);
+       return (void *)acpi_os_map_iomem(phys, size);
 }
 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
 
index d9030cb..9edacc8 100644 (file)
@@ -1852,6 +1852,7 @@ static void binder_deferred_fd_close(int fd)
 }
 
 static void binder_transaction_buffer_release(struct binder_proc *proc,
+                                             struct binder_thread *thread,
                                              struct binder_buffer *buffer,
                                              binder_size_t failed_at,
                                              bool is_failure)
@@ -2011,8 +2012,16 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
                                                &proc->alloc, &fd, buffer,
                                                offset, sizeof(fd));
                                WARN_ON(err);
-                               if (!err)
+                               if (!err) {
                                        binder_deferred_fd_close(fd);
+                                       /*
+                                        * Need to make sure the thread goes
+                                        * back to userspace to complete the
+                                        * deferred close
+                                        */
+                                       if (thread)
+                                               thread->looper_need_return = true;
+                               }
                        }
                } break;
                default:
@@ -3038,9 +3047,8 @@ static void binder_transaction(struct binder_proc *proc,
        if (reply) {
                binder_enqueue_thread_work(thread, tcomplete);
                binder_inner_proc_lock(target_proc);
-               if (target_thread->is_dead || target_proc->is_frozen) {
-                       return_error = target_thread->is_dead ?
-                               BR_DEAD_REPLY : BR_FROZEN_REPLY;
+               if (target_thread->is_dead) {
+                       return_error = BR_DEAD_REPLY;
                        binder_inner_proc_unlock(target_proc);
                        goto err_dead_proc_or_thread;
                }
@@ -3105,7 +3113,7 @@ err_bad_parent:
 err_copy_data_failed:
        binder_free_txn_fixups(t);
        trace_binder_transaction_failed_buffer_release(t->buffer);
-       binder_transaction_buffer_release(target_proc, t->buffer,
+       binder_transaction_buffer_release(target_proc, NULL, t->buffer,
                                          buffer_offset, true);
        if (target_node)
                binder_dec_node_tmpref(target_node);
@@ -3184,7 +3192,9 @@ err_invalid_target_handle:
  * Cleanup buffer and free it.
  */
 static void
-binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
+binder_free_buf(struct binder_proc *proc,
+               struct binder_thread *thread,
+               struct binder_buffer *buffer)
 {
        binder_inner_proc_lock(proc);
        if (buffer->transaction) {
@@ -3212,7 +3222,7 @@ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
                binder_node_inner_unlock(buf_node);
        }
        trace_binder_transaction_buffer_release(buffer);
-       binder_transaction_buffer_release(proc, buffer, 0, false);
+       binder_transaction_buffer_release(proc, thread, buffer, 0, false);
        binder_alloc_free_buf(&proc->alloc, buffer);
 }
 
@@ -3414,7 +3424,7 @@ static int binder_thread_write(struct binder_proc *proc,
                                     proc->pid, thread->pid, (u64)data_ptr,
                                     buffer->debug_id,
                                     buffer->transaction ? "active" : "finished");
-                       binder_free_buf(proc, buffer);
+                       binder_free_buf(proc, thread, buffer);
                        break;
                }
 
@@ -4107,7 +4117,7 @@ retry:
                        buffer->transaction = NULL;
                        binder_cleanup_transaction(t, "fd fixups failed",
                                                   BR_FAILED_REPLY);
-                       binder_free_buf(proc, buffer);
+                       binder_free_buf(proc, thread, buffer);
                        binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
                                     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
                                     proc->pid, thread->pid,
@@ -4648,6 +4658,22 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
        return 0;
 }
 
+static bool binder_txns_pending_ilocked(struct binder_proc *proc)
+{
+       struct rb_node *n;
+       struct binder_thread *thread;
+
+       if (proc->outstanding_txns > 0)
+               return true;
+
+       for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
+               thread = rb_entry(n, struct binder_thread, rb_node);
+               if (thread->transaction_stack)
+                       return true;
+       }
+       return false;
+}
+
 static int binder_ioctl_freeze(struct binder_freeze_info *info,
                               struct binder_proc *target_proc)
 {
@@ -4679,8 +4705,13 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
                        (!target_proc->outstanding_txns),
                        msecs_to_jiffies(info->timeout_ms));
 
-       if (!ret && target_proc->outstanding_txns)
-               ret = -EAGAIN;
+       /* Check pending transactions that wait for reply */
+       if (ret >= 0) {
+               binder_inner_proc_lock(target_proc);
+               if (binder_txns_pending_ilocked(target_proc))
+                       ret = -EAGAIN;
+               binder_inner_proc_unlock(target_proc);
+       }
 
        if (ret < 0) {
                binder_inner_proc_lock(target_proc);
@@ -4696,6 +4727,7 @@ static int binder_ioctl_get_freezer_info(
 {
        struct binder_proc *target_proc;
        bool found = false;
+       __u32 txns_pending;
 
        info->sync_recv = 0;
        info->async_recv = 0;
@@ -4705,7 +4737,9 @@ static int binder_ioctl_get_freezer_info(
                if (target_proc->pid == info->pid) {
                        found = true;
                        binder_inner_proc_lock(target_proc);
-                       info->sync_recv |= target_proc->sync_recv;
+                       txns_pending = binder_txns_pending_ilocked(target_proc);
+                       info->sync_recv |= target_proc->sync_recv |
+                                       (txns_pending << 1);
                        info->async_recv |= target_proc->async_recv;
                        binder_inner_proc_unlock(target_proc);
                }
index 810c0b8..402c4d4 100644 (file)
@@ -378,6 +378,8 @@ struct binder_ref {
  *                        binder transactions
  *                        (protected by @inner_lock)
  * @sync_recv:            process received sync transactions since last frozen
+ *                        bit 0: received sync transaction after being frozen
+ *                        bit 1: new pending sync transaction during freezing
  *                        (protected by @inner_lock)
  * @async_recv:           process received async transactions since last frozen
  *                        (protected by @inner_lock)
index 46c5034..00fb412 100644 (file)
@@ -264,7 +264,7 @@ void __init numa_free_distance(void)
        size = numa_distance_cnt * numa_distance_cnt *
                sizeof(numa_distance[0]);
 
-       memblock_free(__pa(numa_distance), size);
+       memblock_free_ptr(numa_distance, size);
        numa_distance_cnt = 0;
        numa_distance = NULL;
 }
index a97f33d..9466503 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/rtc.h>
 #include <linux/suspend.h>
+#include <linux/init.h>
 
 #include <linux/mc146818rtc.h>
 
@@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
        const char *file = *(const char **)(tracedata + 2);
        unsigned int user_hash_value, file_hash_value;
 
+       if (!x86_platform.legacy.rtc)
+               return;
+
        user_hash_value = user % USERHASH;
        file_hash_value = hash_string(lineno, file, FILEHASH);
        set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
@@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {
 
 static int __init early_resume_init(void)
 {
+       if (!x86_platform.legacy.rtc)
+               return 0;
+
        hash_value_early_read = read_magic_time();
        register_pm_notifier(&pm_trace_nb);
        return 0;
@@ -277,6 +284,9 @@ static int __init late_resume_init(void)
        unsigned int val = hash_value_early_read;
        unsigned int user, file, dev;
 
+       if (!x86_platform.legacy.rtc)
+               return 0;
+
        user = val % USERHASH;
        val = val / USERHASH;
        file = val % FILEHASH;
index 7bd0f3c..c46f6a8 100644 (file)
@@ -1116,6 +1116,9 @@ int device_create_managed_software_node(struct device *dev,
        to_swnode(fwnode)->managed = true;
        set_secondary_fwnode(dev, fwnode);
 
+       if (device_is_registered(dev))
+               software_node_notify(dev);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(device_create_managed_software_node);
index df77b6b..763cea8 100644 (file)
@@ -3090,6 +3090,7 @@ static int compat_insnlist(struct file *file, unsigned long arg)
        mutex_lock(&dev->mutex);
        rc = do_insnlist_ioctl(dev, insns, insnlist32.n_insns, file);
        mutex_unlock(&dev->mutex);
+       kfree(insns);
        return rc;
 }
 
index 66b05a3..a6f365b 100644 (file)
@@ -74,8 +74,8 @@ unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *l
        if (count)
                return count;
 
-       kobject_put(&attr_set->kobj);
        mutex_destroy(&attr_set->update_lock);
+       kobject_put(&attr_set->kobj);
        return 0;
 }
 EXPORT_SYMBOL_GPL(gov_attr_set_put);
index 1097f82..8c176b7 100644 (file)
@@ -3205,11 +3205,15 @@ static int __init intel_pstate_init(void)
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return -ENODEV;
 
-       if (no_load)
-               return -ENODEV;
-
        id = x86_match_cpu(hwp_support_ids);
        if (id) {
+               bool hwp_forced = intel_pstate_hwp_is_enabled();
+
+               if (hwp_forced)
+                       pr_info("HWP enabled by BIOS\n");
+               else if (no_load)
+                       return -ENODEV;
+
                copy_cpu_funcs(&core_funcs);
                /*
                 * Avoid enabling HWP for processors without EPP support,
@@ -3219,8 +3223,7 @@ static int __init intel_pstate_init(void)
                 * If HWP is enabled already, though, there is no choice but to
                 * deal with it.
                 */
-               if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
-                   intel_pstate_hwp_is_enabled()) {
+               if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
                        hwp_active++;
                        hwp_mode_bdw = id->driver_data;
                        intel_pstate.attr = hwp_cpufreq_attrs;
@@ -3235,7 +3238,11 @@ static int __init intel_pstate_init(void)
 
                        goto hwp_cpu_matched;
                }
+               pr_info("HWP not enabled\n");
        } else {
+               if (no_load)
+                       return -ENODEV;
+
                id = x86_match_cpu(intel_pstate_cpu_ids);
                if (!id) {
                        pr_info("CPU model not supported\n");
@@ -3314,10 +3321,9 @@ static int __init intel_pstate_setup(char *str)
        else if (!strcmp(str, "passive"))
                default_driver = &intel_cpufreq;
 
-       if (!strcmp(str, "no_hwp")) {
-               pr_info("HWP disabled\n");
+       if (!strcmp(str, "no_hwp"))
                no_hwp = 1;
-       }
+
        if (!strcmp(str, "force"))
                force_load = 1;
        if (!strcmp(str, "hwp_only"))
index 284b6bd..d295f40 100644 (file)
@@ -451,7 +451,6 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
 static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
 {
        struct device *cpu_dev;
-       int cur_cluster = cpu_to_cluster(policy->cpu);
 
        cpu_dev = get_cpu_device(policy->cpu);
        if (!cpu_dev) {
index fc1153a..b8a7d95 100644 (file)
@@ -464,7 +464,7 @@ static void dmc520_init_csrow(struct mem_ctl_info *mci)
                        dimm->grain     = pvt->mem_width_in_bytes;
                        dimm->dtype     = dt;
                        dimm->mtype     = mt;
-                       dimm->edac_mode = EDAC_FLAG_SECDED;
+                       dimm->edac_mode = EDAC_SECDED;
                        dimm->nr_pages  = pages_per_rank / csi->nr_channels;
                }
        }
index 7e7146b..7d08627 100644 (file)
@@ -782,7 +782,7 @@ static void init_csrows(struct mem_ctl_info *mci)
 
                for (j = 0; j < csi->nr_channels; j++) {
                        dimm            = csi->channels[j]->dimm;
-                       dimm->edac_mode = EDAC_FLAG_SECDED;
+                       dimm->edac_mode = EDAC_SECDED;
                        dimm->mtype     = p_data->get_mtype(priv->baseaddr);
                        dimm->nr_pages  = (size >> PAGE_SHIFT) / csi->nr_channels;
                        dimm->grain     = SYNPS_EDAC_ERR_GRAIN;
index c99b78e..f86666c 100644 (file)
@@ -1019,16 +1019,18 @@ create_feature_instance(struct build_feature_devs_info *binfo,
 {
        unsigned int irq_base, nr_irqs;
        struct dfl_feature_info *finfo;
+       u8 revision = 0;
        int ret;
-       u8 revision;
        u64 v;
 
-       v = readq(binfo->ioaddr + ofst);
-       revision = FIELD_GET(DFH_REVISION, v);
+       if (fid != FEATURE_ID_AFU) {
+               v = readq(binfo->ioaddr + ofst);
+               revision = FIELD_GET(DFH_REVISION, v);
 
-       /* read feature size and id if inputs are invalid */
-       size = size ? size : feature_size(v);
-       fid = fid ? fid : feature_id(v);
+               /* read feature size and id if inputs are invalid */
+               size = size ? size : feature_size(v);
+               fid = fid ? fid : feature_id(v);
+       }
 
        if (binfo->len - ofst < size)
                return -EINVAL;
index 1afb41a..ea2ec3c 100644 (file)
@@ -225,8 +225,10 @@ static int machxo2_write_init(struct fpga_manager *mgr,
                goto fail;
 
        get_status(spi, &status);
-       if (test_bit(FAIL, &status))
+       if (test_bit(FAIL, &status)) {
+               ret = -EINVAL;
                goto fail;
+       }
        dump_status_reg(&status);
 
        spi_message_init(&msg);
@@ -313,6 +315,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
        dump_status_reg(&status);
        if (!test_bit(DONE, &status)) {
                machxo2_cleanup(mgr);
+               ret = -EINVAL;
                goto fail;
        }
 
@@ -335,6 +338,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
                        break;
                if (++refreshloop == MACHXO2_MAX_REFRESH_LOOP) {
                        machxo2_cleanup(mgr);
+                       ret = -EINVAL;
                        goto fail;
                }
        } while (1);
index 10f303d..3d6ef37 100644 (file)
@@ -395,7 +395,7 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
                reg = ioread32(bank_reg(data, bank, reg_irq_status));
 
                for_each_set_bit(p, &reg, 32)
-                       generic_handle_domain_irq(gc->irq.domain, i * 32 + p);
+                       generic_handle_domain_irq(gc->irq.domain, i * 32 + p * 2);
        }
 
        chained_irq_exit(ic, desc);
index 036b2d9..3335bd5 100644 (file)
@@ -141,7 +141,7 @@ static int rockchip_gpio_get_direction(struct gpio_chip *chip,
        u32 data;
 
        data = rockchip_gpio_readl_bit(bank, offset, bank->gpio_regs->port_ddr);
-       if (data & BIT(offset))
+       if (data)
                return GPIO_LINE_DIRECTION_OUT;
 
        return GPIO_LINE_DIRECTION_IN;
@@ -195,7 +195,7 @@ static int rockchip_gpio_set_debounce(struct gpio_chip *gc,
        unsigned int cur_div_reg;
        u64 div;
 
-       if (!IS_ERR(bank->db_clk)) {
+       if (bank->gpio_type == GPIO_TYPE_V2 && !IS_ERR(bank->db_clk)) {
                div_debounce_support = true;
                freq = clk_get_rate(bank->db_clk);
                max_debounce = (GENMASK(23, 0) + 1) * 2 * 1000000 / freq;
index f99f3c1..39dca14 100644 (file)
@@ -184,7 +184,7 @@ static void uniphier_gpio_irq_mask(struct irq_data *data)
 
        uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, 0);
 
-       return irq_chip_mask_parent(data);
+       irq_chip_mask_parent(data);
 }
 
 static void uniphier_gpio_irq_unmask(struct irq_data *data)
@@ -194,7 +194,7 @@ static void uniphier_gpio_irq_unmask(struct irq_data *data)
 
        uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, mask);
 
-       return irq_chip_unmask_parent(data);
+       irq_chip_unmask_parent(data);
 }
 
 static int uniphier_gpio_irq_set_type(struct irq_data *data, unsigned int type)
index 411525a..47712b6 100644 (file)
@@ -313,9 +313,11 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
 
        ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
        if (ret)
-               gpiochip_free_own_desc(desc);
+               dev_warn(chip->parent,
+                        "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
+                        pin, ret);
 
-       return ret ? ERR_PTR(ret) : desc;
+       return desc;
 }
 
 static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
index dc3c6b3..d356e32 100644 (file)
@@ -758,7 +758,7 @@ enum amd_hw_ip_block_type {
        MAX_HWIP
 };
 
-#define HWIP_MAX_INSTANCE      8
+#define HWIP_MAX_INSTANCE      10
 
 struct amd_powerplay {
        void *pp_handle;
index 3003ee1..1d41c2c 100644 (file)
@@ -192,6 +192,16 @@ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
                kgd2kfd_suspend(adev->kfd.dev, run_pm);
 }
 
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev)
+{
+       int r = 0;
+
+       if (adev->kfd.dev)
+               r = kgd2kfd_resume_iommu(adev->kfd.dev);
+
+       return r;
+}
+
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
 {
        int r = 0;
index ec028cf..3bc52b2 100644 (file)
@@ -137,6 +137,7 @@ int amdgpu_amdkfd_init(void);
 void amdgpu_amdkfd_fini(void);
 
 void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
                        const void *ih_ring_entry);
@@ -327,6 +328,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
                         const struct kgd2kfd_shared_resources *gpu_resources);
 void kgd2kfd_device_exit(struct kfd_dev *kfd);
 void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
 int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
 int kgd2kfd_pre_reset(struct kfd_dev *kfd);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
@@ -365,6 +367,11 @@ static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
 {
 }
 
+static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd)
+{
+       return 0;
+}
+
 static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 {
        return 0;
index 2771288..463b9c0 100644 (file)
@@ -1544,20 +1544,18 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
        struct dentry *ent;
        int r, i;
 
-
-
        ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
                                  &fops_ib_preempt);
-       if (!ent) {
+       if (IS_ERR(ent)) {
                DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
-               return -EIO;
+               return PTR_ERR(ent);
        }
 
        ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
                                  &fops_sclk_set);
-       if (!ent) {
+       if (IS_ERR(ent)) {
                DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
-               return -EIO;
+               return PTR_ERR(ent);
        }
 
        /* Register debugfs entries for amdgpu_ttm */
index 41c6b3a..ab3794c 100644 (file)
@@ -2394,6 +2394,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
        if (r)
                goto init_failed;
 
+       r = amdgpu_amdkfd_resume_iommu(adev);
+       if (r)
+               goto init_failed;
+
        r = amdgpu_device_ip_hw_init_phase1(adev);
        if (r)
                goto init_failed;
@@ -3148,6 +3152,10 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
 {
        int r;
 
+       r = amdgpu_amdkfd_resume_iommu(adev);
+       if (r)
+               return r;
+
        r = amdgpu_device_ip_resume_phase1(adev);
        if (r)
                return r;
@@ -4601,6 +4609,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
                                dev_warn(tmp_adev->dev, "asic atom init failed!");
                        } else {
                                dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+                               r = amdgpu_amdkfd_resume_iommu(tmp_adev);
+                               if (r)
+                                       goto out;
+
                                r = amdgpu_device_ip_resume_phase1(tmp_adev);
                                if (r)
                                        goto out;
index c7797ea..9ff600a 100644 (file)
@@ -598,7 +598,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
                break;
        default:
                adev->gmc.tmz_enabled = false;
-               dev_warn(adev->dev,
+               dev_info(adev->dev,
                         "Trusted Memory Zone (TMZ) feature not supported\n");
                break;
        }
index dc44c94..9873251 100644 (file)
@@ -757,7 +757,7 @@ Out:
        return res;
 }
 
-inline uint32_t amdgpu_ras_eeprom_max_record_count(void)
+uint32_t amdgpu_ras_eeprom_max_record_count(void)
 {
        return RAS_MAX_RECORD_COUNT;
 }
index f95fc61..6bb0057 100644 (file)
@@ -120,7 +120,7 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
 int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
                             struct eeprom_table_record *records, const u32 num);
 
-inline uint32_t amdgpu_ras_eeprom_max_record_count(void);
+uint32_t amdgpu_ras_eeprom_max_record_count(void);
 
 void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
 
index 7b634a1..0554576 100644 (file)
@@ -428,8 +428,8 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
        ent = debugfs_create_file(name,
                                  S_IFREG | S_IRUGO, root,
                                  ring, &amdgpu_debugfs_ring_fops);
-       if (!ent)
-               return -ENOMEM;
+       if (IS_ERR(ent))
+               return PTR_ERR(ent);
 
        i_size_write(ent->d_inode, ring->ring_size + 12);
        ring->ent = ent;
index 38dade4..94126dc 100644 (file)
@@ -515,6 +515,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                goto out;
        }
 
+       if (bo->type == ttm_bo_type_device &&
+           new_mem->mem_type == TTM_PL_VRAM &&
+           old_mem->mem_type != TTM_PL_VRAM) {
+               /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
+                * accesses the BO after it's moved.
+                */
+               abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+       }
+
        if (adev->mman.buffer_funcs_enabled) {
                if (((old_mem->mem_type == TTM_PL_SYSTEM &&
                      new_mem->mem_type == TTM_PL_VRAM) ||
@@ -545,15 +554,6 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                        return r;
        }
 
-       if (bo->type == ttm_bo_type_device &&
-           new_mem->mem_type == TTM_PL_VRAM &&
-           old_mem->mem_type != TTM_PL_VRAM) {
-               /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
-                * accesses the BO after it's moved.
-                */
-               abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-       }
-
 out:
        /* update statistics */
        atomic64_add(bo->base.size, &adev->num_bytes_moved);
index 16a57b7..c2a4d92 100644 (file)
@@ -468,6 +468,7 @@ static const struct kfd_device_info navi10_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 145,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -487,6 +488,7 @@ static const struct kfd_device_info navi12_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 145,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -506,6 +508,7 @@ static const struct kfd_device_info navi14_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 145,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -525,6 +528,7 @@ static const struct kfd_device_info sienna_cichlid_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 4,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -544,6 +548,7 @@ static const struct kfd_device_info navy_flounder_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -562,7 +567,8 @@ static const struct kfd_device_info vangogh_device_info = {
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .needs_iommu_device = false,
        .supports_cwsr = true,
-       .needs_pci_atomics = false,
+       .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 1,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 2,
@@ -582,6 +588,7 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 2,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -601,6 +608,7 @@ static const struct kfd_device_info beige_goby_device_info = {
        .needs_iommu_device = false,
        .supports_cwsr = true,
        .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 1,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 8,
@@ -619,7 +627,8 @@ static const struct kfd_device_info yellow_carp_device_info = {
        .mqd_size_aligned = MQD_SIZE_ALIGNED,
        .needs_iommu_device = false,
        .supports_cwsr = true,
-       .needs_pci_atomics = false,
+       .needs_pci_atomics = true,
+       .no_atomic_fw_version = 92,
        .num_sdma_engines = 1,
        .num_xgmi_sdma_engines = 0,
        .num_sdma_queues_per_engine = 2,
@@ -708,20 +717,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
        if (!kfd)
                return NULL;
 
-       /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
-        * 32 and 64-bit requests are possible and must be
-        * supported.
-        */
-       kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd);
-       if (device_info->needs_pci_atomics &&
-           !kfd->pci_atomic_requested) {
-               dev_info(kfd_device,
-                        "skipped device %x:%x, PCI rejects atomics\n",
-                        pdev->vendor, pdev->device);
-               kfree(kfd);
-               return NULL;
-       }
-
        kfd->kgd = kgd;
        kfd->device_info = device_info;
        kfd->pdev = pdev;
@@ -821,6 +816,23 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
        kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
                        - kfd->vm_info.first_vmid_kfd + 1;
 
+       /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
+        * 32 and 64-bit requests are possible and must be
+        * supported.
+        */
+       kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->kgd);
+       if (!kfd->pci_atomic_requested &&
+           kfd->device_info->needs_pci_atomics &&
+           (!kfd->device_info->no_atomic_fw_version ||
+            kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) {
+               dev_info(kfd_device,
+                        "skipped device %x:%x, PCI rejects atomics %d<%d\n",
+                        kfd->pdev->vendor, kfd->pdev->device,
+                        kfd->mec_fw_version,
+                        kfd->device_info->no_atomic_fw_version);
+               return false;
+       }
+
        /* Verify module parameters regarding mapped process number*/
        if ((hws_max_conc_proc < 0)
                        || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
@@ -959,7 +971,6 @@ out:
 void kgd2kfd_device_exit(struct kfd_dev *kfd)
 {
        if (kfd->init_complete) {
-               svm_migrate_fini((struct amdgpu_device *)kfd->kgd);
                device_queue_manager_uninit(kfd->dqm);
                kfd_interrupt_exit(kfd);
                kfd_topology_remove_device(kfd);
@@ -1057,17 +1068,21 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
        return ret;
 }
 
-static int kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
 {
        int err = 0;
 
        err = kfd_iommu_resume(kfd);
-       if (err) {
+       if (err)
                dev_err(kfd_device,
                        "Failed to resume IOMMU for device %x:%x\n",
                        kfd->pdev->vendor, kfd->pdev->device);
-               return err;
-       }
+       return err;
+}
+
+static int kfd_resume(struct kfd_dev *kfd)
+{
+       int err = 0;
 
        err = kfd->dqm->ops.start(kfd->dqm);
        if (err) {
index dab290a..4a16e3c 100644 (file)
@@ -891,9 +891,16 @@ int svm_migrate_init(struct amdgpu_device *adev)
        pgmap->ops = &svm_migrate_pgmap_ops;
        pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
        pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
+
+       /* Device manager releases device-specific resources, memory region and
+        * pgmap when driver disconnects from device.
+        */
        r = devm_memremap_pages(adev->dev, pgmap);
        if (IS_ERR(r)) {
                pr_err("failed to register HMM device memory\n");
+
+               /* Disable SVM support capability */
+               pgmap->type = 0;
                devm_release_mem_region(adev->dev, res->start,
                                        res->end - res->start + 1);
                return PTR_ERR(r);
@@ -908,12 +915,3 @@ int svm_migrate_init(struct amdgpu_device *adev)
 
        return 0;
 }
-
-void svm_migrate_fini(struct amdgpu_device *adev)
-{
-       struct dev_pagemap *pgmap = &adev->kfd.dev->pgmap;
-
-       devm_memunmap_pages(adev->dev, pgmap);
-       devm_release_mem_region(adev->dev, pgmap->range.start,
-                               pgmap->range.end - pgmap->range.start + 1);
-}
index 0de76b5..2f5b339 100644 (file)
@@ -47,7 +47,6 @@ unsigned long
 svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);
 
 int svm_migrate_init(struct amdgpu_device *adev);
-void svm_migrate_fini(struct amdgpu_device *adev);
 
 #else
 
@@ -55,10 +54,6 @@ static inline int svm_migrate_init(struct amdgpu_device *adev)
 {
        return 0;
 }
-static inline void svm_migrate_fini(struct amdgpu_device *adev)
-{
-       /* empty */
-}
 
 #endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */
 
index ab83b0d..6d8f9bb 100644 (file)
@@ -207,6 +207,7 @@ struct kfd_device_info {
        bool supports_cwsr;
        bool needs_iommu_device;
        bool needs_pci_atomics;
+       uint32_t no_atomic_fw_version;
        unsigned int num_sdma_engines;
        unsigned int num_xgmi_sdma_engines;
        unsigned int num_sdma_queues_per_engine;
index 9fc8021..9d0f65a 100644 (file)
@@ -118,6 +118,13 @@ static void svm_range_remove_notifier(struct svm_range *prange)
                mmu_interval_notifier_remove(&prange->notifier);
 }
 
+static bool
+svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
+{
+       return dma_addr && !dma_mapping_error(dev, dma_addr) &&
+              !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
+}
+
 static int
 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
                      unsigned long offset, unsigned long npages,
@@ -139,8 +146,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
 
        addr += offset;
        for (i = 0; i < npages; i++) {
-               if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]),
-                             "leaking dma mapping\n"))
+               if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
                        dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
 
                page = hmm_pfn_to_page(hmm_pfns[i]);
@@ -209,7 +215,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
                return;
 
        for (i = offset; i < offset + npages; i++) {
-               if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
+               if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
                        continue;
                pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
                dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
@@ -1165,7 +1171,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        unsigned long last_start;
        int last_domain;
        int r = 0;
-       int64_t i;
+       int64_t i, j;
 
        last_start = prange->start + offset;
 
@@ -1178,7 +1184,11 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        for (i = offset; i < offset + npages; i++) {
                last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
                dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
-               if ((prange->start + i) < prange->last &&
+
+               /* Collect all pages in the same address range and memory domain
+                * that can be mapped with a single call to update mapping.
+                */
+               if (i < offset + npages - 1 &&
                    last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
                        continue;
 
@@ -1201,6 +1211,10 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                                                NULL, dma_addr,
                                                &vm->last_update,
                                                &table_freed);
+
+               for (j = last_start - prange->start; j <= i; j++)
+                       dma_addr[j] |= last_domain;
+
                if (r) {
                        pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
                        goto out;
index 9b1fc54..66c799f 100644 (file)
@@ -998,6 +998,8 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
 
+       memset(pa_config, 0, sizeof(*pa_config));
+
        logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
 
@@ -6024,21 +6026,23 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
                return 0;
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-       work = kzalloc(sizeof(*work), GFP_ATOMIC);
-       if (!work)
-               return -ENOMEM;
+       if (dm->vblank_control_workqueue) {
+               work = kzalloc(sizeof(*work), GFP_ATOMIC);
+               if (!work)
+                       return -ENOMEM;
 
-       INIT_WORK(&work->work, vblank_control_worker);
-       work->dm = dm;
-       work->acrtc = acrtc;
-       work->enable = enable;
+               INIT_WORK(&work->work, vblank_control_worker);
+               work->dm = dm;
+               work->acrtc = acrtc;
+               work->enable = enable;
 
-       if (acrtc_state->stream) {
-               dc_stream_retain(acrtc_state->stream);
-               work->stream = acrtc_state->stream;
-       }
+               if (acrtc_state->stream) {
+                       dc_stream_retain(acrtc_state->stream);
+                       work->stream = acrtc_state->stream;
+               }
 
-       queue_work(dm->vblank_control_workqueue, &work->work);
+               queue_work(dm->vblank_control_workqueue, &work->work);
+       }
 #endif
 
        return 0;
@@ -6792,14 +6796,15 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
-                                           struct dc_state *dc_state)
+                                           struct dc_state *dc_state,
+                                           struct dsc_mst_fairness_vars *vars)
 {
        struct dc_stream_state *stream = NULL;
        struct drm_connector *connector;
        struct drm_connector_state *new_con_state;
        struct amdgpu_dm_connector *aconnector;
        struct dm_connector_state *dm_conn_state;
-       int i, j, clock, bpp;
+       int i, j, clock;
        int vcpi, pbn_div, pbn = 0;
 
        for_each_new_connector_in_state(state, connector, new_con_state, i) {
@@ -6838,9 +6843,15 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
                }
 
                pbn_div = dm_mst_get_pbn_divider(stream->link);
-               bpp = stream->timing.dsc_cfg.bits_per_pixel;
                clock = stream->timing.pix_clk_100hz / 10;
-               pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
+               /* pbn is calculated by compute_mst_dsc_configs_for_state*/
+               for (j = 0; j < dc_state->stream_count; j++) {
+                       if (vars[j].aconnector == aconnector) {
+                               pbn = vars[j].pbn;
+                               break;
+                       }
+               }
+
                vcpi = drm_dp_mst_atomic_enable_dsc(state,
                                                    aconnector->port,
                                                    pbn, pbn_div,
@@ -7519,6 +7530,32 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
        }
 }
 
+static void amdgpu_set_panel_orientation(struct drm_connector *connector)
+{
+       struct drm_encoder *encoder;
+       struct amdgpu_encoder *amdgpu_encoder;
+       const struct drm_display_mode *native_mode;
+
+       if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+           connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+               return;
+
+       encoder = amdgpu_dm_connector_to_encoder(connector);
+       if (!encoder)
+               return;
+
+       amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+       native_mode = &amdgpu_encoder->native_mode;
+       if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
+               return;
+
+       drm_connector_set_panel_orientation_with_quirk(connector,
+                                                      DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+                                                      native_mode->hdisplay,
+                                                      native_mode->vdisplay);
+}
+
 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
                                              struct edid *edid)
 {
@@ -7547,6 +7584,8 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
                 * restored here.
                 */
                amdgpu_dm_update_freesync_caps(connector, edid);
+
+               amdgpu_set_panel_orientation(connector);
        } else {
                amdgpu_dm_connector->num_modes = 0;
        }
@@ -8058,8 +8097,26 @@ static bool is_content_protection_different(struct drm_connector_state *state,
            state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
                state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 
-       /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
-        * hot-plug, headless s3, dpms
+       /* Stream removed and re-enabled
+        *
+        * Can sometimes overlap with the HPD case,
+        * thus set update_hdcp to false to avoid
+        * setting HDCP multiple times.
+        *
+        * Handles:     DESIRED -> DESIRED (Special case)
+        */
+       if (!(old_state->crtc && old_state->crtc->enabled) &&
+               state->crtc && state->crtc->enabled &&
+               connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+               dm_con_state->update_hdcp = false;
+               return true;
+       }
+
+       /* Hot-plug, headless s3, dpms
+        *
+        * Only start HDCP if the display is connected/enabled.
+        * update_hdcp flag will be set to false until the next
+        * HPD comes in.
         *
         * Handles:     DESIRED -> DESIRED (Special case)
         */
@@ -8648,7 +8705,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                 * If PSR or idle optimizations are enabled then flush out
                 * any pending work before hardware programming.
                 */
-               flush_workqueue(dm->vblank_control_workqueue);
+               if (dm->vblank_control_workqueue)
+                       flush_workqueue(dm->vblank_control_workqueue);
 #endif
 
                bundle->stream_update.stream = acrtc_state->stream;
@@ -8983,7 +9041,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                /* if there mode set or reset, disable eDP PSR */
                if (mode_set_reset_required) {
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-                       flush_workqueue(dm->vblank_control_workqueue);
+                       if (dm->vblank_control_workqueue)
+                               flush_workqueue(dm->vblank_control_workqueue);
 #endif
                        amdgpu_dm_psr_disable_all(dm);
                }
@@ -10243,6 +10302,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
        int ret, i;
        bool lock_and_validation_needed = false;
        struct dm_crtc_state *dm_old_crtc_state;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+       struct dsc_mst_fairness_vars vars[MAX_PIPES];
+#endif
 
        trace_amdgpu_dm_atomic_check_begin(state);
 
@@ -10473,10 +10535,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        goto fail;
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-               if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
+               if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
                        goto fail;
 
-               ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
+               ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
                if (ret)
                        goto fail;
 #endif
@@ -10492,7 +10554,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        goto fail;
                status = dc_validate_global_state(dc, dm_state->context, false);
                if (status != DC_OK) {
-                       DC_LOG_WARNING("DC global validation failure: %s (%d)",
+                       drm_dbg_atomic(dev,
+                                      "DC global validation failure: %s (%d)",
                                       dc_status_to_str(status), status);
                        ret = -EINVAL;
                        goto fail;
index 1bcba69..7af0d58 100644 (file)
@@ -518,12 +518,7 @@ struct dsc_mst_fairness_params {
        uint32_t num_slices_h;
        uint32_t num_slices_v;
        uint32_t bpp_overwrite;
-};
-
-struct dsc_mst_fairness_vars {
-       int pbn;
-       bool dsc_enabled;
-       int bpp_x16;
+       struct amdgpu_dm_connector *aconnector;
 };
 
 static int kbps_to_peak_pbn(int kbps)
@@ -750,12 +745,12 @@ static void try_disable_dsc(struct drm_atomic_state *state,
 
 static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
                                             struct dc_state *dc_state,
-                                            struct dc_link *dc_link)
+                                            struct dc_link *dc_link,
+                                            struct dsc_mst_fairness_vars *vars)
 {
        int i;
        struct dc_stream_state *stream;
        struct dsc_mst_fairness_params params[MAX_PIPES];
-       struct dsc_mst_fairness_vars vars[MAX_PIPES];
        struct amdgpu_dm_connector *aconnector;
        int count = 0;
        bool debugfs_overwrite = false;
@@ -776,6 +771,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
                params[count].timing = &stream->timing;
                params[count].sink = stream->sink;
                aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+               params[count].aconnector = aconnector;
                params[count].port = aconnector->port;
                params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
                if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
@@ -798,6 +794,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
        }
        /* Try no compression */
        for (i = 0; i < count; i++) {
+               vars[i].aconnector = params[i].aconnector;
                vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
                vars[i].dsc_enabled = false;
                vars[i].bpp_x16 = 0;
@@ -851,7 +848,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 }
 
 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
-                                      struct dc_state *dc_state)
+                                      struct dc_state *dc_state,
+                                      struct dsc_mst_fairness_vars *vars)
 {
        int i, j;
        struct dc_stream_state *stream;
@@ -882,7 +880,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
                        return false;
 
                mutex_lock(&aconnector->mst_mgr.lock);
-               if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
+               if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) {
                        mutex_unlock(&aconnector->mst_mgr.lock);
                        return false;
                }
index b38bd68..900d3f7 100644 (file)
@@ -39,8 +39,17 @@ void
 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
+
+struct dsc_mst_fairness_vars {
+       int pbn;
+       bool dsc_enabled;
+       int bpp_x16;
+       struct amdgpu_dm_connector *aconnector;
+};
+
 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
-                                      struct dc_state *dc_state);
+                                      struct dc_state *dc_state,
+                                      struct dsc_mst_fairness_vars *vars);
 #endif
 
 #endif
index c9f47d1..b1bf80d 100644 (file)
@@ -62,7 +62,7 @@ inline void dc_assert_fp_enabled(void)
        depth = *pcpu;
        put_cpu_ptr(&fpu_recursion_depth);
 
-       ASSERT(depth > 1);
+       ASSERT(depth >= 1);
 }
 
 /**
index 8bd7f42..1e44b13 100644 (file)
@@ -2586,13 +2586,21 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link)
 
 int dc_link_get_backlight_level(const struct dc_link *link)
 {
-
        struct abm *abm = get_abm_from_stream_res(link);
+       struct panel_cntl *panel_cntl = link->panel_cntl;
+       struct dc  *dc = link->ctx->dc;
+       struct dmcu *dmcu = dc->res_pool->dmcu;
+       bool fw_set_brightness = true;
 
-       if (abm == NULL || abm->funcs->get_current_backlight == NULL)
-               return DC_ERROR_UNEXPECTED;
+       if (dmcu)
+               fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
 
-       return (int) abm->funcs->get_current_backlight(abm);
+       if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
+               return panel_cntl->funcs->get_current_backlight(panel_cntl);
+       else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
+               return (int) abm->funcs->get_current_backlight(abm);
+       else
+               return DC_ERROR_UNEXPECTED;
 }
 
 int dc_link_get_target_backlight_pwm(const struct dc_link *link)
index 330edd6..f6dbc5a 100644 (file)
@@ -1,4 +1,26 @@
-/* Copyright 2015 Advanced Micro Devices, Inc. */
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
 #include "dm_services.h"
 #include "dc.h"
 #include "dc_link_dp.h"
@@ -1840,9 +1862,13 @@ bool perform_link_training_with_retries(
                dp_disable_link_phy(link, signal);
 
                /* Abort link training if failure due to sink being unplugged. */
-               if (status == LINK_TRAINING_ABORT)
-                       break;
-               else if (do_fallback) {
+               if (status == LINK_TRAINING_ABORT) {
+                       enum dc_connection_type type = dc_connection_none;
+
+                       dc_link_detect_sink(link, &type);
+                       if (type == dc_connection_none)
+                               break;
+               } else if (do_fallback) {
                        decide_fallback_link_setting(*link_setting, &current_setting, status);
                        /* Fail link training if reduced link bandwidth no longer meets
                         * stream requirements.
index e14f99b..3c33473 100644 (file)
@@ -42,7 +42,7 @@
 #define DC_LOGGER \
        engine->ctx->logger
 
-#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+#define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0)
 #define IS_DC_I2CAUX_LOGGING_ENABLED() (false)
 #define LOG_FLAG_Error_I2cAux LOG_ERROR
 #define LOG_FLAG_I2cAux_DceAux LOG_I2C_AUX
@@ -76,7 +76,7 @@ enum {
 #define DEFAULT_AUX_ENGINE_MULT   0
 #define DEFAULT_AUX_ENGINE_LENGTH 69
 
-#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+#define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0)
 
 static void release_engine(
        struct dce_aux *engine)
index e923392..e857006 100644 (file)
@@ -49,7 +49,6 @@
 static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)
 {
        uint64_t current_backlight;
-       uint32_t round_result;
        uint32_t bl_period, bl_int_count;
        uint32_t bl_pwm, fractional_duty_cycle_en;
        uint32_t bl_period_mask, bl_pwm_mask;
@@ -84,15 +83,6 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_c
        current_backlight = div_u64(current_backlight, bl_period);
        current_backlight = (current_backlight + 1) >> 1;
 
-       current_backlight = (uint64_t)(current_backlight) * bl_period;
-
-       round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
-
-       round_result = (round_result >> (bl_int_count-1)) & 1;
-
-       current_backlight >>= bl_int_count;
-       current_backlight += round_result;
-
        return (uint32_t)(current_backlight);
 }
 
index 8a08ecc..4884a4e 100644 (file)
 #define TABLE_PMSTATUSLOG        3 // Called by Tools for Agm logging
 #define TABLE_DPMCLOCKS          4 // Called by Driver; defined here, but not used, for backward compatible
 #define TABLE_MOMENTARY_PM       5 // Called by Tools; defined here, but not used, for backward compatible
-#define TABLE_COUNT              6
+#define TABLE_SMU_METRICS        6 // Called by Driver
+#define TABLE_COUNT              7
 
-#define NUM_DSPCLK_LEVELS              8
-#define NUM_SOCCLK_DPM_LEVELS  8
-#define NUM_DCEFCLK_DPM_LEVELS 4
-#define NUM_FCLK_DPM_LEVELS            4
-#define NUM_MEMCLK_DPM_LEVELS  4
+typedef struct SmuMetricsTable_t {
+       //CPU status
+       uint16_t CoreFrequency[6];              //[MHz]
+       uint32_t CorePower[6];                  //[mW]
+       uint16_t CoreTemperature[6];            //[centi-Celsius]
+       uint16_t L3Frequency[2];                //[MHz]
+       uint16_t L3Temperature[2];              //[centi-Celsius]
+       uint16_t C0Residency[6];                //Percentage
 
-#define NUMBER_OF_PSTATES              8
-#define NUMBER_OF_CORES                        8
+       // GFX status
+       uint16_t GfxclkFrequency;               //[MHz]
+       uint16_t GfxTemperature;                //[centi-Celsius]
 
-typedef enum {
-       S3_TYPE_ENTRY,
-       S5_TYPE_ENTRY,
-} Sleep_Type_e;
+       // SOC IP info
+       uint16_t SocclkFrequency;               //[MHz]
+       uint16_t VclkFrequency;                 //[MHz]
+       uint16_t DclkFrequency;                 //[MHz]
+       uint16_t MemclkFrequency;               //[MHz]
 
-typedef enum {
-       GFX_OFF = 0,
-       GFX_ON  = 1,
-} GFX_Mode_e;
+       // power, VF info for CPU/GFX telemetry rails, and then socket power total
+       uint32_t Voltage[2];                    //[mV] indices: VDDCR_VDD, VDDCR_GFX
+       uint32_t Current[2];                    //[mA] indices: VDDCR_VDD, VDDCR_GFX
+       uint32_t Power[2];                      //[mW] indices: VDDCR_VDD, VDDCR_GFX
+       uint32_t CurrentSocketPower;            //[mW]
 
-typedef enum {
-       CPU_P0 = 0,
-       CPU_P1,
-       CPU_P2,
-       CPU_P3,
-       CPU_P4,
-       CPU_P5,
-       CPU_P6,
-       CPU_P7
-} CPU_PState_e;
+       uint16_t SocTemperature;                //[centi-Celsius]
+       uint16_t EdgeTemperature;
+       uint16_t ThrottlerStatus;
+       uint16_t Spare;
 
-typedef enum {
-       CPU_CORE0 = 0,
-       CPU_CORE1,
-       CPU_CORE2,
-       CPU_CORE3,
-       CPU_CORE4,
-       CPU_CORE5,
-       CPU_CORE6,
-       CPU_CORE7
-} CORE_ID_e;
+} SmuMetricsTable_t;
 
-typedef enum {
-       DF_DPM0 = 0,
-       DF_DPM1,
-       DF_DPM2,
-       DF_DPM3,
-       DF_PState_Count
-} DF_PState_e;
-
-typedef enum {
-       GFX_DPM0 = 0,
-       GFX_DPM1,
-       GFX_DPM2,
-       GFX_DPM3,
-       GFX_PState_Count
-} GFX_PState_e;
+typedef struct SmuMetrics_t {
+       SmuMetricsTable_t Current;
+       SmuMetricsTable_t Average;
+       uint32_t SampleStartTime;
+       uint32_t SampleStopTime;
+       uint32_t Accnt;
+} SmuMetrics_t;
 
 #endif
index 6f1b1b5..18b862a 100644 (file)
        __SMU_DUMMY_MAP(SetUclkDpmMode),                \
        __SMU_DUMMY_MAP(LightSBR),                      \
        __SMU_DUMMY_MAP(GfxDriverResetRecovery),        \
-       __SMU_DUMMY_MAP(BoardPowerCalibration),
+       __SMU_DUMMY_MAP(BoardPowerCalibration),   \
+       __SMU_DUMMY_MAP(RequestGfxclk),           \
+       __SMU_DUMMY_MAP(ForceGfxVid),             \
+       __SMU_DUMMY_MAP(UnforceGfxVid),
 
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type)  SMU_MSG_##type
index 6e60887..909a86a 100644 (file)
 #define PPSMC_MSG_SetDriverTableVMID                    0x34
 #define PPSMC_MSG_SetSoftMinCclk                        0x35
 #define PPSMC_MSG_SetSoftMaxCclk                        0x36
-#define PPSMC_Message_Count                             0x37
+#define PPSMC_MSG_GetGfxFrequency                       0x37
+#define PPSMC_MSG_GetGfxVid                             0x38
+#define PPSMC_MSG_ForceGfxFreq                          0x39
+#define PPSMC_MSG_UnForceGfxFreq                        0x3A
+#define PPSMC_MSG_ForceGfxVid                           0x3B
+#define PPSMC_MSG_UnforceGfxVid                         0x3C
+#define PPSMC_MSG_GetEnabledSmuFeatures                 0x3D
+#define PPSMC_Message_Count                             0x3E
 
 #endif
index bdbbeb9..81f82aa 100644 (file)
@@ -6867,6 +6867,8 @@ static int si_dpm_enable(struct amdgpu_device *adev)
        si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
        si_thermal_start_thermal_controller(adev);
 
+       ni_update_current_ps(adev, boot_ps);
+
        return 0;
 }
 
index 3ab1ce4..04863a7 100644 (file)
@@ -1404,7 +1404,7 @@ static int smu_disable_dpms(struct smu_context *smu)
         */
        if (smu->uploading_custom_pp_table &&
            (adev->asic_type >= CHIP_NAVI10) &&
-           (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
+           (adev->asic_type <= CHIP_BEIGE_GOBY))
                return smu_disable_all_features_with_exception(smu,
                                                               true,
                                                               SMU_FEATURE_COUNT);
index e343cc2..082f018 100644 (file)
@@ -771,8 +771,12 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
        struct smu_11_0_dpm_context *dpm_context = NULL;
        uint32_t gen_speed, lane_width;
 
-       if (amdgpu_ras_intr_triggered())
-               return sysfs_emit(buf, "unavailable\n");
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
+       if (amdgpu_ras_intr_triggered()) {
+               size += sysfs_emit_at(buf, size, "unavailable\n");
+               return size;
+       }
 
        dpm_context = smu_dpm->dpm_context;
 
index b05f954..3d4c65b 100644 (file)
 #undef pr_info
 #undef pr_debug
 
+/* unit: MHz */
+#define CYAN_SKILLFISH_SCLK_MIN                        1000
+#define CYAN_SKILLFISH_SCLK_MAX                        2000
+#define CYAN_SKILLFISH_SCLK_DEFAULT                    1800
+
+/* unit: mV */
+#define CYAN_SKILLFISH_VDDC_MIN                        700
+#define CYAN_SKILLFISH_VDDC_MAX                        1129
+#define CYAN_SKILLFISH_VDDC_MAGIC                      5118 // 0x13fe
+
+static struct gfx_user_settings {
+       uint32_t sclk;
+       uint32_t vddc;
+} cyan_skillfish_user_settings;
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+#define SMC_DPM_FEATURE ( \
+       FEATURE_MASK(FEATURE_FCLK_DPM_BIT)      |       \
+       FEATURE_MASK(FEATURE_SOC_DPM_BIT)       |       \
+       FEATURE_MASK(FEATURE_GFX_DPM_BIT))
+
 static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] = {
        MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,                  0),
        MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion,                0),
@@ -52,14 +73,473 @@ static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT]
        MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverTableDramAddrLow,    0),
        MSG_MAP(TransferTableSmu2Dram,          PPSMC_MSG_TransferTableSmu2Dram,        0),
        MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu,        0),
+       MSG_MAP(GetEnabledSmuFeatures,          PPSMC_MSG_GetEnabledSmuFeatures,        0),
+       MSG_MAP(RequestGfxclk,                  PPSMC_MSG_RequestGfxclk,                0),
+       MSG_MAP(ForceGfxVid,                    PPSMC_MSG_ForceGfxVid,                  0),
+       MSG_MAP(UnforceGfxVid,                  PPSMC_MSG_UnforceGfxVid,                0),
+};
+
+static struct cmn2asic_mapping cyan_skillfish_table_map[SMU_TABLE_COUNT] = {
+       TAB_MAP_VALID(SMU_METRICS),
 };
 
+static int cyan_skillfish_tables_init(struct smu_context *smu)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct smu_table *tables = smu_table->tables;
+
+       SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
+                               sizeof(SmuMetrics_t),
+                               PAGE_SIZE,
+                               AMDGPU_GEM_DOMAIN_VRAM);
+
+       smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+       if (!smu_table->metrics_table)
+               goto err0_out;
+
+       smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+       smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+       if (!smu_table->gpu_metrics_table)
+               goto err1_out;
+
+       smu_table->metrics_time = 0;
+
+       return 0;
+
+err1_out:
+       smu_table->gpu_metrics_table_size = 0;
+       kfree(smu_table->metrics_table);
+err0_out:
+       return -ENOMEM;
+}
+
+static int cyan_skillfish_init_smc_tables(struct smu_context *smu)
+{
+       int ret = 0;
+
+       ret = cyan_skillfish_tables_init(smu);
+       if (ret)
+               return ret;
+
+       return smu_v11_0_init_smc_tables(smu);
+}
+
+static int cyan_skillfish_finit_smc_tables(struct smu_context *smu)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+
+       kfree(smu_table->metrics_table);
+       smu_table->metrics_table = NULL;
+
+       kfree(smu_table->gpu_metrics_table);
+       smu_table->gpu_metrics_table = NULL;
+       smu_table->gpu_metrics_table_size = 0;
+
+       smu_table->metrics_time = 0;
+
+       return 0;
+}
+
+static int
+cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
+                                       MetricsMember_t member,
+                                       uint32_t *value)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+       int ret = 0;
+
+       mutex_lock(&smu->metrics_lock);
+
+       ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
+       if (ret) {
+               mutex_unlock(&smu->metrics_lock);
+               return ret;
+       }
+
+       switch (member) {
+       case METRICS_CURR_GFXCLK:
+               *value = metrics->Current.GfxclkFrequency;
+               break;
+       case METRICS_CURR_SOCCLK:
+               *value = metrics->Current.SocclkFrequency;
+               break;
+       case METRICS_CURR_VCLK:
+               *value = metrics->Current.VclkFrequency;
+               break;
+       case METRICS_CURR_DCLK:
+               *value = metrics->Current.DclkFrequency;
+               break;
+       case METRICS_CURR_UCLK:
+               *value = metrics->Current.MemclkFrequency;
+               break;
+       case METRICS_AVERAGE_SOCKETPOWER:
+               *value = (metrics->Current.CurrentSocketPower << 8) /
+                               1000;
+               break;
+       case METRICS_TEMPERATURE_EDGE:
+               *value = metrics->Current.GfxTemperature / 100 *
+                               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+               break;
+       case METRICS_TEMPERATURE_HOTSPOT:
+               *value = metrics->Current.SocTemperature / 100 *
+                               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+               break;
+       case METRICS_VOLTAGE_VDDSOC:
+               *value = metrics->Current.Voltage[0];
+               break;
+       case METRICS_VOLTAGE_VDDGFX:
+               *value = metrics->Current.Voltage[1];
+               break;
+       case METRICS_THROTTLER_STATUS:
+               *value = metrics->Current.ThrottlerStatus;
+               break;
+       default:
+               *value = UINT_MAX;
+               break;
+       }
+
+       mutex_unlock(&smu->metrics_lock);
+
+       return ret;
+}
+
+static int cyan_skillfish_read_sensor(struct smu_context *smu,
+                                       enum amd_pp_sensors sensor,
+                                       void *data,
+                                       uint32_t *size)
+{
+       int ret = 0;
+
+       if (!data || !size)
+               return -EINVAL;
+
+       mutex_lock(&smu->sensor_lock);
+
+       switch (sensor) {
+       case AMDGPU_PP_SENSOR_GFX_SCLK:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_CURR_GFXCLK,
+                                                  (uint32_t *)data);
+               *(uint32_t *)data *= 100;
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_GFX_MCLK:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_CURR_UCLK,
+                                                  (uint32_t *)data);
+               *(uint32_t *)data *= 100;
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_GPU_POWER:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_AVERAGE_SOCKETPOWER,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_TEMPERATURE_HOTSPOT,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_EDGE_TEMP:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_TEMPERATURE_EDGE,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_VDDNB:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_VOLTAGE_VDDSOC,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       case AMDGPU_PP_SENSOR_VDDGFX:
+               ret = cyan_skillfish_get_smu_metrics_data(smu,
+                                                  METRICS_VOLTAGE_VDDGFX,
+                                                  (uint32_t *)data);
+               *size = 4;
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+               break;
+       }
+
+       mutex_unlock(&smu->sensor_lock);
+
+       return ret;
+}
+
+static int cyan_skillfish_get_current_clk_freq(struct smu_context *smu,
+                                               enum smu_clk_type clk_type,
+                                               uint32_t *value)
+{
+       MetricsMember_t member_type;
+
+       switch (clk_type) {
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+               member_type = METRICS_CURR_GFXCLK;
+               break;
+       case SMU_FCLK:
+       case SMU_MCLK:
+               member_type = METRICS_CURR_UCLK;
+               break;
+       case SMU_SOCCLK:
+               member_type = METRICS_CURR_SOCCLK;
+               break;
+       case SMU_VCLK:
+               member_type = METRICS_CURR_VCLK;
+               break;
+       case SMU_DCLK:
+               member_type = METRICS_CURR_DCLK;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return cyan_skillfish_get_smu_metrics_data(smu, member_type, value);
+}
+
+static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
+                                       enum smu_clk_type clk_type,
+                                       char *buf)
+{
+       int ret = 0, size = 0;
+       uint32_t cur_value = 0;
+
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
+       switch (clk_type) {
+       case SMU_OD_SCLK:
+               ret  = cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, &cur_value);
+               if (ret)
+                       return ret;
+               size += sysfs_emit_at(buf, size,"%s:\n", "OD_SCLK");
+               size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
+               break;
+       case SMU_OD_VDDC_CURVE:
+               ret  = cyan_skillfish_get_smu_metrics_data(smu, METRICS_VOLTAGE_VDDGFX, &cur_value);
+               if (ret)
+                       return ret;
+               size += sysfs_emit_at(buf, size,"%s:\n", "OD_VDDC");
+               size += sysfs_emit_at(buf, size, "0: %umV *\n", cur_value);
+               break;
+       case SMU_OD_RANGE:
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+               size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+                                               CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+               size += sysfs_emit_at(buf, size, "VDDC: %7umV  %10umV\n",
+                                               CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+               break;
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+       case SMU_FCLK:
+       case SMU_MCLK:
+       case SMU_SOCCLK:
+       case SMU_VCLK:
+       case SMU_DCLK:
+               ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value);
+               if (ret)
+                       return ret;
+               size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
+               break;
+       default:
+               dev_warn(smu->adev->dev, "Unsupported clock type\n");
+               return ret;
+       }
+
+       return size;
+}
+
+static bool cyan_skillfish_is_dpm_running(struct smu_context *smu)
+{
+       struct amdgpu_device *adev = smu->adev;
+       int ret = 0;
+       uint32_t feature_mask[2];
+       uint64_t feature_enabled;
+
+       /* we need to re-init after suspend so return false */
+       if (adev->in_suspend)
+               return false;
+
+       ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+
+       if (ret)
+               return false;
+
+       feature_enabled = (uint64_t)feature_mask[0] |
+                               ((uint64_t)feature_mask[1] << 32);
+
+       return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static ssize_t cyan_skillfish_get_gpu_metrics(struct smu_context *smu,
+                                               void **table)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct gpu_metrics_v2_2 *gpu_metrics =
+               (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
+       SmuMetrics_t metrics;
+       int i, ret = 0;
+
+       ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+       if (ret)
+               return ret;
+
+       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
+
+       gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
+       gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
+
+       gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
+       gpu_metrics->average_soc_power = metrics.Current.Power[0];
+       gpu_metrics->average_gfx_power = metrics.Current.Power[1];
+
+       gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
+       gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
+       gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
+       gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
+       gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
+       gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;
+
+       gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
+       gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
+       gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
+       gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
+       gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
+       gpu_metrics->current_dclk = metrics.Current.DclkFrequency;
+
+       for (i = 0; i < 6; i++) {
+               gpu_metrics->temperature_core[i] = metrics.Current.CoreTemperature[i];
+               gpu_metrics->average_core_power[i] = metrics.Average.CorePower[i];
+               gpu_metrics->current_coreclk[i] = metrics.Current.CoreFrequency[i];
+       }
+
+       for (i = 0; i < 2; i++) {
+               gpu_metrics->temperature_l3[i] = metrics.Current.L3Temperature[i];
+               gpu_metrics->current_l3clk[i] = metrics.Current.L3Frequency[i];
+       }
+
+       gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
+       gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+       *table = (void *)gpu_metrics;
+
+       return sizeof(struct gpu_metrics_v2_2);
+}
+
+static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu,
+                                       enum PP_OD_DPM_TABLE_COMMAND type,
+                                       long input[], uint32_t size)
+{
+       int ret = 0;
+       uint32_t vid;
+
+       switch (type) {
+       case PP_OD_EDIT_VDDC_CURVE:
+               if (size != 3 || input[0] != 0) {
+                       dev_err(smu->adev->dev, "Invalid parameter!\n");
+                       return -EINVAL;
+               }
+
+               if (input[1] <= CYAN_SKILLFISH_SCLK_MIN ||
+                       input[1] > CYAN_SKILLFISH_SCLK_MAX) {
+                       dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n",
+                                       CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+                       return -EINVAL;
+               }
+
+               if (input[2] <= CYAN_SKILLFISH_VDDC_MIN ||
+                       input[2] > CYAN_SKILLFISH_VDDC_MAX) {
+                       dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
+                                       CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+                       return -EINVAL;
+               }
+
+               cyan_skillfish_user_settings.sclk = input[1];
+               cyan_skillfish_user_settings.vddc = input[2];
+
+               break;
+       case PP_OD_RESTORE_DEFAULT_TABLE:
+               if (size != 0) {
+                       dev_err(smu->adev->dev, "Invalid parameter!\n");
+                       return -EINVAL;
+               }
+
+               cyan_skillfish_user_settings.sclk = CYAN_SKILLFISH_SCLK_DEFAULT;
+               cyan_skillfish_user_settings.vddc = CYAN_SKILLFISH_VDDC_MAGIC;
+
+               break;
+       case PP_OD_COMMIT_DPM_TABLE:
+               if (size != 0) {
+                       dev_err(smu->adev->dev, "Invalid parameter!\n");
+                       return -EINVAL;
+               }
+
+               if (cyan_skillfish_user_settings.sclk < CYAN_SKILLFISH_SCLK_MIN ||
+                   cyan_skillfish_user_settings.sclk > CYAN_SKILLFISH_SCLK_MAX) {
+                       dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n",
+                                       CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
+                       return -EINVAL;
+               }
+
+               if ((cyan_skillfish_user_settings.vddc != CYAN_SKILLFISH_VDDC_MAGIC) &&
+                       (cyan_skillfish_user_settings.vddc < CYAN_SKILLFISH_VDDC_MIN ||
+                       cyan_skillfish_user_settings.vddc > CYAN_SKILLFISH_VDDC_MAX)) {
+                       dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
+                                       CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
+                       return -EINVAL;
+               }
+
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestGfxclk,
+                                       cyan_skillfish_user_settings.sclk, NULL);
+               if (ret) {
+                       dev_err(smu->adev->dev, "Set sclk failed!\n");
+                       return ret;
+               }
+
+               if (cyan_skillfish_user_settings.vddc == CYAN_SKILLFISH_VDDC_MAGIC) {
+                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_UnforceGfxVid, NULL);
+                       if (ret) {
+                               dev_err(smu->adev->dev, "Unforce vddc failed!\n");
+                               return ret;
+                       }
+               } else {
+                       /*
+                        * PMFW accepts SVI2 VID code, convert voltage to VID:
+                        * vid = (uint32_t)((1.55 - voltage) * 160.0 + 0.00001)
+                        */
+                       vid = (1550 - cyan_skillfish_user_settings.vddc) * 160 / 1000;
+                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ForceGfxVid, vid, NULL);
+                       if (ret) {
+                               dev_err(smu->adev->dev, "Force vddc failed!\n");
+                               return ret;
+                       }
+               }
+
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return ret;
+}
+
 static const struct pptable_funcs cyan_skillfish_ppt_funcs = {
 
        .check_fw_status = smu_v11_0_check_fw_status,
        .check_fw_version = smu_v11_0_check_fw_version,
        .init_power = smu_v11_0_init_power,
        .fini_power = smu_v11_0_fini_power,
+       .init_smc_tables = cyan_skillfish_init_smc_tables,
+       .fini_smc_tables = cyan_skillfish_finit_smc_tables,
+       .read_sensor = cyan_skillfish_read_sensor,
+       .print_clk_levels = cyan_skillfish_print_clk_levels,
+       .is_dpm_running = cyan_skillfish_is_dpm_running,
+       .get_gpu_metrics = cyan_skillfish_get_gpu_metrics,
+       .od_edit_dpm_table = cyan_skillfish_od_edit_dpm_table,
        .register_irq_handler = smu_v11_0_register_irq_handler,
        .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
        .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
@@ -72,5 +552,6 @@ void cyan_skillfish_set_ppt_funcs(struct smu_context *smu)
 {
        smu->ppt_funcs = &cyan_skillfish_ppt_funcs;
        smu->message_map = cyan_skillfish_message_map;
+       smu->table_map = cyan_skillfish_table_map;
        smu->is_apu = true;
 }
index a5fc5d7..b1ad451 100644 (file)
@@ -1279,6 +1279,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
        struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
        uint32_t min_value, max_value;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_GFXCLK:
        case SMU_SCLK:
@@ -1392,7 +1394,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
        case SMU_OD_RANGE:
                if (!smu->od_enabled || !od_table || !od_settings)
                        break;
-               size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
 
                if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
                        navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
@@ -2272,7 +2274,27 @@ static int navi10_baco_enter(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       if (adev->in_runpm)
+       /*
+        * This aims the case below:
+        *   amdgpu driver loaded -> runpm suspend kicked -> sound driver loaded
+        *
+        * For NAVI10 and later ASICs, we rely on PMFW to handle the runpm. To
+        * make that possible, PMFW needs to acknowledge the dstate transition
+        * process for both gfx(function 0) and audio(function 1) function of
+        * the ASIC.
+        *
+        * The PCI device's initial runpm status is RUNPM_SUSPENDED. So as the
+        * device representing the audio function of the ASIC. And that means
+        * even if the sound driver(snd_hda_intel) was not loaded yet, it's still
+        * possible runpm suspend kicked on the ASIC. However without the dstate
+        * transition notification from audio function, pmfw cannot handle the
+        * BACO in/exit correctly. And that will cause driver hang on runpm
+        * resuming.
+        *
+        * To address this, we revert to legacy message way(driver masters the
+        * timing for BACO in/exit) on sound driver missing.
+        */
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
                return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
        else
                return smu_v11_0_baco_enter(smu);
@@ -2282,7 +2304,7 @@ static int navi10_baco_exit(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       if (adev->in_runpm) {
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
                /* Wait for PMFW handling for the Dstate change */
                msleep(10);
                return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
index 5e292c3..ca57221 100644 (file)
@@ -1058,6 +1058,8 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
        uint32_t min_value, max_value;
        uint32_t smu_version;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_GFXCLK:
        case SMU_SCLK:
@@ -1180,7 +1182,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
                if (!smu->od_enabled || !od_table || !od_settings)
                        break;
 
-               size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
 
                if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
                        sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN,
@@ -2187,7 +2189,7 @@ static int sienna_cichlid_baco_enter(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       if (adev->in_runpm)
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
                return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
        else
                return smu_v11_0_baco_enter(smu);
@@ -2197,7 +2199,7 @@ static int sienna_cichlid_baco_exit(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
 
-       if (adev->in_runpm) {
+       if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
                /* Wait for PMFW handling for the Dstate change */
                msleep(10);
                return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
index 3a34214..f6ef0ce 100644 (file)
@@ -589,10 +589,12 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
        if (ret)
                return ret;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_OD_SCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -601,7 +603,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
                break;
        case SMU_OD_CCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
+                       size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -610,7 +612,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
                break;
        case SMU_OD_RANGE:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
                        size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
                        size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
@@ -688,10 +690,12 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
        if (ret)
                return ret;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_OD_SCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -700,7 +704,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
                break;
        case SMU_OD_CCLK:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
+                       size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
                        size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                        (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
                        size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@@ -709,7 +713,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
                break;
        case SMU_OD_RANGE:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
-                       size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+                       size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
                        size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
                        size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
index 5aa175e..145f13b 100644 (file)
@@ -497,6 +497,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
        if (ret)
                return ret;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_OD_RANGE:
                if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
index ab65202..5019903 100644 (file)
@@ -733,15 +733,19 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
        uint32_t freq_values[3] = {0};
        uint32_t min_clk, max_clk;
 
-       if (amdgpu_ras_intr_triggered())
-               return sysfs_emit(buf, "unavailable\n");
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
+       if (amdgpu_ras_intr_triggered()) {
+               size += sysfs_emit_at(buf, size, "unavailable\n");
+               return size;
+       }
 
        dpm_context = smu_dpm->dpm_context;
 
        switch (type) {
 
        case SMU_OD_SCLK:
-               size = sysfs_emit(buf, "%s:\n", "GFXCLK");
+               size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
                fallthrough;
        case SMU_SCLK:
                ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
@@ -795,7 +799,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
                break;
 
        case SMU_OD_MCLK:
-               size = sysfs_emit(buf, "%s:\n", "MCLK");
+               size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
                fallthrough;
        case SMU_MCLK:
                ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
index 627ba2e..a403657 100644 (file)
@@ -1052,16 +1052,18 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
        int i, size = 0, ret = 0;
        uint32_t cur_value = 0, value = 0, count = 0;
 
+       smu_cmn_get_sysfs_buf(&buf, &size);
+
        switch (clk_type) {
        case SMU_OD_SCLK:
-               size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
                size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
                (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
                size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
                (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
                break;
        case SMU_OD_RANGE:
-               size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+               size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
                size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
                                                smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
                break;
index 66711ab..843d2cb 100644 (file)
@@ -1053,3 +1053,24 @@ int smu_cmn_set_mp1_state(struct smu_context *smu,
 
        return ret;
 }
+
+bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
+{
+       struct pci_dev *p = NULL;
+       bool snd_driver_loaded;
+
+       /*
+        * If the ASIC comes with no audio function, we always assume
+        * it is "enabled".
+        */
+       p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+                       adev->pdev->bus->number, 1);
+       if (!p)
+               return true;
+
+       snd_driver_loaded = pci_is_enabled(p) ? true : false;
+
+       pci_dev_put(p);
+
+       return snd_driver_loaded;
+}
index 16993da..beea038 100644 (file)
@@ -110,5 +110,20 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
 int smu_cmn_set_mp1_state(struct smu_context *smu,
                          enum pp_mp1_state mp1_state);
 
+/*
+ * Helper function to make sysfs_emit_at() happy. Align buf to
+ * the current page boundary and record the offset.
+ */
+static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset)
+{
+       if (!*buf || !offset)
+               return;
+
+       *offset = offset_in_page(*buf);
+       *buf -= *offset;
+}
+
+bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
+
 #endif
 #endif
index 76d3856..cf741c5 100644 (file)
@@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
                if (switch_mmu_context) {
                        struct etnaviv_iommu_context *old_context = gpu->mmu_context;
 
-                       etnaviv_iommu_context_get(mmu_context);
-                       gpu->mmu_context = mmu_context;
+                       gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
                        etnaviv_iommu_context_put(old_context);
                }
 
index 8f1b5af..f0b2540 100644 (file)
@@ -294,8 +294,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
                list_del(&mapping->obj_node);
        }
 
-       etnaviv_iommu_context_get(mmu_context);
-       mapping->context = mmu_context;
+       mapping->context = etnaviv_iommu_context_get(mmu_context);
        mapping->use = 1;
 
        ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
index 4dd7d9d..486259e 100644 (file)
@@ -532,8 +532,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
                goto err_submit_objects;
 
        submit->ctx = file->driver_priv;
-       etnaviv_iommu_context_get(submit->ctx->mmu);
-       submit->mmu_context = submit->ctx->mmu;
+       submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
        submit->exec_state = args->exec_state;
        submit->flags = args->flags;
 
index c297fff..cc5b07f 100644 (file)
@@ -569,6 +569,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
        /* We rely on the GPU running, so program the clock */
        etnaviv_gpu_update_clock(gpu);
 
+       gpu->fe_running = false;
+       gpu->exec_state = -1;
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = NULL;
+
        return 0;
 }
 
@@ -637,19 +643,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
                          VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
                          VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
        }
+
+       gpu->fe_running = true;
 }
 
-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
+static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
+                                         struct etnaviv_iommu_context *context)
 {
-       u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
-                               &gpu->mmu_context->cmdbuf_mapping);
        u16 prefetch;
+       u32 address;
 
        /* setup the MMU */
-       etnaviv_iommu_restore(gpu, gpu->mmu_context);
+       etnaviv_iommu_restore(gpu, context);
 
        /* Start command processor */
        prefetch = etnaviv_buffer_init(gpu);
+       address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+                                       &gpu->mmu_context->cmdbuf_mapping);
 
        etnaviv_gpu_start_fe(gpu, address, prefetch);
 }
@@ -832,7 +842,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
        /* Now program the hardware */
        mutex_lock(&gpu->lock);
        etnaviv_gpu_hw_init(gpu);
-       gpu->exec_state = -1;
        mutex_unlock(&gpu->lock);
 
        pm_runtime_mark_last_busy(gpu->dev);
@@ -1057,8 +1066,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
        spin_unlock(&gpu->event_spinlock);
 
        etnaviv_gpu_hw_init(gpu);
-       gpu->exec_state = -1;
-       gpu->mmu_context = NULL;
 
        mutex_unlock(&gpu->lock);
        pm_runtime_mark_last_busy(gpu->dev);
@@ -1370,14 +1377,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
                goto out_unlock;
        }
 
-       if (!gpu->mmu_context) {
-               etnaviv_iommu_context_get(submit->mmu_context);
-               gpu->mmu_context = submit->mmu_context;
-               etnaviv_gpu_start_fe_idleloop(gpu);
-       } else {
-               etnaviv_iommu_context_get(gpu->mmu_context);
-               submit->prev_mmu_context = gpu->mmu_context;
-       }
+       if (!gpu->fe_running)
+               etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
+
+       if (submit->prev_mmu_context)
+               etnaviv_iommu_context_put(submit->prev_mmu_context);
+       submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
 
        if (submit->nr_pmrs) {
                gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
@@ -1579,7 +1584,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
 
 static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
 {
-       if (gpu->initialized && gpu->mmu_context) {
+       if (gpu->initialized && gpu->fe_running) {
                /* Replace the last WAIT with END */
                mutex_lock(&gpu->lock);
                etnaviv_buffer_end(gpu);
@@ -1592,8 +1597,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
                 */
                etnaviv_gpu_wait_idle(gpu, 100);
 
-               etnaviv_iommu_context_put(gpu->mmu_context);
-               gpu->mmu_context = NULL;
+               gpu->fe_running = false;
        }
 
        gpu->exec_state = -1;
@@ -1741,6 +1745,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
        etnaviv_gpu_hw_suspend(gpu);
 #endif
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+
        if (gpu->initialized) {
                etnaviv_cmdbuf_free(&gpu->buffer);
                etnaviv_iommu_global_fini(gpu);
index 8ea4869..1c75c8e 100644 (file)
@@ -101,6 +101,7 @@ struct etnaviv_gpu {
        struct workqueue_struct *wq;
        struct drm_gpu_scheduler sched;
        bool initialized;
+       bool fe_running;
 
        /* 'ring'-buffer: */
        struct etnaviv_cmdbuf buffer;
index 1a7c89a..afe5dd6 100644 (file)
@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
        struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
        u32 pgtable;
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = etnaviv_iommu_context_get(context);
+
        /* set base addresses */
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
        gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
index f8bf488..d664ae2 100644 (file)
@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
        if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
                return;
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = etnaviv_iommu_context_get(context);
+
        prefetch = etnaviv_buffer_config_mmuv2(gpu,
                                (u32)v2_context->mtlb_dma,
                                (u32)context->global->bad_page_dma);
@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
        if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
                return;
 
+       if (gpu->mmu_context)
+               etnaviv_iommu_context_put(gpu->mmu_context);
+       gpu->mmu_context = etnaviv_iommu_context_get(context);
+
        gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
                  lower_32_bits(context->global->v2.pta_dma));
        gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
index dab1b58..9fb1a2a 100644 (file)
@@ -199,6 +199,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
                 */
                list_for_each_entry_safe(m, n, &list, scan_node) {
                        etnaviv_iommu_remove_mapping(context, m);
+                       etnaviv_iommu_context_put(m->context);
                        m->context = NULL;
                        list_del_init(&m->mmu_node);
                        list_del_init(&m->scan_node);
index d1d6902..e4a0b7d 100644 (file)
@@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
 struct etnaviv_iommu_context *
 etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
                           struct etnaviv_cmdbuf_suballoc *suballoc);
-static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
+static inline struct etnaviv_iommu_context *
+etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
 {
        kref_get(&ctx->refcount);
+       return ctx;
 }
 void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
 void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
index 642a5b5..335ba9f 100644 (file)
@@ -19,7 +19,6 @@ subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
 subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
 # clang warnings
 subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
-subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
 subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
 subdir-ccflags-y += $(call cc-disable-warning, frame-address)
 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
index e91e0e0..4b94256 100644 (file)
@@ -222,31 +222,42 @@ static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
 
 struct intel_sa_info {
        u16 displayrtids;
-       u8 deburst, deprogbwlimit;
+       u8 deburst, deprogbwlimit, derating;
 };
 
 static const struct intel_sa_info icl_sa_info = {
        .deburst = 8,
        .deprogbwlimit = 25, /* GB/s */
        .displayrtids = 128,
+       .derating = 10,
 };
 
 static const struct intel_sa_info tgl_sa_info = {
        .deburst = 16,
        .deprogbwlimit = 34, /* GB/s */
        .displayrtids = 256,
+       .derating = 10,
 };
 
 static const struct intel_sa_info rkl_sa_info = {
        .deburst = 16,
        .deprogbwlimit = 20, /* GB/s */
        .displayrtids = 128,
+       .derating = 10,
 };
 
 static const struct intel_sa_info adls_sa_info = {
        .deburst = 16,
        .deprogbwlimit = 38, /* GB/s */
        .displayrtids = 256,
+       .derating = 10,
+};
+
+static const struct intel_sa_info adlp_sa_info = {
+       .deburst = 16,
+       .deprogbwlimit = 38, /* GB/s */
+       .displayrtids = 256,
+       .derating = 20,
 };
 
 static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
@@ -302,7 +313,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
                        bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);
 
                        bi->deratedbw[j] = min(maxdebw,
-                                              bw * 9 / 10); /* 90% */
+                                              bw * (100 - sa->derating) / 100);
 
                        drm_dbg_kms(&dev_priv->drm,
                                    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
@@ -400,7 +411,9 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
 
        if (IS_DG2(dev_priv))
                dg2_get_bw_info(dev_priv);
-       else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
+       else if (IS_ALDERLAKE_P(dev_priv))
+               icl_get_bw_info(dev_priv, &adlp_sa_info);
+       else if (IS_ALDERLAKE_S(dev_priv))
                icl_get_bw_info(dev_priv, &adls_sa_info);
        else if (IS_ROCKETLAKE(dev_priv))
                icl_get_bw_info(dev_priv, &rkl_sa_info);
index 3c3c6cb..b3c8e1c 100644 (file)
@@ -805,11 +805,14 @@ void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv)
  */
 void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
 {
+       int id;
+
        if (!HAS_DMC(dev_priv))
                return;
 
        intel_dmc_ucode_suspend(dev_priv);
        drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);
 
-       kfree(dev_priv->dmc.dmc_info[DMC_FW_MAIN].payload);
+       for (id = 0; id < DMC_FW_MAX; id++)
+               kfree(dev_priv->dmc.dmc_info[id].payload);
 }
index 04175f3..abe3d61 100644 (file)
@@ -2445,11 +2445,14 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
         */
        if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
                             intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
-                            sizeof(intel_dp->edp_dpcd))
+                            sizeof(intel_dp->edp_dpcd)) {
                drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
                            (int)sizeof(intel_dp->edp_dpcd),
                            intel_dp->edp_dpcd);
 
+               intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
+       }
+
        /*
         * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
         * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
index 053a3c2..508a514 100644 (file)
@@ -848,7 +848,7 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
        }
 
        if (ret)
-               intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
+               ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
 
        if (intel_dp->set_idle_link_train)
                intel_dp->set_idle_link_train(intel_dp, crtc_state);
index cff7267..9ccf4b2 100644 (file)
@@ -986,6 +986,9 @@ void i915_gem_context_release(struct kref *ref)
        trace_i915_context_free(ctx);
        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
+       if (ctx->syncobj)
+               drm_syncobj_put(ctx->syncobj);
+
        mutex_destroy(&ctx->engines_mutex);
        mutex_destroy(&ctx->lut_mutex);
 
@@ -1205,9 +1208,6 @@ static void context_close(struct i915_gem_context *ctx)
        if (vm)
                i915_vm_close(vm);
 
-       if (ctx->syncobj)
-               drm_syncobj_put(ctx->syncobj);
-
        ctx->file_priv = ERR_PTR(-EBADF);
 
        /*
index 35eedc1..6ea1315 100644 (file)
@@ -356,11 +356,8 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
 {
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 
-       if (likely(obj)) {
-               /* This releases all gem object bindings to the backend. */
+       if (likely(obj))
                i915_ttm_free_cached_io_st(obj);
-               __i915_gem_free_object(obj);
-       }
 }
 
 static struct intel_memory_region *
@@ -875,8 +872,12 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
 {
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 
+       /* This releases all gem object bindings to the backend. */
+       __i915_gem_free_object(obj);
+
        i915_gem_object_release_memory_region(obj);
        mutex_destroy(&obj->ttm.get_io_page.lock);
+
        if (obj->ttm.created)
                call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
 }
index ffae7df..4a6bb64 100644 (file)
@@ -59,13 +59,13 @@ static int igt_dmabuf_import_self(void *arg)
                err = PTR_ERR(import);
                goto out_dmabuf;
        }
+       import_obj = to_intel_bo(import);
 
        if (import != &obj->base) {
                pr_err("i915_gem_prime_import created a new object!\n");
                err = -EINVAL;
                goto out_import;
        }
-       import_obj = to_intel_bo(import);
 
        i915_gem_object_lock(import_obj, NULL);
        err = __i915_gem_object_get_pages(import_obj);
@@ -128,6 +128,8 @@ static int igt_dmabuf_import_same_driver_lmem(void *arg)
                pr_err("i915_gem_prime_import failed with the wrong err=%ld\n",
                       PTR_ERR(import));
                err = PTR_ERR(import);
+       } else {
+               err = 0;
        }
 
        dma_buf_put(dmabuf);
@@ -176,6 +178,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
                err = PTR_ERR(import);
                goto out_dmabuf;
        }
+       import_obj = to_intel_bo(import);
 
        if (import == &obj->base) {
                pr_err("i915_gem_prime_import reused gem object!\n");
@@ -183,8 +186,6 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
                goto out_import;
        }
 
-       import_obj = to_intel_bo(import);
-
        i915_gem_object_lock(import_obj, NULL);
        err = __i915_gem_object_get_pages(import_obj);
        if (err) {
index b20f562..a2c34e5 100644 (file)
@@ -581,6 +581,20 @@ static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
        return I915_MMAP_TYPE_GTT;
 }
 
+static struct drm_i915_gem_object *
+create_sys_or_internal(struct drm_i915_private *i915,
+                      unsigned long size)
+{
+       if (HAS_LMEM(i915)) {
+               struct intel_memory_region *sys_region =
+                       i915->mm.regions[INTEL_REGION_SMEM];
+
+               return __i915_gem_object_create_user(i915, size, &sys_region, 1);
+       }
+
+       return i915_gem_object_create_internal(i915, size);
+}
+
 static bool assert_mmap_offset(struct drm_i915_private *i915,
                               unsigned long size,
                               int expected)
@@ -589,7 +603,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
        u64 offset;
        int ret;
 
-       obj = i915_gem_object_create_internal(i915, size);
+       obj = create_sys_or_internal(i915, size);
        if (IS_ERR(obj))
                return expected && expected == PTR_ERR(obj);
 
@@ -633,6 +647,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
        struct drm_mm_node *hole, *next;
        int loop, err = 0;
        u64 offset;
+       int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
 
        /* Disable background reaper */
        disable_retire_worker(i915);
@@ -683,14 +698,14 @@ static int igt_mmap_offset_exhaustion(void *arg)
        }
 
        /* Too large */
-       if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
+       if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
                pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
                err = -EINVAL;
                goto out;
        }
 
        /* Fill the hole, further allocation attempts should then fail */
-       obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+       obj = create_sys_or_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                pr_err("Unable to create object for reclaimed hole\n");
@@ -703,7 +718,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
                goto err_obj;
        }
 
-       if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
+       if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
                pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
                err = -EINVAL;
                goto err_obj;
@@ -839,10 +854,9 @@ static int wc_check(struct drm_i915_gem_object *obj)
 
 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
 {
-       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        bool no_map;
 
-       if (HAS_LMEM(i915))
+       if (obj->ops->mmap_offset)
                return type == I915_MMAP_TYPE_FIXED;
        else if (type == I915_MMAP_TYPE_FIXED)
                return false;
index d812b27..591a522 100644 (file)
@@ -1973,8 +1973,14 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
 u32 intel_rps_read_punit_req(struct intel_rps *rps)
 {
        struct intel_uncore *uncore = rps_to_uncore(rps);
+       struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
+       intel_wakeref_t wakeref;
+       u32 freq = 0;
 
-       return intel_uncore_read(uncore, GEN6_RPNSWREQ);
+       with_intel_runtime_pm_if_in_use(rpm, wakeref)
+               freq = intel_uncore_read(uncore, GEN6_RPNSWREQ);
+
+       return freq;
 }
 
 static u32 intel_rps_get_req(u32 pureq)
index b104fb7..86c3185 100644 (file)
@@ -172,11 +172,6 @@ void intel_uc_driver_remove(struct intel_uc *uc)
        __uc_free_load_err_log(uc);
 }
 
-static inline bool guc_communication_enabled(struct intel_guc *guc)
-{
-       return intel_guc_ct_enabled(&guc->ct);
-}
-
 /*
  * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
  * register using the same bits used in the CT message payload. Since our
@@ -210,7 +205,7 @@ static void guc_get_mmio_msg(struct intel_guc *guc)
 static void guc_handle_mmio_msg(struct intel_guc *guc)
 {
        /* we need communication to be enabled to reply to GuC */
-       GEM_BUG_ON(!guc_communication_enabled(guc));
+       GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));
 
        spin_lock_irq(&guc->irq_lock);
        if (guc->mmio_msg) {
@@ -226,7 +221,7 @@ static int guc_enable_communication(struct intel_guc *guc)
        struct drm_i915_private *i915 = gt->i915;
        int ret;
 
-       GEM_BUG_ON(guc_communication_enabled(guc));
+       GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));
 
        ret = i915_inject_probe_error(i915, -ENXIO);
        if (ret)
@@ -662,7 +657,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
                return 0;
 
        /* Make sure we enable communication if and only if it's disabled */
-       GEM_BUG_ON(enable_communication == guc_communication_enabled(guc));
+       GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));
 
        if (enable_communication)
                guc_enable_communication(guc);
index b0ece71..ce77457 100644 (file)
@@ -57,7 +57,7 @@ nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size)
                args->v0.count = 0;
                args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
                args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
-               args->v0.pwrsrc = -ENOSYS;
+               args->v0.pwrsrc = -ENODEV;
                args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN;
        }
 
index 0473583..482fb0a 100644 (file)
@@ -119,7 +119,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 #endif
 
        if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
-               rdev->agp = radeon_agp_head_init(rdev->ddev);
+               rdev->agp = radeon_agp_head_init(dev);
        if (rdev->agp) {
                rdev->agp->agp_mtrr = arch_phys_wc_add(
                        rdev->agp->agp_info.aper_base,
index 8ab3247..13c6b85 100644 (file)
@@ -1123,7 +1123,7 @@ static int cdn_dp_suspend(struct device *dev)
        return ret;
 }
 
-static int cdn_dp_resume(struct device *dev)
+static __maybe_unused int cdn_dp_resume(struct device *dev)
 {
        struct cdn_dp_device *dp = dev_get_drvdata(dev);
 
index cb38b1a..82cbb29 100644 (file)
@@ -383,7 +383,8 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
        else
                gfp_flags |= GFP_HIGHUSER;
 
-       for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages;
+       for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
+            num_pages;
             order = min_t(unsigned int, order, __fls(num_pages))) {
                bool apply_caching = false;
                struct ttm_pool_type *pt;
index 4a11150..b4b4653 100644 (file)
@@ -167,8 +167,6 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
        struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
        bool connected = false;
 
-       WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
-
        if (vc4_hdmi->hpd_gpio &&
            gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) {
                connected = true;
@@ -189,12 +187,10 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
                        }
                }
 
-               pm_runtime_put(&vc4_hdmi->pdev->dev);
                return connector_status_connected;
        }
 
        cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
-       pm_runtime_put(&vc4_hdmi->pdev->dev);
        return connector_status_disconnected;
 }
 
@@ -436,7 +432,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
        struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
        struct drm_connector *connector = &vc4_hdmi->connector;
        struct drm_connector_state *cstate = connector->state;
-       struct drm_crtc *crtc = cstate->crtc;
+       struct drm_crtc *crtc = encoder->crtc;
        const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
        union hdmi_infoframe frame;
        int ret;
@@ -541,11 +537,8 @@ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder,
 
 static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
 {
+       struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
-       struct drm_connector *connector = &vc4_hdmi->connector;
-       struct drm_connector_state *cstate = connector->state;
-       struct drm_crtc *crtc = cstate->crtc;
-       struct drm_display_mode *mode = &crtc->state->adjusted_mode;
 
        if (!vc4_hdmi_supports_scrambling(encoder, mode))
                return;
@@ -566,18 +559,17 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
 static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
 {
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
-       struct drm_connector *connector = &vc4_hdmi->connector;
-       struct drm_connector_state *cstate = connector->state;
+       struct drm_crtc *crtc = encoder->crtc;
 
        /*
-        * At boot, connector->state will be NULL. Since we don't know the
+        * At boot, encoder->crtc will be NULL. Since we don't know the
         * state of the scrambler and in order to avoid any
         * inconsistency, let's disable it all the time.
         */
-       if (cstate && !vc4_hdmi_supports_scrambling(encoder, &cstate->crtc->mode))
+       if (crtc && !vc4_hdmi_supports_scrambling(encoder, &crtc->mode))
                return;
 
-       if (cstate && !vc4_hdmi_mode_needs_scrambling(&cstate->crtc->mode))
+       if (crtc && !vc4_hdmi_mode_needs_scrambling(&crtc->mode))
                return;
 
        if (delayed_work_pending(&vc4_hdmi->scrambling_work))
@@ -635,6 +627,7 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
                vc4_hdmi->variant->phy_disable(vc4_hdmi);
 
        clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
+       clk_disable_unprepare(vc4_hdmi->hsm_clock);
        clk_disable_unprepare(vc4_hdmi->pixel_clock);
 
        ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
@@ -898,9 +891,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
                vc4_hdmi_encoder_get_connector_state(encoder, state);
        struct vc4_hdmi_connector_state *vc4_conn_state =
                conn_state_to_vc4_hdmi_conn_state(conn_state);
-       struct drm_crtc_state *crtc_state =
-               drm_atomic_get_new_crtc_state(state, conn_state->crtc);
-       struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+       struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
        unsigned long bvb_rate, pixel_rate, hsm_rate;
        int ret;
@@ -947,6 +938,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
                return;
        }
 
+       ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+       if (ret) {
+               DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
+               clk_disable_unprepare(vc4_hdmi->pixel_clock);
+               return;
+       }
+
        vc4_hdmi_cec_update_clk_div(vc4_hdmi);
 
        if (pixel_rate > 297000000)
@@ -959,6 +957,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
        ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
        if (ret) {
                DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
+               clk_disable_unprepare(vc4_hdmi->hsm_clock);
                clk_disable_unprepare(vc4_hdmi->pixel_clock);
                return;
        }
@@ -966,6 +965,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
        ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
        if (ret) {
                DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
+               clk_disable_unprepare(vc4_hdmi->hsm_clock);
                clk_disable_unprepare(vc4_hdmi->pixel_clock);
                return;
        }
@@ -985,11 +985,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
 static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
                                             struct drm_atomic_state *state)
 {
-       struct drm_connector_state *conn_state =
-               vc4_hdmi_encoder_get_connector_state(encoder, state);
-       struct drm_crtc_state *crtc_state =
-               drm_atomic_get_new_crtc_state(state, conn_state->crtc);
-       struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+       struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
 
@@ -1012,11 +1008,7 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
 static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
                                              struct drm_atomic_state *state)
 {
-       struct drm_connector_state *conn_state =
-               vc4_hdmi_encoder_get_connector_state(encoder, state);
-       struct drm_crtc_state *crtc_state =
-               drm_atomic_get_new_crtc_state(state, conn_state->crtc);
-       struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+       struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
        struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
        bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -1204,8 +1196,8 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
 
 static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate)
 {
-       struct drm_connector *connector = &vc4_hdmi->connector;
-       struct drm_crtc *crtc = connector->state->crtc;
+       struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
+       struct drm_crtc *crtc = encoder->crtc;
        const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
        u32 n, cts;
        u64 tmp;
@@ -1238,13 +1230,13 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai)
 static int vc4_hdmi_audio_startup(struct device *dev, void *data)
 {
        struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-       struct drm_connector *connector = &vc4_hdmi->connector;
+       struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
 
        /*
         * If the HDMI encoder hasn't probed, or the encoder is
         * currently in DVI mode, treat the codec dai as missing.
         */
-       if (!connector->state || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
+       if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
                                VC4_HDMI_RAM_PACKET_ENABLE))
                return -ENODEV;
 
@@ -2114,29 +2106,6 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int vc4_hdmi_runtime_suspend(struct device *dev)
-{
-       struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-
-       clk_disable_unprepare(vc4_hdmi->hsm_clock);
-
-       return 0;
-}
-
-static int vc4_hdmi_runtime_resume(struct device *dev)
-{
-       struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-       int ret;
-
-       ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-#endif
-
 static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 {
        const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
@@ -2391,18 +2360,11 @@ static const struct of_device_id vc4_hdmi_dt_match[] = {
        {}
 };
 
-static const struct dev_pm_ops vc4_hdmi_pm_ops = {
-       SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
-                          vc4_hdmi_runtime_resume,
-                          NULL)
-};
-
 struct platform_driver vc4_hdmi_driver = {
        .probe = vc4_hdmi_dev_probe,
        .remove = vc4_hdmi_dev_remove,
        .driver = {
                .name = "vc4_hdmi",
                .of_match_table = vc4_hdmi_dt_match,
-               .pm = &vc4_hdmi_pm_ops,
        },
 };
index 79b138f..05c007b 100644 (file)
@@ -255,13 +255,13 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
        if (!privdata->cl_data)
                return -ENOMEM;
 
-       rc = devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
+       mp2_select_ops(privdata);
+
+       rc = amd_sfh_hid_client_init(privdata);
        if (rc)
                return rc;
 
-       mp2_select_ops(privdata);
-
-       return amd_sfh_hid_client_init(privdata);
+       return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
 }
 
 static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
index 833fcf0..6ccfa0c 100644 (file)
@@ -336,12 +336,19 @@ static int apple_event(struct hid_device *hdev, struct hid_field *field,
 
 /*
  * MacBook JIS keyboard has wrong logical maximum
+ * Magic Keyboard JIS has wrong logical maximum
  */
 static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
        struct apple_sc *asc = hid_get_drvdata(hdev);
 
+       if(*rsize >=71 && rdesc[70] == 0x65 && rdesc[64] == 0x65) {
+               hid_info(hdev,
+                        "fixing up Magic Keyboard JIS report descriptor\n");
+               rdesc[64] = rdesc[70] = 0xe7;
+       }
+
        if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 &&
                        rdesc[53] == 0x65 && rdesc[59] == 0x65) {
                hid_info(hdev,
index 0790fbd..467d789 100644 (file)
@@ -56,15 +56,22 @@ static int betopff_init(struct hid_device *hid)
 {
        struct betopff_device *betopff;
        struct hid_report *report;
-       struct hid_input *hidinput =
-                       list_first_entry(&hid->inputs, struct hid_input, list);
+       struct hid_input *hidinput;
        struct list_head *report_list =
                        &hid->report_enum[HID_OUTPUT_REPORT].report_list;
-       struct input_dev *dev = hidinput->input;
+       struct input_dev *dev;
        int field_count = 0;
        int error;
        int i, j;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+
+       hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+       dev = hidinput->input;
+
        if (list_empty(report_list)) {
                hid_err(hid, "no output reports found\n");
                return -ENODEV;
index 95e0807..d70cd3d 100644 (file)
@@ -198,7 +198,9 @@ static int u2fzero_rng_read(struct hwrng *rng, void *data,
        }
 
        ret = u2fzero_recv(dev, &req, &resp);
-       if (ret < 0)
+
+       /* ignore errors or packets without data */
+       if (ret < offsetof(struct u2f_hid_msg, init.data))
                return 0;
 
        /* only take the minimum amount of data it is safe to take */
index fd51769..33a6908 100644 (file)
@@ -4746,6 +4746,12 @@ static const struct wacom_features wacom_features_0x393 =
        { "Wacom Intuos Pro S", 31920, 19950, 8191, 63,
          INTUOSP2S_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7,
          .touch_max = 10 };
+static const struct wacom_features wacom_features_0x3c6 =
+       { "Wacom Intuos BT S", 15200, 9500, 4095, 63,
+         INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
+static const struct wacom_features wacom_features_0x3c8 =
+       { "Wacom Intuos BT M", 21600, 13500, 4095, 63,
+         INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
 
 static const struct wacom_features wacom_features_HID_ANY_ID =
        { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
@@ -4919,6 +4925,8 @@ const struct hid_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x37A) },
        { USB_DEVICE_WACOM(0x37B) },
        { BT_DEVICE_WACOM(0x393) },
+       { BT_DEVICE_WACOM(0x3c6) },
+       { BT_DEVICE_WACOM(0x3c8) },
        { USB_DEVICE_WACOM(0x4001) },
        { USB_DEVICE_WACOM(0x4004) },
        { USB_DEVICE_WACOM(0x5000) },
index 2aee356..314015d 100644 (file)
@@ -245,6 +245,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
        mutex_unlock(&ring_info->ring_buffer_mutex);
 
        kfree(ring_info->pkt_buffer);
+       ring_info->pkt_buffer = NULL;
        ring_info->pkt_buffer_size = 0;
 }
 
index fc0760f..4305456 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 
 #include "coresight-config.h"
 #include "coresight-etm-perf.h"
index 4d5924e..aca7b59 100644 (file)
@@ -409,6 +409,7 @@ config MESON_IRQ_GPIO
 config GOLDFISH_PIC
        bool "Goldfish programmable interrupt controller"
        depends on MIPS && (GOLDFISH || COMPILE_TEST)
+       select GENERIC_IRQ_CHIP
        select IRQ_DOMAIN
        help
          Say yes here to enable Goldfish interrupt controller driver used
index 7557ab5..53e0fb0 100644 (file)
@@ -359,16 +359,16 @@ static void armada_370_xp_ipi_send_mask(struct irq_data *d,
                ARMADA_370_XP_SW_TRIG_INT_OFFS);
 }
 
-static void armada_370_xp_ipi_eoi(struct irq_data *d)
+static void armada_370_xp_ipi_ack(struct irq_data *d)
 {
        writel(~BIT(d->hwirq), per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
 }
 
 static struct irq_chip ipi_irqchip = {
        .name           = "IPI",
+       .irq_ack        = armada_370_xp_ipi_ack,
        .irq_mask       = armada_370_xp_ipi_mask,
        .irq_unmask     = armada_370_xp_ipi_unmask,
-       .irq_eoi        = armada_370_xp_ipi_eoi,
        .ipi_send_mask  = armada_370_xp_ipi_send_mask,
 };
 
index 7f40dca..eb0882d 100644 (file)
@@ -4501,7 +4501,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
 
        if (err) {
                if (i > 0)
-                       its_vpe_irq_domain_free(domain, virq, i - 1);
+                       its_vpe_irq_domain_free(domain, virq, i);
 
                its_lpi_free(bitmap, base, nr_ids);
                its_free_prop_table(vprop_page);
index d329ec3..5f22c9d 100644 (file)
@@ -107,6 +107,8 @@ static DEFINE_RAW_SPINLOCK(cpu_map_lock);
 
 #endif
 
+static DEFINE_STATIC_KEY_FALSE(needs_rmw_access);
+
 /*
  * The GIC mapping of CPU interfaces does not necessarily match
  * the logical CPU numbering.  Let's use a mapping as returned
@@ -774,6 +776,25 @@ static int gic_pm_init(struct gic_chip_data *gic)
 #endif
 
 #ifdef CONFIG_SMP
+static void rmw_writeb(u8 bval, void __iomem *addr)
+{
+       static DEFINE_RAW_SPINLOCK(rmw_lock);
+       unsigned long offset = (unsigned long)addr & 3UL;
+       unsigned long shift = offset * 8;
+       unsigned long flags;
+       u32 val;
+
+       raw_spin_lock_irqsave(&rmw_lock, flags);
+
+       addr -= offset;
+       val = readl_relaxed(addr);
+       val &= ~GENMASK(shift + 7, shift);
+       val |= bval << shift;
+       writel_relaxed(val, addr);
+
+       raw_spin_unlock_irqrestore(&rmw_lock, flags);
+}
+
 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
 {
@@ -788,7 +809,10 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
        if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
                return -EINVAL;
 
-       writeb_relaxed(gic_cpu_map[cpu], reg);
+       if (static_branch_unlikely(&needs_rmw_access))
+               rmw_writeb(gic_cpu_map[cpu], reg);
+       else
+               writeb_relaxed(gic_cpu_map[cpu], reg);
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
        return IRQ_SET_MASK_OK_DONE;
@@ -1375,6 +1399,30 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
        return true;
 }
 
+static bool gic_enable_rmw_access(void *data)
+{
+       /*
+        * The EMEV2 class of machines has a broken interconnect, and
+        * locks up on accesses that are less than 32bit. So far, only
+        * the affinity setting requires it.
+        */
+       if (of_machine_is_compatible("renesas,emev2")) {
+               static_branch_enable(&needs_rmw_access);
+               return true;
+       }
+
+       return false;
+}
+
+static const struct gic_quirk gic_quirks[] = {
+       {
+               .desc           = "broken byte access",
+               .compatible     = "arm,pl390",
+               .init           = gic_enable_rmw_access,
+       },
+       { },
+};
+
 static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
 {
        if (!gic || !node)
@@ -1391,6 +1439,8 @@ static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
        if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
                gic->percpu_offset = 0;
 
+       gic_enable_of_quirks(node, gic_quirks, gic);
+
        return 0;
 
 error:
index f565317..12df216 100644 (file)
@@ -25,7 +25,7 @@
 /* The maximum IRQ pin number of mbigen chip(start from 0) */
 #define MAXIMUM_IRQ_PIN_NUM            1407
 
-/**
+/*
  * In mbigen vector register
  * bit[21:12]: event id value
  * bit[11:0]:  device id
 /* offset of vector register in mbigen node */
 #define REG_MBIGEN_VEC_OFFSET          0x200
 
-/**
+/*
  * offset of clear register in mbigen node
  * This register is used to clear the status
  * of interrupt
  */
 #define REG_MBIGEN_CLEAR_OFFSET                0xa000
 
-/**
+/*
  * offset of interrupt type register
  * This register is used to configure interrupt
  * trigger type
index b0d46ac..72c06e8 100644 (file)
@@ -223,12 +223,12 @@ static int rza1_irqc_probe(struct platform_device *pdev)
                goto out_put_node;
        }
 
-       priv->chip.name = "rza1-irqc",
-       priv->chip.irq_mask = irq_chip_mask_parent,
-       priv->chip.irq_unmask = irq_chip_unmask_parent,
-       priv->chip.irq_eoi = rza1_irqc_eoi,
-       priv->chip.irq_retrigger = irq_chip_retrigger_hierarchy,
-       priv->chip.irq_set_type = rza1_irqc_set_type,
+       priv->chip.name = "rza1-irqc";
+       priv->chip.irq_mask = irq_chip_mask_parent;
+       priv->chip.irq_unmask = irq_chip_unmask_parent;
+       priv->chip.irq_eoi = rza1_irqc_eoi;
+       priv->chip.irq_retrigger = irq_chip_retrigger_hierarchy;
+       priv->chip.irq_set_type = rza1_irqc_set_type;
        priv->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;
 
        priv->irq_domain = irq_domain_add_hierarchy(parent, 0, IRQC_NUM_IRQ,
index 94fb63a..fe63d5e 100644 (file)
@@ -570,7 +570,7 @@ fail_msg_node:
 fail_db_node:
        of_node_put(smu->db_node);
 fail_bootmem:
-       memblock_free(__pa(smu), sizeof(struct smu_device));
+       memblock_free_ptr(smu, sizeof(struct smu_device));
        smu = NULL;
 fail_np:
        of_node_put(np);
index edf4ee6..cf128b3 100644 (file)
@@ -275,8 +275,8 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
 
        bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL);
        if (bus_nr < 0) {
-               rc = bus_nr;
-               goto err_free;
+               kfree(bus);
+               return ERR_PTR(bus_nr);
        }
 
        bus->bus_nr = bus_nr;
@@ -291,12 +291,12 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
        dev_set_name(&bus->dev, "mcb:%d", bus_nr);
        rc = device_add(&bus->dev);
        if (rc)
-               goto err_free;
+               goto err_put;
 
        return bus;
-err_free:
-       put_device(carrier);
-       kfree(bus);
+
+err_put:
+       put_device(&bus->dev);
        return ERR_PTR(rc);
 }
 EXPORT_SYMBOL_NS_GPL(mcb_alloc_bus, MCB);
index ae8fe54..6c0c3d0 100644 (file)
@@ -5700,10 +5700,6 @@ static int md_alloc(dev_t dev, char *name)
        disk->flags |= GENHD_FL_EXT_DEVT;
        disk->events |= DISK_EVENT_MEDIA_CHANGE;
        mddev->gendisk = disk;
-       /* As soon as we call add_disk(), another thread could get
-        * through to md_open, so make sure it doesn't get too far
-        */
-       mutex_lock(&mddev->open_mutex);
        add_disk(disk);
 
        error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
@@ -5718,7 +5714,6 @@ static int md_alloc(dev_t dev, char *name)
        if (mddev->kobj.sd &&
            sysfs_create_group(&mddev->kobj, &md_bitmap_group))
                pr_debug("pointless warning\n");
-       mutex_unlock(&mddev->open_mutex);
  abort:
        mutex_unlock(&disks_mutex);
        if (!error && mddev->kobj.sd) {
index d402e45..7d0ab19 100644 (file)
@@ -1140,8 +1140,8 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
                        continue;
                length = 0;
                switch (c) {
-               /* SOF0: baseline JPEG */
-               case SOF0:
+               /* JPEG_MARKER_SOF0: baseline JPEG */
+               case JPEG_MARKER_SOF0:
                        if (get_word_be(&jpeg_buffer, &word))
                                break;
                        length = (long)word - 2;
@@ -1172,7 +1172,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
                        notfound = 0;
                        break;
 
-               case DQT:
+               case JPEG_MARKER_DQT:
                        if (get_word_be(&jpeg_buffer, &word))
                                break;
                        length = (long)word - 2;
@@ -1185,7 +1185,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
                        skip(&jpeg_buffer, length);
                        break;
 
-               case DHT:
+               case JPEG_MARKER_DHT:
                        if (get_word_be(&jpeg_buffer, &word))
                                break;
                        length = (long)word - 2;
@@ -1198,15 +1198,15 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
                        skip(&jpeg_buffer, length);
                        break;
 
-               case SOS:
+               case JPEG_MARKER_SOS:
                        sos = jpeg_buffer.curr - 2; /* 0xffda */
                        break;
 
                /* skip payload-less markers */
-               case RST ... RST + 7:
-               case SOI:
-               case EOI:
-               case TEM:
+               case JPEG_MARKER_RST ... JPEG_MARKER_RST + 7:
+               case JPEG_MARKER_SOI:
+               case JPEG_MARKER_EOI:
+               case JPEG_MARKER_TEM:
                        break;
 
                /* skip uninteresting payload markers */
index a77d93c..8473a01 100644 (file)
 #define EXYNOS3250_IRQ_TIMEOUT         0x10000000
 
 /* a selection of JPEG markers */
-#define TEM                            0x01
-#define SOF0                           0xc0
-#define DHT                            0xc4
-#define RST                            0xd0
-#define SOI                            0xd8
-#define EOI                            0xd9
-#define        SOS                             0xda
-#define DQT                            0xdb
-#define DHP                            0xde
+#define JPEG_MARKER_TEM                                0x01
+#define JPEG_MARKER_SOF0                               0xc0
+#define JPEG_MARKER_DHT                                0xc4
+#define JPEG_MARKER_RST                                0xd0
+#define JPEG_MARKER_SOI                                0xd8
+#define JPEG_MARKER_EOI                                0xd9
+#define        JPEG_MARKER_SOS                         0xda
+#define JPEG_MARKER_DQT                                0xdb
+#define JPEG_MARKER_DHP                                0xde
 
 /* Flags that indicate a format can be used for capture/output */
 #define SJPEG_FMT_FLAG_ENC_CAPTURE     (1 << 0)
@@ -187,11 +187,11 @@ struct s5p_jpeg_marker {
  * @fmt:       driver-specific format of this queue
  * @w:         image width
  * @h:         image height
- * @sos:       SOS marker's position relative to the buffer beginning
- * @dht:       DHT markers' positions relative to the buffer beginning
- * @dqt:       DQT markers' positions relative to the buffer beginning
- * @sof:       SOF0 marker's position relative to the buffer beginning
- * @sof_len:   SOF0 marker's payload length (without length field itself)
+ * @sos:       JPEG_MARKER_SOS's position relative to the buffer beginning
+ * @dht:       JPEG_MARKER_DHT' positions relative to the buffer beginning
+ * @dqt:       JPEG_MARKER_DQT' positions relative to the buffer beginning
+ * @sof:       JPEG_MARKER_SOF0's position relative to the buffer beginning
+ * @sof_len:   JPEG_MARKER_SOF0's payload length (without length field itself)
  * @size:      image buffer size in bytes
  */
 struct s5p_jpeg_q_data {
index 3e729a1..48d52ba 100644 (file)
@@ -24,6 +24,7 @@ static const u8 COMMAND_VERSION[] = { 'v' };
 // End transmit and repeat reset command so we exit sump mode
 static const u8 COMMAND_RESET[] = { 0xff, 0xff, 0, 0, 0, 0, 0 };
 static const u8 COMMAND_SMODE_ENTER[] = { 's' };
+static const u8 COMMAND_SMODE_EXIT[] = { 0 };
 static const u8 COMMAND_TXSTART[] = { 0x26, 0x24, 0x25, 0x03 };
 
 #define REPLY_XMITCOUNT 't'
@@ -309,12 +310,30 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
                buf[i] = cpu_to_be16(v);
        }
 
-       buf[count] = cpu_to_be16(0xffff);
+       buf[count] = 0xffff;
 
        irtoy->tx_buf = buf;
        irtoy->tx_len = size;
        irtoy->emitted = 0;
 
+       // There is an issue where if the unit is receiving IR while the
+       // first TXSTART command is sent, the device might end up hanging
+       // with its led on. It does not respond to any command when this
+       // happens. To work around this, re-enter sample mode.
+       err = irtoy_command(irtoy, COMMAND_SMODE_EXIT,
+                           sizeof(COMMAND_SMODE_EXIT), STATE_RESET);
+       if (err) {
+               dev_err(irtoy->dev, "exit sample mode: %d\n", err);
+               return err;
+       }
+
+       err = irtoy_command(irtoy, COMMAND_SMODE_ENTER,
+                           sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND);
+       if (err) {
+               dev_err(irtoy->dev, "enter sample mode: %d\n", err);
+               return err;
+       }
+
        err = irtoy_command(irtoy, COMMAND_TXSTART, sizeof(COMMAND_TXSTART),
                            STATE_TX);
        kfree(buf);
index 1b6076a..6669625 100644 (file)
@@ -267,13 +267,13 @@ int bcm_vk_tty_init(struct bcm_vk *vk, char *name)
                struct device *tty_dev;
 
                tty_port_init(&vk->tty[i].port);
-               tty_dev = tty_port_register_device(&vk->tty[i].port, tty_drv,
-                                                  i, dev);
+               tty_dev = tty_port_register_device_attr(&vk->tty[i].port,
+                                                       tty_drv, i, dev, vk,
+                                                       NULL);
                if (IS_ERR(tty_dev)) {
                        err = PTR_ERR(tty_dev);
                        goto unwind;
                }
-               dev_set_drvdata(tty_dev, vk);
                vk->tty[i].is_opened = false;
        }
 
index 2e1befb..6939818 100644 (file)
@@ -1090,7 +1090,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
 
        /* check for 64-bit DMA address supported (DAC) */
        /* check for 32-bit DMA address supported (SAC) */
-       if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) ||
+       if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) &&
            dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
                dev_err(&pci_dev->dev,
                        "err: neither DMA32 nor DMA64 supported\n");
index 7b0516c..91b5754 100644 (file)
@@ -405,7 +405,7 @@ static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
 static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
 {
        bool next_entry_found = false;
-       struct hl_cs *next;
+       struct hl_cs *next, *first_cs;
 
        if (!cs_needs_timeout(cs))
                return;
@@ -415,9 +415,16 @@ static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
        /* We need to handle tdr only once for the complete staged submission.
         * Hence, we choose the CS that reaches this function first which is
         * the CS marked as 'staged_last'.
+        * In case single staged cs was submitted which has both first and last
+        * indications, then "cs_find_first" below will return NULL, since we
+        * removed the cs node from the list before getting here,
+        * in such cases just continue with the cs to cancel it's TDR work.
         */
-       if (cs->staged_cs && cs->staged_last)
-               cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
+       if (cs->staged_cs && cs->staged_last) {
+               first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
+               if (first_cs)
+                       cs = first_cs;
+       }
 
        spin_unlock(&hdev->cs_mirror_lock);
 
@@ -1288,6 +1295,12 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
        if (rc)
                goto free_cs_object;
 
+       /* If this is a staged submission we must return the staged sequence
+        * rather than the internal CS sequence
+        */
+       if (cs->staged_cs)
+               *cs_seq = cs->staged_sequence;
+
        /* Validate ALL the CS chunks before submitting the CS */
        for (i = 0 ; i < num_chunks ; i++) {
                struct hl_cs_chunk *chunk = &cs_chunk_array[i];
@@ -1988,6 +2001,15 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
                        goto free_cs_chunk_array;
                }
 
+               if (!hdev->nic_ports_mask) {
+                       atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+                       atomic64_inc(&cntr->validation_drop_cnt);
+                       dev_err(hdev->dev,
+                               "Collective operations not supported when NIC ports are disabled");
+                       rc = -EINVAL;
+                       goto free_cs_chunk_array;
+               }
+
                collective_engine_id = chunk->collective_engine_id;
        }
 
@@ -2026,9 +2048,10 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
                        spin_unlock(&ctx->sig_mgr.lock);
 
                        if (!handle_found) {
-                               dev_err(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
+                               /* treat as signal CS already finished */
+                               dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
                                                signal_seq);
-                               rc = -EINVAL;
+                               rc = 0;
                                goto free_cs_chunk_array;
                        }
 
@@ -2613,7 +2636,8 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
                 * completed after the poll function.
                 */
                if (!mcs_data.completion_bitmap) {
-                       dev_err(hdev->dev, "Multi-CS got completion on wait but no CS completed\n");
+                       dev_warn_ratelimited(hdev->dev,
+                               "Multi-CS got completion on wait but no CS completed\n");
                        rc = -EFAULT;
                }
        }
@@ -2740,10 +2764,20 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
        else
                interrupt = &hdev->user_interrupt[interrupt_offset];
 
+       /* Add pending user interrupt to relevant list for the interrupt
+        * handler to monitor
+        */
+       spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+       list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
+       spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+
+       /* We check for completion value as interrupt could have been received
+        * before we added the node to the wait list
+        */
        if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
                dev_err(hdev->dev, "Failed to copy completion value from user\n");
                rc = -EFAULT;
-               goto free_fence;
+               goto remove_pending_user_interrupt;
        }
 
        if (completion_value >= target_value)
@@ -2752,14 +2786,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
                *status = CS_WAIT_STATUS_BUSY;
 
        if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
-               goto free_fence;
-
-       /* Add pending user interrupt to relevant list for the interrupt
-        * handler to monitor
-        */
-       spin_lock_irqsave(&interrupt->wait_list_lock, flags);
-       list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
-       spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+               goto remove_pending_user_interrupt;
 
 wait_again:
        /* Wait for interrupt handler to signal completion */
@@ -2770,6 +2797,15 @@ wait_again:
         * If comparison fails, keep waiting until timeout expires
         */
        if (completion_rc > 0) {
+               spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+               /* reinit_completion must be called before we check for user
+                * completion value, otherwise, if interrupt is received after
+                * the comparison and before the next wait_for_completion,
+                * we will reach timeout and fail
+                */
+               reinit_completion(&pend->fence.completion);
+               spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+
                if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
                        dev_err(hdev->dev, "Failed to copy completion value from user\n");
                        rc = -EFAULT;
@@ -2780,11 +2816,7 @@ wait_again:
                if (completion_value >= target_value) {
                        *status = CS_WAIT_STATUS_COMPLETED;
                } else {
-                       spin_lock_irqsave(&interrupt->wait_list_lock, flags);
-                       reinit_completion(&pend->fence.completion);
                        timeout = completion_rc;
-
-                       spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
                        goto wait_again;
                }
        } else if (completion_rc == -ERESTARTSYS) {
@@ -2802,7 +2834,6 @@ remove_pending_user_interrupt:
        list_del(&pend->wait_list_node);
        spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
 
-free_fence:
        kfree(pend);
        hl_ctx_put(ctx);
 
index 76b7de8..0743319 100644 (file)
@@ -437,6 +437,7 @@ void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
                        struct hl_cs_compl *cs_cmpl)
 {
        struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl;
+       u32 offset = 0;
 
        cs_cmpl->hw_sob = handle->hw_sob;
 
@@ -446,9 +447,13 @@ void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
         * set offset 1 for example he mean to wait only for the first
         * signal only, which will be pre_sob_val, and if he set offset 2
         * then the value required is (pre_sob_val + 1) and so on...
+        * if user set wait offset to 0, then treat it as legacy wait cs,
+        * wait for the next signal.
         */
-       cs_cmpl->sob_val = handle->pre_sob_val +
-                       (job->encaps_sig_wait_offset - 1);
+       if (job->encaps_sig_wait_offset)
+               offset = job->encaps_sig_wait_offset - 1;
+
+       cs_cmpl->sob_val = handle->pre_sob_val + offset;
 }
 
 static int init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
index 383865b..14da87b 100644 (file)
@@ -395,7 +395,7 @@ static struct hl_hw_obj_name_entry gaudi_so_id_to_str[] = {
 
 static struct hl_hw_obj_name_entry gaudi_monitor_id_to_str[] = {
        { .id = 200, .name = "MON_OBJ_DMA_DOWN_FEEDBACK_RESET" },
-       { .id = 201, .name = "MON_OBJ_DMA_UP_FEADBACK_RESET" },
+       { .id = 201, .name = "MON_OBJ_DMA_UP_FEEDBACK_RESET" },
        { .id = 203, .name = "MON_OBJ_DRAM_TO_SRAM_QUEUE_FENCE" },
        { .id = 204, .name = "MON_OBJ_TPC_0_CLK_GATE" },
        { .id = 205, .name = "MON_OBJ_TPC_1_CLK_GATE" },
@@ -5802,6 +5802,7 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
 {
        struct gaudi_device *gaudi = hdev->asic_specific;
        struct packet_msg_prot *cq_pkt;
+       u64 msi_addr;
        u32 tmp;
 
        cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2);
@@ -5823,10 +5824,12 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
        cq_pkt->ctl = cpu_to_le32(tmp);
        cq_pkt->value = cpu_to_le32(1);
 
-       if (!gaudi->multi_msi_mode)
-               msi_vec = 0;
+       if (gaudi->multi_msi_mode)
+               msi_addr = mmPCIE_MSI_INTR_0 + msi_vec * 4;
+       else
+               msi_addr = mmPCIE_CORE_MSI_REQ;
 
-       cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_MSI_INTR_0 + msi_vec * 4);
+       cq_pkt->addr = cpu_to_le64(CFG_BASE + msi_addr);
 }
 
 static void gaudi_update_eq_ci(struct hl_device *hdev, u32 val)
index cb265c0..25ac87c 100644 (file)
@@ -8,16 +8,21 @@
 #include "gaudiP.h"
 #include "../include/gaudi/asic_reg/gaudi_regs.h"
 
-#define GAUDI_NUMBER_OF_RR_REGS                24
-#define GAUDI_NUMBER_OF_LBW_RANGES     12
+#define GAUDI_NUMBER_OF_LBW_RR_REGS    28
+#define GAUDI_NUMBER_OF_HBW_RR_REGS    24
+#define GAUDI_NUMBER_OF_LBW_RANGES     10
 
-static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+       mmDMA_IF_W_S_SOB_HIT_WPROT,
        mmDMA_IF_W_S_DMA0_HIT_WPROT,
        mmDMA_IF_W_S_DMA1_HIT_WPROT,
+       mmDMA_IF_E_S_SOB_HIT_WPROT,
        mmDMA_IF_E_S_DMA0_HIT_WPROT,
        mmDMA_IF_E_S_DMA1_HIT_WPROT,
+       mmDMA_IF_W_N_SOB_HIT_WPROT,
        mmDMA_IF_W_N_DMA0_HIT_WPROT,
        mmDMA_IF_W_N_DMA1_HIT_WPROT,
+       mmDMA_IF_E_N_SOB_HIT_WPROT,
        mmDMA_IF_E_N_DMA0_HIT_WPROT,
        mmDMA_IF_E_N_DMA1_HIT_WPROT,
        mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AW,
@@ -38,13 +43,17 @@ static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AW,
 };
 
-static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+       mmDMA_IF_W_S_SOB_HIT_RPROT,
        mmDMA_IF_W_S_DMA0_HIT_RPROT,
        mmDMA_IF_W_S_DMA1_HIT_RPROT,
+       mmDMA_IF_E_S_SOB_HIT_RPROT,
        mmDMA_IF_E_S_DMA0_HIT_RPROT,
        mmDMA_IF_E_S_DMA1_HIT_RPROT,
+       mmDMA_IF_W_N_SOB_HIT_RPROT,
        mmDMA_IF_W_N_DMA0_HIT_RPROT,
        mmDMA_IF_W_N_DMA1_HIT_RPROT,
+       mmDMA_IF_E_N_SOB_HIT_RPROT,
        mmDMA_IF_E_N_DMA0_HIT_RPROT,
        mmDMA_IF_E_N_DMA1_HIT_RPROT,
        mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AR,
@@ -65,13 +74,17 @@ static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AR,
 };
 
-static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+       mmDMA_IF_W_S_SOB_MIN_WPROT_0,
        mmDMA_IF_W_S_DMA0_MIN_WPROT_0,
        mmDMA_IF_W_S_DMA1_MIN_WPROT_0,
+       mmDMA_IF_E_S_SOB_MIN_WPROT_0,
        mmDMA_IF_E_S_DMA0_MIN_WPROT_0,
        mmDMA_IF_E_S_DMA1_MIN_WPROT_0,
+       mmDMA_IF_W_N_SOB_MIN_WPROT_0,
        mmDMA_IF_W_N_DMA0_MIN_WPROT_0,
        mmDMA_IF_W_N_DMA1_MIN_WPROT_0,
+       mmDMA_IF_E_N_SOB_MIN_WPROT_0,
        mmDMA_IF_E_N_DMA0_MIN_WPROT_0,
        mmDMA_IF_E_N_DMA1_MIN_WPROT_0,
        mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0,
@@ -92,13 +105,17 @@ static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0,
 };
 
-static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+       mmDMA_IF_W_S_SOB_MAX_WPROT_0,
        mmDMA_IF_W_S_DMA0_MAX_WPROT_0,
        mmDMA_IF_W_S_DMA1_MAX_WPROT_0,
+       mmDMA_IF_E_S_SOB_MAX_WPROT_0,
        mmDMA_IF_E_S_DMA0_MAX_WPROT_0,
        mmDMA_IF_E_S_DMA1_MAX_WPROT_0,
+       mmDMA_IF_W_N_SOB_MAX_WPROT_0,
        mmDMA_IF_W_N_DMA0_MAX_WPROT_0,
        mmDMA_IF_W_N_DMA1_MAX_WPROT_0,
+       mmDMA_IF_E_N_SOB_MAX_WPROT_0,
        mmDMA_IF_E_N_DMA0_MAX_WPROT_0,
        mmDMA_IF_E_N_DMA1_MAX_WPROT_0,
        mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0,
@@ -119,13 +136,17 @@ static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0,
 };
 
-static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+       mmDMA_IF_W_S_SOB_MIN_RPROT_0,
        mmDMA_IF_W_S_DMA0_MIN_RPROT_0,
        mmDMA_IF_W_S_DMA1_MIN_RPROT_0,
+       mmDMA_IF_E_S_SOB_MIN_RPROT_0,
        mmDMA_IF_E_S_DMA0_MIN_RPROT_0,
        mmDMA_IF_E_S_DMA1_MIN_RPROT_0,
+       mmDMA_IF_W_N_SOB_MIN_RPROT_0,
        mmDMA_IF_W_N_DMA0_MIN_RPROT_0,
        mmDMA_IF_W_N_DMA1_MIN_RPROT_0,
+       mmDMA_IF_E_N_SOB_MIN_RPROT_0,
        mmDMA_IF_E_N_DMA0_MIN_RPROT_0,
        mmDMA_IF_E_N_DMA1_MIN_RPROT_0,
        mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0,
@@ -146,13 +167,17 @@ static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0,
 };
 
-static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+       mmDMA_IF_W_S_SOB_MAX_RPROT_0,
        mmDMA_IF_W_S_DMA0_MAX_RPROT_0,
        mmDMA_IF_W_S_DMA1_MAX_RPROT_0,
+       mmDMA_IF_E_S_SOB_MAX_RPROT_0,
        mmDMA_IF_E_S_DMA0_MAX_RPROT_0,
        mmDMA_IF_E_S_DMA1_MAX_RPROT_0,
+       mmDMA_IF_W_N_SOB_MAX_RPROT_0,
        mmDMA_IF_W_N_DMA0_MAX_RPROT_0,
        mmDMA_IF_W_N_DMA1_MAX_RPROT_0,
+       mmDMA_IF_E_N_SOB_MAX_RPROT_0,
        mmDMA_IF_E_N_DMA0_MAX_RPROT_0,
        mmDMA_IF_E_N_DMA1_MAX_RPROT_0,
        mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0,
@@ -173,7 +198,7 @@ static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0,
 };
 
-static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AW,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AW,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AW,
@@ -200,7 +225,7 @@ static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AW
 };
 
-static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AR,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AR,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AR,
@@ -227,7 +252,7 @@ static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AR
 };
 
-static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
@@ -254,7 +279,7 @@ static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0
 };
 
-static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
@@ -281,7 +306,7 @@ static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0
 };
 
-static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
@@ -308,7 +333,7 @@ static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0
 };
 
-static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
@@ -335,7 +360,7 @@ static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0
 };
 
-static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
@@ -362,7 +387,7 @@ static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0
 };
 
-static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
@@ -389,7 +414,7 @@ static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0
 };
 
-static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
@@ -416,7 +441,7 @@ static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
        mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0
 };
 
-static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
        mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
        mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0,
        mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
@@ -12849,50 +12874,44 @@ static void gaudi_init_range_registers_lbw(struct hl_device *hdev)
        u32 lbw_rng_end[GAUDI_NUMBER_OF_LBW_RANGES];
        int i, j;
 
-       lbw_rng_start[0]  = (0xFBFE0000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[0]    = (0xFBFFF000 & 0x3FFFFFF) + 1;
+       lbw_rng_start[0]  = (0xFC0E8000 & 0x3FFFFFF) - 1; /* 0x000E7FFF */
+       lbw_rng_end[0]    = (0xFC11FFFF & 0x3FFFFFF) + 1; /* 0x00120000 */
 
-       lbw_rng_start[1]  = (0xFC0E8000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[1]    = (0xFC120000 & 0x3FFFFFF) + 1;
+       lbw_rng_start[1]  = (0xFC1E8000 & 0x3FFFFFF) - 1; /* 0x001E7FFF */
+       lbw_rng_end[1]    = (0xFC48FFFF & 0x3FFFFFF) + 1; /* 0x00490000 */
 
-       lbw_rng_start[2]  = (0xFC1E8000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[2]    = (0xFC48FFFF & 0x3FFFFFF) + 1;
+       lbw_rng_start[2]  = (0xFC600000 & 0x3FFFFFF) - 1; /* 0x005FFFFF */
+       lbw_rng_end[2]    = (0xFCC48FFF & 0x3FFFFFF) + 1; /* 0x00C49000 */
 
-       lbw_rng_start[3]  = (0xFC600000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[3]    = (0xFCC48FFF & 0x3FFFFFF) + 1;
+       lbw_rng_start[3]  = (0xFCC4A000 & 0x3FFFFFF) - 1; /* 0x00C49FFF */
+       lbw_rng_end[3]    = (0xFCCDFFFF & 0x3FFFFFF) + 1; /* 0x00CE0000 */
 
-       lbw_rng_start[4]  = (0xFCC4A000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[4]    = (0xFCCDFFFF & 0x3FFFFFF) + 1;
+       lbw_rng_start[4]  = (0xFCCE4000 & 0x3FFFFFF) - 1; /* 0x00CE3FFF */
+       lbw_rng_end[4]    = (0xFCD1FFFF & 0x3FFFFFF) + 1; /* 0x00D20000 */
 
-       lbw_rng_start[5]  = (0xFCCE4000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[5]    = (0xFCD1FFFF & 0x3FFFFFF) + 1;
+       lbw_rng_start[5]  = (0xFCD24000 & 0x3FFFFFF) - 1; /* 0x00D23FFF */
+       lbw_rng_end[5]    = (0xFCD5FFFF & 0x3FFFFFF) + 1; /* 0x00D60000 */
 
-       lbw_rng_start[6]  = (0xFCD24000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[6]    = (0xFCD5FFFF & 0x3FFFFFF) + 1;
+       lbw_rng_start[6]  = (0xFCD64000 & 0x3FFFFFF) - 1; /* 0x00D63FFF */
+       lbw_rng_end[6]    = (0xFCD9FFFF & 0x3FFFFFF) + 1; /* 0x00DA0000 */
 
-       lbw_rng_start[7]  = (0xFCD64000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[7]    = (0xFCD9FFFF & 0x3FFFFFF) + 1;
+       lbw_rng_start[7]  = (0xFCDA4000 & 0x3FFFFFF) - 1; /* 0x00DA3FFF */
+       lbw_rng_end[7]    = (0xFCDDFFFF & 0x3FFFFFF) + 1; /* 0x00DE0000 */
 
-       lbw_rng_start[8]  = (0xFCDA4000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[8]    = (0xFCDDFFFF & 0x3FFFFFF) + 1;
+       lbw_rng_start[8]  = (0xFCDE4000 & 0x3FFFFFF) - 1; /* 0x00DE3FFF */
+       lbw_rng_end[8]    = (0xFCE05FFF & 0x3FFFFFF) + 1; /* 0x00E06000 */
 
-       lbw_rng_start[9]  = (0xFCDE4000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[9]    = (0xFCE05FFF & 0x3FFFFFF) + 1;
+       lbw_rng_start[9]  = (0xFCFC9000 & 0x3FFFFFF) - 1; /* 0x00FC8FFF */
+       lbw_rng_end[9]    = (0xFFFFFFFE & 0x3FFFFFF) + 1; /* 0x03FFFFFF */
 
-       lbw_rng_start[10]  = (0xFEC43000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[10]    = (0xFEC43FFF & 0x3FFFFFF) + 1;
-
-       lbw_rng_start[11] = (0xFE484000 & 0x3FFFFFF) - 1;
-       lbw_rng_end[11]   = (0xFE484FFF & 0x3FFFFFF) + 1;
-
-       for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+       for (i = 0 ; i < GAUDI_NUMBER_OF_LBW_RR_REGS ; i++) {
                WREG32(gaudi_rr_lbw_hit_aw_regs[i],
                                (1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1);
                WREG32(gaudi_rr_lbw_hit_ar_regs[i],
                                (1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1);
        }
 
-       for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++)
+       for (i = 0 ; i < GAUDI_NUMBER_OF_LBW_RR_REGS ; i++)
                for (j = 0 ; j < GAUDI_NUMBER_OF_LBW_RANGES ; j++) {
                        WREG32(gaudi_rr_lbw_min_aw_regs[i] + (j << 2),
                                                        lbw_rng_start[j]);
@@ -12939,12 +12958,12 @@ static void gaudi_init_range_registers_hbw(struct hl_device *hdev)
         * 6th range is the host
         */
 
-       for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+       for (i = 0 ; i < GAUDI_NUMBER_OF_HBW_RR_REGS ; i++) {
                WREG32(gaudi_rr_hbw_hit_aw_regs[i], 0x1F);
                WREG32(gaudi_rr_hbw_hit_ar_regs[i], 0x1D);
        }
 
-       for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+       for (i = 0 ; i < GAUDI_NUMBER_OF_HBW_RR_REGS ; i++) {
                WREG32(gaudi_rr_hbw_base_low_aw_regs[i], dram_addr_lo);
                WREG32(gaudi_rr_hbw_base_low_ar_regs[i], dram_addr_lo);
 
index ffdfbd9..1a65766 100644 (file)
 #define mmPCIE_AUX_FLR_CTRL                                          0xC07394
 #define mmPCIE_AUX_DBI                                               0xC07490
 
+#define mmPCIE_CORE_MSI_REQ                                          0xC04100
+
 #define mmPSOC_PCI_PLL_NR                                            0xC72100
 #define mmSRAM_W_PLL_NR                                              0x4C8100
 #define mmPSOC_HBM_PLL_NR                                            0xC74100
index 6578cc6..380f9aa 100644 (file)
@@ -1802,10 +1802,15 @@ static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
 
        spin_lock_irqsave(&host->irq_lock, flags);
 
-       if (!host->data_status)
+       /*
+        * Only inject an error if we haven't already got an error or data over
+        * interrupt.
+        */
+       if (!host->data_status) {
                host->data_status = SDMMC_INT_DCRC;
-       set_bit(EVENT_DATA_ERROR, &host->pending_events);
-       tasklet_schedule(&host->tasklet);
+               set_bit(EVENT_DATA_ERROR, &host->pending_events);
+               tasklet_schedule(&host->tasklet);
+       }
 
        spin_unlock_irqrestore(&host->irq_lock, flags);
 
@@ -2721,12 +2726,16 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
                }
 
                if (pending & DW_MCI_DATA_ERROR_FLAGS) {
+                       spin_lock(&host->irq_lock);
+
                        /* if there is an error report DATA_ERROR */
                        mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
                        host->data_status = pending;
                        smp_wmb(); /* drain writebuffer */
                        set_bit(EVENT_DATA_ERROR, &host->pending_events);
                        tasklet_schedule(&host->tasklet);
+
+                       spin_unlock(&host->irq_lock);
                }
 
                if (pending & SDMMC_INT_DATA_OVER) {
index 6fc4cf3..a4407f3 100644 (file)
@@ -561,6 +561,8 @@ static void renesas_sdhi_reset(struct tmio_mmc_host *host)
                /* Unknown why but without polling reset status, it will hang */
                read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
                                  false, priv->rstc);
+               /* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
+               sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
                priv->needs_adjust_hs400 = false;
                renesas_sdhi_set_clock(host, host->clk_cache);
        } else if (priv->scc_ctl) {
index a533a90..a7aeb3c 100644 (file)
@@ -351,9 +351,25 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
 static void b53_mdio_remove(struct mdio_device *mdiodev)
 {
        struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
-       struct dsa_switch *ds = dev->ds;
 
-       dsa_unregister_switch(ds);
+       if (!dev)
+               return;
+
+       b53_switch_remove(dev);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void b53_mdio_shutdown(struct mdio_device *mdiodev)
+{
+       struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
+
+       if (!dev)
+               return;
+
+       b53_switch_shutdown(dev);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 static const struct of_device_id b53_of_match[] = {
@@ -373,6 +389,7 @@ MODULE_DEVICE_TABLE(of, b53_of_match);
 static struct mdio_driver b53_mdio_driver = {
        .probe  = b53_mdio_probe,
        .remove = b53_mdio_remove,
+       .shutdown = b53_mdio_shutdown,
        .mdiodrv.driver = {
                .name = "bcm53xx",
                .of_match_table = b53_of_match,
index 82680e0..ae4c79d 100644 (file)
@@ -316,9 +316,21 @@ static int b53_mmap_remove(struct platform_device *pdev)
        if (dev)
                b53_switch_remove(dev);
 
+       platform_set_drvdata(pdev, NULL);
+
        return 0;
 }
 
+static void b53_mmap_shutdown(struct platform_device *pdev)
+{
+       struct b53_device *dev = platform_get_drvdata(pdev);
+
+       if (dev)
+               b53_switch_shutdown(dev);
+
+       platform_set_drvdata(pdev, NULL);
+}
+
 static const struct of_device_id b53_mmap_of_table[] = {
        { .compatible = "brcm,bcm3384-switch" },
        { .compatible = "brcm,bcm6328-switch" },
@@ -331,6 +343,7 @@ MODULE_DEVICE_TABLE(of, b53_mmap_of_table);
 static struct platform_driver b53_mmap_driver = {
        .probe = b53_mmap_probe,
        .remove = b53_mmap_remove,
+       .shutdown = b53_mmap_shutdown,
        .driver = {
                .name = "b53-switch",
                .of_match_table = b53_mmap_of_table,
index 5d068ac..959a52d 100644 (file)
@@ -228,6 +228,11 @@ static inline void b53_switch_remove(struct b53_device *dev)
        dsa_unregister_switch(dev->ds);
 }
 
+static inline void b53_switch_shutdown(struct b53_device *dev)
+{
+       dsa_switch_shutdown(dev->ds);
+}
+
 #define b53_build_op(type_op_size, val_type)                           \
 static inline int b53_##type_op_size(struct b53_device *dev, u8 page,  \
                                     u8 reg, val_type val)              \
index ecb9f7f..01e37b7 100644 (file)
@@ -321,9 +321,21 @@ static int b53_spi_remove(struct spi_device *spi)
        if (dev)
                b53_switch_remove(dev);
 
+       spi_set_drvdata(spi, NULL);
+
        return 0;
 }
 
+static void b53_spi_shutdown(struct spi_device *spi)
+{
+       struct b53_device *dev = spi_get_drvdata(spi);
+
+       if (dev)
+               b53_switch_shutdown(dev);
+
+       spi_set_drvdata(spi, NULL);
+}
+
 static const struct of_device_id b53_spi_of_match[] = {
        { .compatible = "brcm,bcm5325" },
        { .compatible = "brcm,bcm5365" },
@@ -344,6 +356,7 @@ static struct spi_driver b53_spi_driver = {
        },
        .probe  = b53_spi_probe,
        .remove = b53_spi_remove,
+       .shutdown = b53_spi_shutdown,
 };
 
 module_spi_driver(b53_spi_driver);
index 3f4249d..4591bb1 100644 (file)
@@ -629,17 +629,34 @@ static int b53_srab_probe(struct platform_device *pdev)
 static int b53_srab_remove(struct platform_device *pdev)
 {
        struct b53_device *dev = platform_get_drvdata(pdev);
-       struct b53_srab_priv *priv = dev->priv;
 
-       b53_srab_intr_set(priv, false);
+       if (!dev)
+               return 0;
+
+       b53_srab_intr_set(dev->priv, false);
        b53_switch_remove(dev);
 
+       platform_set_drvdata(pdev, NULL);
+
        return 0;
 }
 
+static void b53_srab_shutdown(struct platform_device *pdev)
+{
+       struct b53_device *dev = platform_get_drvdata(pdev);
+
+       if (!dev)
+               return;
+
+       b53_switch_shutdown(dev);
+
+       platform_set_drvdata(pdev, NULL);
+}
+
 static struct platform_driver b53_srab_driver = {
        .probe = b53_srab_probe,
        .remove = b53_srab_remove,
+       .shutdown = b53_srab_shutdown,
        .driver = {
                .name = "b53-srab-switch",
                .of_match_table = b53_srab_of_match,
index 6ce9ec1..7578a5c 100644 (file)
@@ -68,7 +68,7 @@ static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        unsigned int port, count = 0;
 
-       for (port = 0; port < ARRAY_SIZE(priv->port_sts); port++) {
+       for (port = 0; port < ds->num_ports; port++) {
                if (dsa_is_cpu_port(ds, port))
                        continue;
                if (priv->port_sts[port].enabled)
@@ -1512,6 +1512,9 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
 {
        struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
 
+       if (!priv)
+               return 0;
+
        priv->wol_ports_mask = 0;
        /* Disable interrupts */
        bcm_sf2_intr_disable(priv);
@@ -1523,6 +1526,8 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
        if (priv->type == BCM7278_DEVICE_ID)
                reset_control_assert(priv->rcdev);
 
+       platform_set_drvdata(pdev, NULL);
+
        return 0;
 }
 
@@ -1530,6 +1535,9 @@ static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
 {
        struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
 
+       if (!priv)
+               return;
+
        /* For a kernel about to be kexec'd we want to keep the GPHY on for a
         * successful MDIO bus scan to occur. If we did turn off the GPHY
         * before (e.g: port_disable), this will also power it back on.
@@ -1538,6 +1546,10 @@ static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
         */
        if (priv->hw_params.num_gphy == 1)
                bcm_sf2_gphy_enable_set(priv->dev->ds, true);
+
+       dsa_switch_shutdown(priv->dev->ds);
+
+       platform_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM_SLEEP
index bfdf332..e638e3e 100644 (file)
@@ -340,10 +340,29 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
 static void dsa_loop_drv_remove(struct mdio_device *mdiodev)
 {
        struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
-       struct dsa_loop_priv *ps = ds->priv;
+       struct dsa_loop_priv *ps;
+
+       if (!ds)
+               return;
+
+       ps = ds->priv;
 
        dsa_unregister_switch(ds);
        dev_put(ps->netdev);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void dsa_loop_drv_shutdown(struct mdio_device *mdiodev)
+{
+       struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+       if (!ds)
+               return;
+
+       dsa_switch_shutdown(ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 static struct mdio_driver dsa_loop_drv = {
@@ -352,6 +371,7 @@ static struct mdio_driver dsa_loop_drv = {
        },
        .probe  = dsa_loop_drv_probe,
        .remove = dsa_loop_drv_remove,
+       .shutdown = dsa_loop_drv_shutdown,
 };
 
 #define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
index 542cfc4..354655f 100644 (file)
@@ -1916,6 +1916,9 @@ static int hellcreek_remove(struct platform_device *pdev)
 {
        struct hellcreek *hellcreek = platform_get_drvdata(pdev);
 
+       if (!hellcreek)
+               return 0;
+
        hellcreek_hwtstamp_free(hellcreek);
        hellcreek_ptp_free(hellcreek);
        dsa_unregister_switch(hellcreek->ds);
@@ -1924,6 +1927,18 @@ static int hellcreek_remove(struct platform_device *pdev)
        return 0;
 }
 
+static void hellcreek_shutdown(struct platform_device *pdev)
+{
+       struct hellcreek *hellcreek = platform_get_drvdata(pdev);
+
+       if (!hellcreek)
+               return;
+
+       dsa_switch_shutdown(hellcreek->ds);
+
+       platform_set_drvdata(pdev, NULL);
+}
+
 static const struct hellcreek_platform_data de1soc_r1_pdata = {
        .name            = "r4c30",
        .num_ports       = 4,
@@ -1946,6 +1961,7 @@ MODULE_DEVICE_TABLE(of, hellcreek_of_match);
 static struct platform_driver hellcreek_driver = {
        .probe  = hellcreek_probe,
        .remove = hellcreek_remove,
+       .shutdown = hellcreek_shutdown,
        .driver = {
                .name = "hellcreek",
                .of_match_table = hellcreek_of_match,
index d7ce281..89f9202 100644 (file)
@@ -1379,6 +1379,12 @@ int lan9303_remove(struct lan9303 *chip)
 }
 EXPORT_SYMBOL(lan9303_remove);
 
+void lan9303_shutdown(struct lan9303 *chip)
+{
+       dsa_switch_shutdown(chip->ds);
+}
+EXPORT_SYMBOL(lan9303_shutdown);
+
 MODULE_AUTHOR("Juergen Borleis <kernel@pengutronix.de>");
 MODULE_DESCRIPTION("Core driver for SMSC/Microchip LAN9303 three port ethernet switch");
 MODULE_LICENSE("GPL v2");
index 11f590b..c7f73ef 100644 (file)
@@ -10,3 +10,4 @@ extern const struct lan9303_phy_ops lan9303_indirect_phy_ops;
 
 int lan9303_probe(struct lan9303 *chip, struct device_node *np);
 int lan9303_remove(struct lan9303 *chip);
+void lan9303_shutdown(struct lan9303 *chip);
index 9bffaef..8ca4713 100644 (file)
@@ -67,13 +67,28 @@ static int lan9303_i2c_probe(struct i2c_client *client,
 
 static int lan9303_i2c_remove(struct i2c_client *client)
 {
-       struct lan9303_i2c *sw_dev;
+       struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
 
-       sw_dev = i2c_get_clientdata(client);
        if (!sw_dev)
-               return -ENODEV;
+               return 0;
+
+       lan9303_remove(&sw_dev->chip);
+
+       i2c_set_clientdata(client, NULL);
+
+       return 0;
+}
+
+static void lan9303_i2c_shutdown(struct i2c_client *client)
+{
+       struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
+
+       if (!sw_dev)
+               return;
+
+       lan9303_shutdown(&sw_dev->chip);
 
-       return lan9303_remove(&sw_dev->chip);
+       i2c_set_clientdata(client, NULL);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -97,6 +112,7 @@ static struct i2c_driver lan9303_i2c_driver = {
        },
        .probe = lan9303_i2c_probe,
        .remove = lan9303_i2c_remove,
+       .shutdown = lan9303_i2c_shutdown,
        .id_table = lan9303_i2c_id,
 };
 module_i2c_driver(lan9303_i2c_driver);
index 9cbe804..bbb7032 100644 (file)
@@ -138,6 +138,20 @@ static void lan9303_mdio_remove(struct mdio_device *mdiodev)
                return;
 
        lan9303_remove(&sw_dev->chip);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
+{
+       struct lan9303_mdio *sw_dev = dev_get_drvdata(&mdiodev->dev);
+
+       if (!sw_dev)
+               return;
+
+       lan9303_shutdown(&sw_dev->chip);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -155,6 +169,7 @@ static struct mdio_driver lan9303_mdio_driver = {
        },
        .probe  = lan9303_mdio_probe,
        .remove = lan9303_mdio_remove,
+       .shutdown = lan9303_mdio_shutdown,
 };
 mdio_module_driver(lan9303_mdio_driver);
 
index 64d6dfa..3ff4b7e 100644 (file)
@@ -1885,6 +1885,12 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph
 
        reset_control_assert(gphy_fw->reset);
 
+       /* The vendor BSP uses a 200ms delay after asserting the reset line.
+        * Without this some users are observing that the PHY is not coming up
+        * on the MDIO bus.
+        */
+       msleep(200);
+
        ret = request_firmware(&fw, gphy_fw->fw_name, dev);
        if (ret) {
                dev_err(dev, "failed to load firmware: %s, error: %i\n",
@@ -2178,6 +2184,9 @@ static int gswip_remove(struct platform_device *pdev)
        struct gswip_priv *priv = platform_get_drvdata(pdev);
        int i;
 
+       if (!priv)
+               return 0;
+
        /* disable the switch */
        gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
 
@@ -2191,9 +2200,23 @@ static int gswip_remove(struct platform_device *pdev)
        for (i = 0; i < priv->num_gphy_fw; i++)
                gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
 
+       platform_set_drvdata(pdev, NULL);
+
        return 0;
 }
 
+static void gswip_shutdown(struct platform_device *pdev)
+{
+       struct gswip_priv *priv = platform_get_drvdata(pdev);
+
+       if (!priv)
+               return;
+
+       dsa_switch_shutdown(priv->ds);
+
+       platform_set_drvdata(pdev, NULL);
+}
+
 static const struct gswip_hw_info gswip_xrx200 = {
        .max_ports = 7,
        .cpu_port = 6,
@@ -2217,6 +2240,7 @@ MODULE_DEVICE_TABLE(of, gswip_of_match);
 static struct platform_driver gswip_driver = {
        .probe = gswip_probe,
        .remove = gswip_remove,
+       .shutdown = gswip_shutdown,
        .driver = {
                .name = "gswip",
                .of_match_table = gswip_of_match,
index ea7550d..866767b 100644 (file)
@@ -94,6 +94,8 @@ static int ksz8795_spi_remove(struct spi_device *spi)
        if (dev)
                ksz_switch_remove(dev);
 
+       spi_set_drvdata(spi, NULL);
+
        return 0;
 }
 
@@ -101,8 +103,15 @@ static void ksz8795_spi_shutdown(struct spi_device *spi)
 {
        struct ksz_device *dev = spi_get_drvdata(spi);
 
-       if (dev && dev->dev_ops->shutdown)
+       if (!dev)
+               return;
+
+       if (dev->dev_ops->shutdown)
                dev->dev_ops->shutdown(dev);
+
+       dsa_switch_shutdown(dev->ds);
+
+       spi_set_drvdata(spi, NULL);
 }
 
 static const struct of_device_id ksz8795_dt_ids[] = {
index 1129348..5883fa7 100644 (file)
@@ -191,6 +191,18 @@ static void ksz8863_smi_remove(struct mdio_device *mdiodev)
 
        if (dev)
                ksz_switch_remove(dev);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
+{
+       struct ksz_device *dev = dev_get_drvdata(&mdiodev->dev);
+
+       if (dev)
+               dsa_switch_shutdown(dev->ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 static const struct of_device_id ksz8863_dt_ids[] = {
@@ -203,6 +215,7 @@ MODULE_DEVICE_TABLE(of, ksz8863_dt_ids);
 static struct mdio_driver ksz8863_driver = {
        .probe  = ksz8863_smi_probe,
        .remove = ksz8863_smi_remove,
+       .shutdown = ksz8863_smi_shutdown,
        .mdiodrv.driver = {
                .name   = "ksz8863-switch",
                .of_match_table = ksz8863_dt_ids,
index 4e053a2..f3afb8b 100644 (file)
@@ -56,7 +56,10 @@ static int ksz9477_i2c_remove(struct i2c_client *i2c)
 {
        struct ksz_device *dev = i2c_get_clientdata(i2c);
 
-       ksz_switch_remove(dev);
+       if (dev)
+               ksz_switch_remove(dev);
+
+       i2c_set_clientdata(i2c, NULL);
 
        return 0;
 }
@@ -65,8 +68,15 @@ static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
 {
        struct ksz_device *dev = i2c_get_clientdata(i2c);
 
-       if (dev && dev->dev_ops->shutdown)
+       if (!dev)
+               return;
+
+       if (dev->dev_ops->shutdown)
                dev->dev_ops->shutdown(dev);
+
+       dsa_switch_shutdown(dev->ds);
+
+       i2c_set_clientdata(i2c, NULL);
 }
 
 static const struct i2c_device_id ksz9477_i2c_id[] = {
index 15bc11b..e3cb0e6 100644 (file)
@@ -72,6 +72,8 @@ static int ksz9477_spi_remove(struct spi_device *spi)
        if (dev)
                ksz_switch_remove(dev);
 
+       spi_set_drvdata(spi, NULL);
+
        return 0;
 }
 
@@ -79,8 +81,10 @@ static void ksz9477_spi_shutdown(struct spi_device *spi)
 {
        struct ksz_device *dev = spi_get_drvdata(spi);
 
-       if (dev && dev->dev_ops->shutdown)
-               dev->dev_ops->shutdown(dev);
+       if (dev)
+               dsa_switch_shutdown(dev->ds);
+
+       spi_set_drvdata(spi, NULL);
 }
 
 static const struct of_device_id ksz9477_dt_ids[] = {
index d0cba2d..094737e 100644 (file)
@@ -3286,6 +3286,9 @@ mt7530_remove(struct mdio_device *mdiodev)
        struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
        int ret = 0;
 
+       if (!priv)
+               return;
+
        ret = regulator_disable(priv->core_pwr);
        if (ret < 0)
                dev_err(priv->dev,
@@ -3301,11 +3304,26 @@ mt7530_remove(struct mdio_device *mdiodev)
 
        dsa_unregister_switch(priv->ds);
        mutex_destroy(&priv->reg_mutex);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void mt7530_shutdown(struct mdio_device *mdiodev)
+{
+       struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+       if (!priv)
+               return;
+
+       dsa_switch_shutdown(priv->ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 static struct mdio_driver mt7530_mdio_driver = {
        .probe  = mt7530_probe,
        .remove = mt7530_remove,
+       .shutdown = mt7530_shutdown,
        .mdiodrv.driver = {
                .name = "mt7530",
                .of_match_table = mt7530_of_match,
index 24b8219..a4c6eb9 100644 (file)
@@ -290,7 +290,24 @@ static void mv88e6060_remove(struct mdio_device *mdiodev)
 {
        struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
 
+       if (!ds)
+               return;
+
        dsa_unregister_switch(ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void mv88e6060_shutdown(struct mdio_device *mdiodev)
+{
+       struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+       if (!ds)
+               return;
+
+       dsa_switch_shutdown(ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 static const struct of_device_id mv88e6060_of_match[] = {
@@ -303,6 +320,7 @@ static const struct of_device_id mv88e6060_of_match[] = {
 static struct mdio_driver mv88e6060_driver = {
        .probe  = mv88e6060_probe,
        .remove = mv88e6060_remove,
+       .shutdown = mv88e6060_shutdown,
        .mdiodrv.driver = {
                .name = "mv88e6060",
                .of_match_table = mv88e6060_of_match,
index c45ca24..8ab0be7 100644 (file)
@@ -3071,7 +3071,7 @@ static void mv88e6xxx_teardown(struct dsa_switch *ds)
 {
        mv88e6xxx_teardown_devlink_params(ds);
        dsa_devlink_resources_unregister(ds);
-       mv88e6xxx_teardown_devlink_regions(ds);
+       mv88e6xxx_teardown_devlink_regions_global(ds);
 }
 
 static int mv88e6xxx_setup(struct dsa_switch *ds)
@@ -3215,7 +3215,7 @@ unlock:
        if (err)
                goto out_resources;
 
-       err = mv88e6xxx_setup_devlink_regions(ds);
+       err = mv88e6xxx_setup_devlink_regions_global(ds);
        if (err)
                goto out_params;
 
@@ -3229,6 +3229,16 @@ out_resources:
        return err;
 }
 
+static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port)
+{
+       return mv88e6xxx_setup_devlink_regions_port(ds, port);
+}
+
+static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
+{
+       mv88e6xxx_teardown_devlink_regions_port(ds, port);
+}
+
 /* prod_id for switch families which do not have a PHY model number */
 static const u16 family_prod_id_table[] = {
        [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
@@ -6116,6 +6126,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
        .change_tag_protocol    = mv88e6xxx_change_tag_protocol,
        .setup                  = mv88e6xxx_setup,
        .teardown               = mv88e6xxx_teardown,
+       .port_setup             = mv88e6xxx_port_setup,
+       .port_teardown          = mv88e6xxx_port_teardown,
        .phylink_validate       = mv88e6xxx_validate,
        .phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state,
        .phylink_mac_config     = mv88e6xxx_mac_config,
@@ -6389,7 +6401,12 @@ out:
 static void mv88e6xxx_remove(struct mdio_device *mdiodev)
 {
        struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
-       struct mv88e6xxx_chip *chip = ds->priv;
+       struct mv88e6xxx_chip *chip;
+
+       if (!ds)
+               return;
+
+       chip = ds->priv;
 
        if (chip->info->ptp_support) {
                mv88e6xxx_hwtstamp_free(chip);
@@ -6410,6 +6427,20 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
                mv88e6xxx_g1_irq_free(chip);
        else
                mv88e6xxx_irq_poll_free(chip);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void mv88e6xxx_shutdown(struct mdio_device *mdiodev)
+{
+       struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+       if (!ds)
+               return;
+
+       dsa_switch_shutdown(ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 static const struct of_device_id mv88e6xxx_of_match[] = {
@@ -6433,6 +6464,7 @@ MODULE_DEVICE_TABLE(of, mv88e6xxx_of_match);
 static struct mdio_driver mv88e6xxx_driver = {
        .probe  = mv88e6xxx_probe,
        .remove = mv88e6xxx_remove,
+       .shutdown = mv88e6xxx_shutdown,
        .mdiodrv.driver = {
                .name = "mv88e6085",
                .of_match_table = mv88e6xxx_of_match,
index 0c0f5ea..3810683 100644 (file)
@@ -647,26 +647,25 @@ static struct mv88e6xxx_region mv88e6xxx_regions[] = {
        },
 };
 
-static void
-mv88e6xxx_teardown_devlink_regions_global(struct mv88e6xxx_chip *chip)
+void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds)
 {
+       struct mv88e6xxx_chip *chip = ds->priv;
        int i;
 
        for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++)
                dsa_devlink_region_destroy(chip->regions[i]);
 }
 
-static void
-mv88e6xxx_teardown_devlink_regions_port(struct mv88e6xxx_chip *chip,
-                                       int port)
+void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port)
 {
+       struct mv88e6xxx_chip *chip = ds->priv;
+
        dsa_devlink_region_destroy(chip->ports[port].region);
 }
 
-static int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds,
-                                               struct mv88e6xxx_chip *chip,
-                                               int port)
+int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port)
 {
+       struct mv88e6xxx_chip *chip = ds->priv;
        struct devlink_region *region;
 
        region = dsa_devlink_port_region_create(ds,
@@ -681,40 +680,10 @@ static int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds,
        return 0;
 }
 
-static void
-mv88e6xxx_teardown_devlink_regions_ports(struct mv88e6xxx_chip *chip)
-{
-       int port;
-
-       for (port = 0; port < mv88e6xxx_num_ports(chip); port++)
-               mv88e6xxx_teardown_devlink_regions_port(chip, port);
-}
-
-static int mv88e6xxx_setup_devlink_regions_ports(struct dsa_switch *ds,
-                                                struct mv88e6xxx_chip *chip)
-{
-       int port;
-       int err;
-
-       for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
-               err = mv88e6xxx_setup_devlink_regions_port(ds, chip, port);
-               if (err)
-                       goto out;
-       }
-
-       return 0;
-
-out:
-       while (port-- > 0)
-               mv88e6xxx_teardown_devlink_regions_port(chip, port);
-
-       return err;
-}
-
-static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
-                                                 struct mv88e6xxx_chip *chip)
+int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
 {
        bool (*cond)(struct mv88e6xxx_chip *chip);
+       struct mv88e6xxx_chip *chip = ds->priv;
        struct devlink_region_ops *ops;
        struct devlink_region *region;
        u64 size;
@@ -753,30 +722,6 @@ out:
        return PTR_ERR(region);
 }
 
-int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds)
-{
-       struct mv88e6xxx_chip *chip = ds->priv;
-       int err;
-
-       err = mv88e6xxx_setup_devlink_regions_global(ds, chip);
-       if (err)
-               return err;
-
-       err = mv88e6xxx_setup_devlink_regions_ports(ds, chip);
-       if (err)
-               mv88e6xxx_teardown_devlink_regions_global(chip);
-
-       return err;
-}
-
-void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds)
-{
-       struct mv88e6xxx_chip *chip = ds->priv;
-
-       mv88e6xxx_teardown_devlink_regions_ports(chip);
-       mv88e6xxx_teardown_devlink_regions_global(chip);
-}
-
 int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
                               struct devlink_info_req *req,
                               struct netlink_ext_ack *extack)
index 3d72db3..65ce6a6 100644 (file)
@@ -12,8 +12,10 @@ int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
                                struct devlink_param_gset_ctx *ctx);
 int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
                                struct devlink_param_gset_ctx *ctx);
-int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds);
-void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds);
+int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds);
+void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds);
+int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port);
+void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port);
 
 int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
                               struct devlink_info_req *req,
index 3656e67..a3a9636 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright 2019-2021 NXP Semiconductors
+/* Copyright 2019-2021 NXP
  *
  * This is an umbrella module for all network switches that are
  * register-compatible with Ocelot and that perform I/O to their host CPU
index 5854bab..54024b6 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright 2019 NXP Semiconductors
+/* Copyright 2019 NXP
  */
 #ifndef _MSCC_FELIX_H
 #define _MSCC_FELIX_H
index f966a25..11b42fd 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
 /* Copyright 2017 Microsemi Corporation
- * Copyright 2018-2019 NXP Semiconductors
+ * Copyright 2018-2019 NXP
  */
 #include <linux/fsl/enetc_mdio.h>
 #include <soc/mscc/ocelot_qsys.h>
@@ -1472,9 +1472,10 @@ err_pci_enable:
 
 static void felix_pci_remove(struct pci_dev *pdev)
 {
-       struct felix *felix;
+       struct felix *felix = pci_get_drvdata(pdev);
 
-       felix = pci_get_drvdata(pdev);
+       if (!felix)
+               return;
 
        dsa_unregister_switch(felix->ds);
 
@@ -1482,6 +1483,20 @@ static void felix_pci_remove(struct pci_dev *pdev)
        kfree(felix);
 
        pci_disable_device(pdev);
+
+       pci_set_drvdata(pdev, NULL);
+}
+
+static void felix_pci_shutdown(struct pci_dev *pdev)
+{
+       struct felix *felix = pci_get_drvdata(pdev);
+
+       if (!felix)
+               return;
+
+       dsa_switch_shutdown(felix->ds);
+
+       pci_set_drvdata(pdev, NULL);
 }
 
 static struct pci_device_id felix_ids[] = {
@@ -1498,6 +1513,7 @@ static struct pci_driver felix_vsc9959_pci_driver = {
        .id_table       = felix_ids,
        .probe          = felix_pci_probe,
        .remove         = felix_pci_remove,
+       .shutdown       = felix_pci_shutdown,
 };
 module_pci_driver(felix_vsc9959_pci_driver);
 
index deae923..de1d34a 100644 (file)
@@ -1245,18 +1245,33 @@ err_alloc_felix:
 
 static int seville_remove(struct platform_device *pdev)
 {
-       struct felix *felix;
+       struct felix *felix = platform_get_drvdata(pdev);
 
-       felix = platform_get_drvdata(pdev);
+       if (!felix)
+               return 0;
 
        dsa_unregister_switch(felix->ds);
 
        kfree(felix->ds);
        kfree(felix);
 
+       platform_set_drvdata(pdev, NULL);
+
        return 0;
 }
 
+static void seville_shutdown(struct platform_device *pdev)
+{
+       struct felix *felix = platform_get_drvdata(pdev);
+
+       if (!felix)
+               return;
+
+       dsa_switch_shutdown(felix->ds);
+
+       platform_set_drvdata(pdev, NULL);
+}
+
 static const struct of_device_id seville_of_match[] = {
        { .compatible = "mscc,vsc9953-switch" },
        { },
@@ -1266,6 +1281,7 @@ MODULE_DEVICE_TABLE(of, seville_of_match);
 static struct platform_driver seville_vsc9953_driver = {
        .probe          = seville_probe,
        .remove         = seville_remove,
+       .shutdown       = seville_shutdown,
        .driver = {
                .name           = "mscc_seville",
                .of_match_table = of_match_ptr(seville_of_match),
index 563d8a2..a6bfb6a 100644 (file)
@@ -1083,6 +1083,9 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
        struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
        unsigned int i;
 
+       if (!priv)
+               return;
+
        for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
                struct ar9331_sw_port *port = &priv->port[i];
 
@@ -1094,6 +1097,20 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
        dsa_unregister_switch(&priv->ds);
 
        reset_control_assert(priv->sw_reset);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void ar9331_sw_shutdown(struct mdio_device *mdiodev)
+{
+       struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+       if (!priv)
+               return;
+
+       dsa_switch_shutdown(&priv->ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 static const struct of_device_id ar9331_sw_of_match[] = {
@@ -1104,6 +1121,7 @@ static const struct of_device_id ar9331_sw_of_match[] = {
 static struct mdio_driver ar9331_sw_mdio_driver = {
        .probe = ar9331_sw_probe,
        .remove = ar9331_sw_remove,
+       .shutdown = ar9331_sw_shutdown,
        .mdiodrv.driver = {
                .name = AR9331_SW_NAME,
                .of_match_table = ar9331_sw_of_match,
index 1f63f50..a984f06 100644 (file)
@@ -643,10 +643,8 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
 }
 
 static int
-qca8k_mdio_write(struct mii_bus *salve_bus, int phy, int regnum, u16 data)
+qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
 {
-       struct qca8k_priv *priv = salve_bus->priv;
-       struct mii_bus *bus = priv->bus;
        u16 r1, r2, page;
        u32 val;
        int ret;
@@ -682,10 +680,8 @@ exit:
 }
 
 static int
-qca8k_mdio_read(struct mii_bus *salve_bus, int phy, int regnum)
+qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
 {
-       struct qca8k_priv *priv = salve_bus->priv;
-       struct mii_bus *bus = priv->bus;
        u16 r1, r2, page;
        u32 val;
        int ret;
@@ -727,6 +723,24 @@ exit:
 }
 
 static int
+qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
+{
+       struct qca8k_priv *priv = slave_bus->priv;
+       struct mii_bus *bus = priv->bus;
+
+       return qca8k_mdio_write(bus, phy, regnum, data);
+}
+
+static int
+qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
+{
+       struct qca8k_priv *priv = slave_bus->priv;
+       struct mii_bus *bus = priv->bus;
+
+       return qca8k_mdio_read(bus, phy, regnum);
+}
+
+static int
 qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
 {
        struct qca8k_priv *priv = ds->priv;
@@ -775,8 +789,8 @@ qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
 
        bus->priv = (void *)priv;
        bus->name = "qca8k slave mii";
-       bus->read = qca8k_mdio_read;
-       bus->write = qca8k_mdio_write;
+       bus->read = qca8k_internal_mdio_read;
+       bus->write = qca8k_internal_mdio_write;
        snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
                 ds->index);
 
@@ -1866,10 +1880,27 @@ qca8k_sw_remove(struct mdio_device *mdiodev)
        struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
        int i;
 
+       if (!priv)
+               return;
+
        for (i = 0; i < QCA8K_NUM_PORTS; i++)
                qca8k_port_set_status(priv, i, 0);
 
        dsa_unregister_switch(priv->ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
+{
+       struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+       if (!priv)
+               return;
+
+       dsa_switch_shutdown(priv->ds);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -1926,6 +1957,7 @@ static const struct of_device_id qca8k_of_match[] = {
 static struct mdio_driver qca8kmdio_driver = {
        .probe  = qca8k_sw_probe,
        .remove = qca8k_sw_remove,
+       .shutdown = qca8k_sw_shutdown,
        .mdiodrv.driver = {
                .name = "qca8k",
                .of_match_table = qca8k_of_match,
index 8e49d4f..2fcfd91 100644 (file)
@@ -368,7 +368,7 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
        smi->slave_mii_bus->parent = smi->dev;
        smi->ds->slave_mii_bus = smi->slave_mii_bus;
 
-       ret = of_mdiobus_register(smi->slave_mii_bus, mdio_np);
+       ret = devm_of_mdiobus_register(smi->dev, smi->slave_mii_bus, mdio_np);
        if (ret) {
                dev_err(smi->dev, "unable to register MDIO bus %s\n",
                        smi->slave_mii_bus->id);
@@ -464,16 +464,33 @@ static int realtek_smi_probe(struct platform_device *pdev)
 
 static int realtek_smi_remove(struct platform_device *pdev)
 {
-       struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
+       struct realtek_smi *smi = platform_get_drvdata(pdev);
+
+       if (!smi)
+               return 0;
 
        dsa_unregister_switch(smi->ds);
        if (smi->slave_mii_bus)
                of_node_put(smi->slave_mii_bus->dev.of_node);
        gpiod_set_value(smi->reset, 1);
 
+       platform_set_drvdata(pdev, NULL);
+
        return 0;
 }
 
+static void realtek_smi_shutdown(struct platform_device *pdev)
+{
+       struct realtek_smi *smi = platform_get_drvdata(pdev);
+
+       if (!smi)
+               return;
+
+       dsa_switch_shutdown(smi->ds);
+
+       platform_set_drvdata(pdev, NULL);
+}
+
 static const struct of_device_id realtek_smi_of_match[] = {
        {
                .compatible = "realtek,rtl8366rb",
@@ -495,6 +512,7 @@ static struct platform_driver realtek_smi_driver = {
        },
        .probe  = realtek_smi_probe,
        .remove = realtek_smi_remove,
+       .shutdown = realtek_smi_shutdown,
 };
 module_platform_driver(realtek_smi_driver);
 
index 387a1f2..5bbf170 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #include <linux/packing.h>
index 05c7f4c..0569ff0 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
- * Copyright 2020 NXP Semiconductors
+ * Copyright 2020 NXP
  */
 #include "sja1105.h"
 
index 6c10ffa..72b9b39 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright 2020, NXP Semiconductors
+/* Copyright 2020 NXP
  */
 #include "sja1105.h"
 #include "sja1105_vl.h"
index 2f8cc66..7c0db80 100644 (file)
@@ -3335,13 +3335,29 @@ static int sja1105_probe(struct spi_device *spi)
 static int sja1105_remove(struct spi_device *spi)
 {
        struct sja1105_private *priv = spi_get_drvdata(spi);
-       struct dsa_switch *ds = priv->ds;
 
-       dsa_unregister_switch(ds);
+       if (!priv)
+               return 0;
+
+       dsa_unregister_switch(priv->ds);
+
+       spi_set_drvdata(spi, NULL);
 
        return 0;
 }
 
+static void sja1105_shutdown(struct spi_device *spi)
+{
+       struct sja1105_private *priv = spi_get_drvdata(spi);
+
+       if (!priv)
+               return;
+
+       dsa_switch_shutdown(priv->ds);
+
+       spi_set_drvdata(spi, NULL);
+}
+
 static const struct of_device_id sja1105_dt_ids[] = {
        { .compatible = "nxp,sja1105e", .data = &sja1105e_info },
        { .compatible = "nxp,sja1105t", .data = &sja1105t_info },
@@ -3365,6 +3381,7 @@ static struct spi_driver sja1105_driver = {
        },
        .probe  = sja1105_probe,
        .remove = sja1105_remove,
+       .shutdown = sja1105_shutdown,
 };
 
 module_spi_driver(sja1105_driver);
index 705d390..215dd17 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright 2021, NXP Semiconductors
+/* Copyright 2021 NXP
  */
 #include <linux/pcs/pcs-xpcs.h>
 #include <linux/of_mdio.h>
index d60a530..d3c9ad6 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
  * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
  */
index 7a422ef..baba204 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #include "sja1105_static_config.h"
index bce0f5c..6a372d5 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #ifndef _SJA1105_STATIC_CONFIG_H
index ec7b65d..6802f40 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright 2020, NXP Semiconductors
+/* Copyright 2020 NXP
  */
 #include <net/tc_act/tc_gate.h>
 #include <linux/dsa/8021q.h>
index 173d789..51fba0d 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright 2020, NXP Semiconductors
+/* Copyright 2020 NXP
  */
 #ifndef _SJA1105_VL_H
 #define _SJA1105_VL_H
index 19ce4aa..a4b1447 100644 (file)
@@ -1225,6 +1225,12 @@ int vsc73xx_remove(struct vsc73xx *vsc)
 }
 EXPORT_SYMBOL(vsc73xx_remove);
 
+void vsc73xx_shutdown(struct vsc73xx *vsc)
+{
+       dsa_switch_shutdown(vsc->ds);
+}
+EXPORT_SYMBOL(vsc73xx_shutdown);
+
 MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
 MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 driver");
 MODULE_LICENSE("GPL v2");
index 2a57f33..fe4b154 100644 (file)
@@ -116,7 +116,26 @@ static int vsc73xx_platform_remove(struct platform_device *pdev)
 {
        struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev);
 
-       return vsc73xx_remove(&vsc_platform->vsc);
+       if (!vsc_platform)
+               return 0;
+
+       vsc73xx_remove(&vsc_platform->vsc);
+
+       platform_set_drvdata(pdev, NULL);
+
+       return 0;
+}
+
+static void vsc73xx_platform_shutdown(struct platform_device *pdev)
+{
+       struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev);
+
+       if (!vsc_platform)
+               return;
+
+       vsc73xx_shutdown(&vsc_platform->vsc);
+
+       platform_set_drvdata(pdev, NULL);
 }
 
 static const struct vsc73xx_ops vsc73xx_platform_ops = {
@@ -144,6 +163,7 @@ MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
 static struct platform_driver vsc73xx_platform_driver = {
        .probe = vsc73xx_platform_probe,
        .remove = vsc73xx_platform_remove,
+       .shutdown = vsc73xx_platform_shutdown,
        .driver = {
                .name = "vsc73xx-platform",
                .of_match_table = vsc73xx_of_match,
index 81eca4a..6453989 100644 (file)
@@ -163,7 +163,26 @@ static int vsc73xx_spi_remove(struct spi_device *spi)
 {
        struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
 
-       return vsc73xx_remove(&vsc_spi->vsc);
+       if (!vsc_spi)
+               return 0;
+
+       vsc73xx_remove(&vsc_spi->vsc);
+
+       spi_set_drvdata(spi, NULL);
+
+       return 0;
+}
+
+static void vsc73xx_spi_shutdown(struct spi_device *spi)
+{
+       struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
+
+       if (!vsc_spi)
+               return;
+
+       vsc73xx_shutdown(&vsc_spi->vsc);
+
+       spi_set_drvdata(spi, NULL);
 }
 
 static const struct vsc73xx_ops vsc73xx_spi_ops = {
@@ -191,6 +210,7 @@ MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
 static struct spi_driver vsc73xx_spi_driver = {
        .probe = vsc73xx_spi_probe,
        .remove = vsc73xx_spi_remove,
+       .shutdown = vsc73xx_spi_shutdown,
        .driver = {
                .name = "vsc73xx-spi",
                .of_match_table = vsc73xx_of_match,
index 7478f8d..30b9515 100644 (file)
@@ -27,3 +27,4 @@ struct vsc73xx_ops {
 int vsc73xx_is_addr_valid(u8 block, u8 subblock);
 int vsc73xx_probe(struct vsc73xx *vsc);
 int vsc73xx_remove(struct vsc73xx *vsc);
+void vsc73xx_shutdown(struct vsc73xx *vsc);
index 130abb0..4694209 100644 (file)
@@ -822,6 +822,12 @@ void xrs700x_switch_remove(struct xrs700x *priv)
 }
 EXPORT_SYMBOL(xrs700x_switch_remove);
 
+void xrs700x_switch_shutdown(struct xrs700x *priv)
+{
+       dsa_switch_shutdown(priv->ds);
+}
+EXPORT_SYMBOL(xrs700x_switch_shutdown);
+
 MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
 MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA driver");
 MODULE_LICENSE("GPL v2");
index ff62cf6..4d58257 100644 (file)
@@ -40,3 +40,4 @@ struct xrs700x {
 struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv);
 int xrs700x_switch_register(struct xrs700x *priv);
 void xrs700x_switch_remove(struct xrs700x *priv);
+void xrs700x_switch_shutdown(struct xrs700x *priv);
index 489d938..6deae38 100644 (file)
@@ -109,11 +109,28 @@ static int xrs700x_i2c_remove(struct i2c_client *i2c)
 {
        struct xrs700x *priv = i2c_get_clientdata(i2c);
 
+       if (!priv)
+               return 0;
+
        xrs700x_switch_remove(priv);
 
+       i2c_set_clientdata(i2c, NULL);
+
        return 0;
 }
 
+static void xrs700x_i2c_shutdown(struct i2c_client *i2c)
+{
+       struct xrs700x *priv = i2c_get_clientdata(i2c);
+
+       if (!priv)
+               return;
+
+       xrs700x_switch_shutdown(priv);
+
+       i2c_set_clientdata(i2c, NULL);
+}
+
 static const struct i2c_device_id xrs700x_i2c_id[] = {
        { "xrs700x-switch", 0 },
        {},
@@ -137,6 +154,7 @@ static struct i2c_driver xrs700x_i2c_driver = {
        },
        .probe  = xrs700x_i2c_probe,
        .remove = xrs700x_i2c_remove,
+       .shutdown = xrs700x_i2c_shutdown,
        .id_table = xrs700x_i2c_id,
 };
 
index 44f58be..d01cf10 100644 (file)
@@ -136,7 +136,24 @@ static void xrs700x_mdio_remove(struct mdio_device *mdiodev)
 {
        struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev);
 
+       if (!priv)
+               return;
+
        xrs700x_switch_remove(priv);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void xrs700x_mdio_shutdown(struct mdio_device *mdiodev)
+{
+       struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev);
+
+       if (!priv)
+               return;
+
+       xrs700x_switch_shutdown(priv);
+
+       dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 static const struct of_device_id __maybe_unused xrs700x_mdio_dt_ids[] = {
@@ -155,6 +172,7 @@ static struct mdio_driver xrs700x_mdio_driver = {
        },
        .probe  = xrs700x_mdio_probe,
        .remove = xrs700x_mdio_remove,
+       .shutdown = xrs700x_mdio_shutdown,
 };
 
 mdio_module_driver(xrs700x_mdio_driver);
index 8d90fed..6f0ea2f 100644 (file)
@@ -1050,7 +1050,7 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
 #ifdef VORTEX_BUS_MASTER
        if (vp->bus_master) {
                /* Set the bus-master controller to transfer the packet. */
-               outl((int) (skb->data), ioaddr + Wn7_MasterAddr);
+               outl(isa_virt_to_bus(skb->data), ioaddr + Wn7_MasterAddr);
                outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
                vp->tx_skb = skb;
                outw(StartDMADown, ioaddr + EL3_CMD);
index 53660bc..9afc712 100644 (file)
@@ -922,13 +922,16 @@ static void __init ne_add_devices(void)
        }
 }
 
-#ifdef MODULE
 static int __init ne_init(void)
 {
        int retval;
-       ne_add_devices();
+
+       if (IS_MODULE(CONFIG_NE2000))
+               ne_add_devices();
+
        retval = platform_driver_probe(&ne_driver, ne_drv_probe);
-       if (retval) {
+
+       if (IS_MODULE(CONFIG_NE2000) && retval) {
                if (io[0] == 0)
                        pr_notice("ne.c: You must supply \"io=0xNNN\""
                               " value(s) for ISA cards.\n");
@@ -941,18 +944,8 @@ static int __init ne_init(void)
        return retval;
 }
 module_init(ne_init);
-#else /* MODULE */
-static int __init ne_init(void)
-{
-       int retval = platform_driver_probe(&ne_driver, ne_drv_probe);
-
-       /* Unregister unused platform_devices. */
-       ne_loop_rm_unreg(0);
-       return retval;
-}
-module_init(ne_init);
 
-#ifdef CONFIG_NETDEV_LEGACY_INIT
+#if !defined(MODULE) && defined(CONFIG_NETDEV_LEGACY_INIT)
 struct net_device * __init ne_probe(int unit)
 {
        int this_dev;
@@ -994,7 +987,6 @@ struct net_device * __init ne_probe(int unit)
        return ERR_PTR(-ENODEV);
 }
 #endif
-#endif /* MODULE */
 
 static void __exit ne_exit(void)
 {
index b5df7ad..032e892 100644 (file)
@@ -748,7 +748,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p)
 #ifdef XMT_VIA_SKB
                        skb_save[i] = p->tmd_skb[i];
 #endif
-                       buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
+                       buffer[i] = (unsigned long)isa_bus_to_virt(tmdp->u.buffer);
                        blen[i] = tmdp->blen;
                        tmdp->u.s.status = 0x0;
                }
index dee9ff7..d4b1976 100644 (file)
@@ -413,13 +413,13 @@ static int atl_resume_common(struct device *dev, bool deep)
        if (deep) {
                /* Reinitialize Nic/Vecs objects */
                aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
+       }
 
+       if (netif_running(nic->ndev)) {
                ret = aq_nic_init(nic);
                if (ret)
                        goto err_exit;
-       }
 
-       if (netif_running(nic->ndev)) {
                ret = aq_nic_start(nic);
                if (ret)
                        goto err_exit;
index 85fa0ab..9513cfb 100644 (file)
@@ -129,6 +129,8 @@ static int bgmac_probe(struct bcma_device *core)
        bcma_set_drvdata(core, bgmac);
 
        err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr);
+       if (err == -EPROBE_DEFER)
+               return err;
 
        /* If no MAC address assigned via device tree, check SPROM */
        if (err) {
index f255fd0..6fbf735 100644 (file)
@@ -1224,7 +1224,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
 
        /* SR-IOV capability was enabled but there are no VFs*/
        if (iov->total == 0) {
-               err = -EINVAL;
+               err = 0;
                goto failed;
        }
 
index ea0c45d..62f84cc 100644 (file)
@@ -391,7 +391,7 @@ static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
         * netif_tx_queue_stopped().
         */
        smp_mb();
-       if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
+       if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
                netif_tx_wake_queue(txq);
                return false;
        }
@@ -764,7 +764,7 @@ next_tx_int:
        smp_mb();
 
        if (unlikely(netif_tx_queue_stopped(txq)) &&
-           bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+           bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
            READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
                netif_tx_wake_queue(txq);
 }
@@ -2213,12 +2213,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
                        DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
                                     bp->current_interval * 10);
                fw_health->tmr_counter = fw_health->tmr_multiplier;
-               if (!fw_health->enabled) {
+               if (!fw_health->enabled)
                        fw_health->last_fw_heartbeat =
                                bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
-                       fw_health->last_fw_reset_cnt =
-                               bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
-               }
+               fw_health->last_fw_reset_cnt =
+                       bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
                netif_info(bp, drv, bp->dev,
                           "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
                           fw_health->master, fw_health->last_fw_reset_cnt,
@@ -2417,7 +2416,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
                        tx_pkts++;
                        /* return full budget so NAPI will complete. */
-                       if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
+                       if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
                                rx_pkts = budget;
                                raw_cons = NEXT_RAW_CMP(raw_cons);
                                if (budget)
@@ -2730,6 +2729,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
                int j;
 
+               if (!txr->tx_buf_ring)
+                       continue;
+
                for (j = 0; j < max_idx;) {
                        struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
                        struct sk_buff *skb;
@@ -2814,6 +2816,9 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
        }
 
 skip_rx_tpa_free:
+       if (!rxr->rx_buf_ring)
+               goto skip_rx_buf_free;
+
        for (i = 0; i < max_idx; i++) {
                struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
                dma_addr_t mapping = rx_buf->mapping;
@@ -2836,6 +2841,11 @@ skip_rx_tpa_free:
                        kfree(data);
                }
        }
+
+skip_rx_buf_free:
+       if (!rxr->rx_agg_ring)
+               goto skip_rx_agg_free;
+
        for (i = 0; i < max_agg_idx; i++) {
                struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
                struct page *page = rx_agg_buf->page;
@@ -2852,6 +2862,8 @@ skip_rx_tpa_free:
 
                __free_page(page);
        }
+
+skip_rx_agg_free:
        if (rxr->rx_page) {
                __free_page(rxr->rx_page);
                rxr->rx_page = NULL;
@@ -2900,6 +2912,9 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
        struct pci_dev *pdev = bp->pdev;
        int i;
 
+       if (!rmem->pg_arr)
+               goto skip_pages;
+
        for (i = 0; i < rmem->nr_pages; i++) {
                if (!rmem->pg_arr[i])
                        continue;
@@ -2909,6 +2924,7 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 
                rmem->pg_arr[i] = NULL;
        }
+skip_pages:
        if (rmem->pg_tbl) {
                size_t pg_tbl_size = rmem->nr_pages * 8;
 
@@ -3228,10 +3244,14 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
 
 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
 {
+       struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
        kfree(cpr->cp_desc_ring);
        cpr->cp_desc_ring = NULL;
+       ring->ring_mem.pg_arr = NULL;
        kfree(cpr->cp_desc_mapping);
        cpr->cp_desc_mapping = NULL;
+       ring->ring_mem.dma_arr = NULL;
 }
 
 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
@@ -3620,7 +3640,7 @@ static int bnxt_init_tx_rings(struct bnxt *bp)
        u16 i;
 
        bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
-                                  MAX_SKB_FRAGS + 1);
+                                  BNXT_MIN_TX_DESC_CNT);
 
        for (i = 0; i < bp->tx_nr_rings; i++) {
                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
@@ -12207,6 +12227,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                        return;
                }
 
+               if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
+                   bp->fw_health->enabled) {
+                       bp->fw_health->last_fw_reset_cnt =
+                               bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+               }
                bp->fw_reset_state = 0;
                /* Make sure fw_reset_state is 0 before clearing the flag */
                smp_mb__before_atomic();
index ec046e7..19fe647 100644 (file)
@@ -629,6 +629,11 @@ struct nqe_cn {
 #define BNXT_MAX_RX_JUM_DESC_CNT       (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
 #define BNXT_MAX_TX_DESC_CNT           (TX_DESC_CNT * MAX_TX_PAGES - 1)
 
+/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1.  We need one extra
+ * BD because the first TX BD is always a long BD.
+ */
+#define BNXT_MIN_TX_DESC_CNT           (MAX_SKB_FRAGS + 2)
+
 #define RX_RING(x)     (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
 #define RX_IDX(x)      ((x) & (RX_DESC_CNT - 1))
 
index b056e3c..7260910 100644 (file)
@@ -798,7 +798,7 @@ static int bnxt_set_ringparam(struct net_device *dev,
 
        if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
            (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
-           (ering->tx_pending <= MAX_SKB_FRAGS))
+           (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
                return -EINVAL;
 
        if (netif_running(dev))
index 46fae1a..e6a4a76 100644 (file)
@@ -1884,9 +1884,6 @@ bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
 {
        struct bnxt_flower_indr_block_cb_priv *cb_priv;
 
-       /* All callback list access should be protected by RTNL. */
-       ASSERT_RTNL();
-
        list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
                if (cb_priv->tunnel_netdev == netdev)
                        return cb_priv;
index 8b7b599..f66d22d 100644 (file)
@@ -111,9 +111,9 @@ static void macb_remove(struct pci_dev *pdev)
        struct platform_device *plat_dev = pci_get_drvdata(pdev);
        struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
 
-       platform_device_unregister(plat_dev);
        clk_unregister(plat_data->pclk);
        clk_unregister(plat_data->hclk);
+       platform_device_unregister(plat_dev);
 }
 
 static const struct pci_device_id dev_id_table[] = {
index 3ca93ad..042327b 100644 (file)
@@ -419,7 +419,7 @@ static void enetc_rx_dim_work(struct work_struct *w)
 
 static void enetc_rx_net_dim(struct enetc_int_vector *v)
 {
-       struct dim_sample dim_sample;
+       struct dim_sample dim_sample = {};
 
        v->comp_cnt++;
 
@@ -1879,7 +1879,6 @@ static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
 static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
 {
        struct pci_dev *pdev = priv->si->pdev;
-       cpumask_t cpu_mask;
        int i, j, err;
 
        for (i = 0; i < priv->bdr_int_num; i++) {
@@ -1908,9 +1907,7 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
 
                        enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
                }
-               cpumask_clear(&cpu_mask);
-               cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
-               irq_set_affinity_hint(irq, &cpu_mask);
+               irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));
        }
 
        return 0;
index ee1468e..91f02c5 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/* Copyright 2021 NXP Semiconductors
+/* Copyright 2021 NXP
  *
  * The Integrated Endpoint Register Block (IERB) is configured by pre-boot
  * software and is supposed to be to ENETC what a NVRAM is to a 'real' PCIe
index b3b774e..c2ce47c 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
-/* Copyright 2021 NXP Semiconductors */
+/* Copyright 2021 NXP */
 
 #include <linux/pci.h>
 #include <linux/platform_device.h>
index 80bd5c6..ec87b37 100644 (file)
@@ -4176,5 +4176,4 @@ static struct platform_driver fec_driver = {
 
 module_platform_driver(fec_driver);
 
-MODULE_ALIAS("platform:"DRIVER_NAME);
 MODULE_LICENSE("GPL");
index 22af3d6..adc54a7 100644 (file)
@@ -61,6 +61,9 @@ static unsigned int tx_sgl = 1;
 module_param(tx_sgl, uint, 0600);
 MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");
 
+static bool page_pool_enabled = true;
+module_param(page_pool_enabled, bool, 0400);
+
 #define HNS3_SGL_SIZE(nfrag)   (sizeof(struct scatterlist) * (nfrag) + \
                                 sizeof(struct sg_table))
 #define HNS3_MAX_SGL_SIZE      ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \
@@ -73,6 +76,7 @@ MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to opt
 #define HNS3_OUTER_VLAN_TAG    2
 
 #define HNS3_MIN_TX_LEN                33U
+#define HNS3_MIN_TUN_PKT_LEN   65U
 
 /* hns3_pci_tbl - PCI Device ID Table
  *
@@ -1424,8 +1428,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
                               l4.tcp->doff);
                break;
        case IPPROTO_UDP:
-               if (hns3_tunnel_csum_bug(skb))
-                       return skb_checksum_help(skb);
+               if (hns3_tunnel_csum_bug(skb)) {
+                       int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);
+
+                       return ret ? ret : skb_checksum_help(skb);
+               }
 
                hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
                hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
@@ -4753,7 +4760,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
                goto out_with_desc_cb;
 
        if (!HNAE3_IS_TX_RING(ring)) {
-               hns3_alloc_page_pool(ring);
+               if (page_pool_enabled)
+                       hns3_alloc_page_pool(ring);
 
                ret = hns3_alloc_ring_buffers(ring);
                if (ret)
index 68ed171..87d96f8 100644 (file)
@@ -1724,6 +1724,10 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
        }
 
        bd_num = le32_to_cpu(req->bd_num);
+       if (!bd_num) {
+               dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
+               return -EINVAL;
+       }
 
        desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
        if (!desc_src)
index 718c16d..bb9b026 100644 (file)
@@ -2445,12 +2445,12 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
                return;
        }
 
-       dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%u), queue_id(%u)\n",
+       dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vport(%u), queue_id(%u)\n",
                vf_id, q_id);
 
        if (vf_id) {
                if (vf_id >= hdev->num_alloc_vport) {
-                       dev_err(dev, "invalid vf id(%u)\n", vf_id);
+                       dev_err(dev, "invalid vport(%u)\n", vf_id);
                        return;
                }
 
@@ -2463,8 +2463,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
 
                ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
                if (ret)
-                       dev_err(dev, "inform reset to vf(%u) failed %d!\n",
-                               hdev->vport->vport_id, ret);
+                       dev_err(dev, "inform reset to vport(%u) failed %d!\n",
+                               vf_id, ret);
        } else {
                set_bit(HNAE3_FUNC_RESET, reset_requests);
        }
index e55ba2e..47fea89 100644 (file)
@@ -1528,9 +1528,10 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
 static int hclge_configure(struct hclge_dev *hdev)
 {
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+       const struct cpumask *cpumask = cpu_online_mask;
        struct hclge_cfg cfg;
        unsigned int i;
-       int ret;
+       int node, ret;
 
        ret = hclge_get_cfg(hdev, &cfg);
        if (ret)
@@ -1595,11 +1596,12 @@ static int hclge_configure(struct hclge_dev *hdev)
 
        hclge_init_kdump_kernel_config(hdev);
 
-       /* Set the init affinity based on pci func number */
-       i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
-       i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
-       cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
-                       &hdev->affinity_mask);
+       /* Set the affinity based on numa node */
+       node = dev_to_node(&hdev->pdev->dev);
+       if (node != NUMA_NO_NODE)
+               cpumask = cpumask_of_node(node);
+
+       cpumask_copy(&hdev->affinity_mask, cpumask);
 
        return ret;
 }
@@ -3659,7 +3661,8 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "set vf(%u) rst failed %d!\n",
-                               vport->vport_id, ret);
+                               vport->vport_id - HCLGE_VF_VPORT_START_NUM,
+                               ret);
                        return ret;
                }
 
@@ -3674,7 +3677,8 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
                if (ret)
                        dev_warn(&hdev->pdev->dev,
                                 "inform reset to vf(%u) failed %d!\n",
-                                vport->vport_id, ret);
+                                vport->vport_id - HCLGE_VF_VPORT_START_NUM,
+                                ret);
        }
 
        return 0;
@@ -4739,6 +4743,24 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
        return 0;
 }
 
+static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
+                                u8 *hash_algo)
+{
+       switch (hfunc) {
+       case ETH_RSS_HASH_TOP:
+               *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
+               return 0;
+       case ETH_RSS_HASH_XOR:
+               *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
+               return 0;
+       case ETH_RSS_HASH_NO_CHANGE:
+               *hash_algo = vport->rss_algo;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
                         const  u8 *key, const  u8 hfunc)
 {
@@ -4748,30 +4770,27 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
        u8 hash_algo;
        int ret, i;
 
+       ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
+       if (ret) {
+               dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
+               return ret;
+       }
+
        /* Set the RSS Hash Key if specififed by the user */
        if (key) {
-               switch (hfunc) {
-               case ETH_RSS_HASH_TOP:
-                       hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
-                       break;
-               case ETH_RSS_HASH_XOR:
-                       hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
-                       break;
-               case ETH_RSS_HASH_NO_CHANGE:
-                       hash_algo = vport->rss_algo;
-                       break;
-               default:
-                       return -EINVAL;
-               }
-
                ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
                if (ret)
                        return ret;
 
                /* Update the shadow RSS key with user specified qids */
                memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
-               vport->rss_algo = hash_algo;
+       } else {
+               ret = hclge_set_rss_algo_key(hdev, hash_algo,
+                                            vport->rss_hash_key);
+               if (ret)
+                       return ret;
        }
+       vport->rss_algo = hash_algo;
 
        /* Update the shadow RSS table with user specified qids */
        for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
@@ -6625,10 +6644,13 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
                u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
                u16 tqps;
 
+               /* To keep consistent with user's configuration, minus 1 when
+                * printing 'vf', because vf id from ethtool is added 1 for vf.
+                */
                if (vf > hdev->num_req_vfs) {
                        dev_err(&hdev->pdev->dev,
-                               "Error: vf id (%u) > max vf num (%u)\n",
-                               vf, hdev->num_req_vfs);
+                               "Error: vf id (%u) should be less than %u\n",
+                               vf - 1, hdev->num_req_vfs);
                        return -EINVAL;
                }
 
@@ -8125,11 +8147,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
        hclge_clear_arfs_rules(hdev);
        spin_unlock_bh(&hdev->fd_rule_lock);
 
-       /* If it is not PF reset, the firmware will disable the MAC,
+       /* If it is not PF reset or FLR, the firmware will disable the MAC,
         * so it only need to stop phy here.
         */
        if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
-           hdev->reset_type != HNAE3_FUNC_RESET) {
+           hdev->reset_type != HNAE3_FUNC_RESET &&
+           hdev->reset_type != HNAE3_FLR_RESET) {
                hclge_mac_stop_phy(hdev);
                hclge_update_link_status(hdev);
                return;
@@ -9794,6 +9817,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
        if (is_kill && !vlan_id)
                return 0;
 
+       if (vlan_id >= VLAN_N_VID)
+               return -EINVAL;
+
        ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
        if (ret) {
                dev_err(&hdev->pdev->dev,
@@ -10700,7 +10726,8 @@ static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
        return 0;
 }
 
-static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
+static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
+                                 u8 *reset_status)
 {
        struct hclge_reset_tqp_queue_cmd *req;
        struct hclge_desc desc;
@@ -10718,7 +10745,9 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
                return ret;
        }
 
-       return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+       *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+
+       return 0;
 }
 
 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
@@ -10737,7 +10766,7 @@ static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u16 reset_try_times = 0;
-       int reset_status;
+       u8 reset_status;
        u16 queue_gid;
        int ret;
        u16 i;
@@ -10753,7 +10782,11 @@ static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
                }
 
                while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
-                       reset_status = hclge_get_reset_status(hdev, queue_gid);
+                       ret = hclge_get_reset_status(hdev, queue_gid,
+                                                    &reset_status);
+                       if (ret)
+                               return ret;
+
                        if (reset_status)
                                break;
 
@@ -11446,11 +11479,11 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
                struct hclge_vport *vport = &hdev->vport[i];
                int ret;
 
-                /* Send cmd to clear VF's FUNC_RST_ING */
+                /* Send cmd to clear vport's FUNC_RST_ING */
                ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
                if (ret)
                        dev_warn(&hdev->pdev->dev,
-                                "clear vf(%u) rst failed %d!\n",
+                                "clear vport(%u) rst failed %d!\n",
                                 vport->vport_id, ret);
        }
 }
index 2ce5302..65d78ee 100644 (file)
@@ -566,7 +566,7 @@ static int hclge_reset_vf(struct hclge_vport *vport)
        struct hclge_dev *hdev = vport->back;
 
        dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
-                vport->vport_id);
+                vport->vport_id - HCLGE_VF_VPORT_START_NUM);
 
        return hclge_func_reset_cmd(hdev, vport->vport_id);
 }
@@ -590,9 +590,17 @@ static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
                                     struct hclge_mbx_vf_to_pf_cmd *mbx_req,
                                     struct hclge_respond_to_vf_msg *resp_msg)
 {
+       struct hnae3_handle *handle = &vport->nic;
+       struct hclge_dev *hdev = vport->back;
        u16 queue_id, qid_in_pf;
 
        memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
+       if (queue_id >= handle->kinfo.num_tqps) {
+               dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
+                       queue_id, mbx_req->mbx_src_vfid);
+               return;
+       }
+
        qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
        memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
        resp_msg->len = sizeof(qid_in_pf);
index 78d5bf1..44618cc 100644 (file)
@@ -581,7 +581,7 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
-                               "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
+                               "vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
                                vport->vport_id, shap_cfg_cmd->qs_id,
                                max_tx_rate, ret);
                        return ret;
index 82e7270..5fdac86 100644 (file)
@@ -816,40 +816,56 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
        return 0;
 }
 
+static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc,
+                                  u8 *hash_algo)
+{
+       switch (hfunc) {
+       case ETH_RSS_HASH_TOP:
+               *hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
+               return 0;
+       case ETH_RSS_HASH_XOR:
+               *hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
+               return 0;
+       case ETH_RSS_HASH_NO_CHANGE:
+               *hash_algo = hdev->rss_cfg.hash_algo;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
                           const u8 *key, const u8 hfunc)
 {
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+       u8 hash_algo;
        int ret, i;
 
        if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+               ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo);
+               if (ret)
+                       return ret;
+
                /* Set the RSS Hash Key if specififed by the user */
                if (key) {
-                       switch (hfunc) {
-                       case ETH_RSS_HASH_TOP:
-                               rss_cfg->hash_algo =
-                                       HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
-                               break;
-                       case ETH_RSS_HASH_XOR:
-                               rss_cfg->hash_algo =
-                                       HCLGEVF_RSS_HASH_ALGO_SIMPLE;
-                               break;
-                       case ETH_RSS_HASH_NO_CHANGE:
-                               break;
-                       default:
-                               return -EINVAL;
-                       }
-
-                       ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
-                                                      key);
-                       if (ret)
+                       ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key);
+                       if (ret) {
+                               dev_err(&hdev->pdev->dev,
+                                       "invalid hfunc type %u\n", hfunc);
                                return ret;
+                       }
 
                        /* Update the shadow RSS key with user specified qids */
                        memcpy(rss_cfg->rss_hash_key, key,
                               HCLGEVF_RSS_KEY_SIZE);
+               } else {
+                       ret = hclgevf_set_rss_algo_key(hdev, hash_algo,
+                                                      rss_cfg->rss_hash_key);
+                       if (ret)
+                               return ret;
                }
+               rss_cfg->hash_algo = hash_algo;
        }
 
        /* update the shadow RSS table with user specified qids */
@@ -2465,6 +2481,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
 
        hclgevf_enable_vector(&hdev->misc_vector, false);
        event_cause = hclgevf_check_evt_cause(hdev, &clearval);
+       if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
+               hclgevf_clear_event_cause(hdev, clearval);
 
        switch (event_cause) {
        case HCLGEVF_VECTOR0_EVENT_RST:
@@ -2477,10 +2495,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
                break;
        }
 
-       if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
-               hclgevf_clear_event_cause(hdev, clearval);
+       if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
                hclgevf_enable_vector(&hdev->misc_vector, true);
-       }
 
        return IRQ_HANDLED;
 }
index b8a4014..b482f6f 100644 (file)
@@ -1144,7 +1144,7 @@ static struct net_device * __init i82596_probe(void)
                        err = -ENODEV;
                        goto out;
                }
-               memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN);        /* YUCK! Get addr from NOVRAM */
+               memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN); /* YUCK! Get addr from NOVRAM */
                dev->base_addr = MVME_I596_BASE;
                dev->irq = (unsigned) MVME16x_IRQ_I596;
                goto found;
index a775c69..a4579b3 100644 (file)
@@ -4700,6 +4700,14 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
                return 0;
        }
 
+       if (adapter->failover_pending) {
+               adapter->init_done_rc = -EAGAIN;
+               netdev_dbg(netdev, "Failover pending, ignoring login response\n");
+               complete(&adapter->init_done);
+               /* login response buffer will be released on reset */
+               return 0;
+       }
+
 
        netdev->mtu = adapter->req_mtu - ETH_HLEN;
 
        netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
index b0b6f90..ed8ea63 100644 (file)
@@ -335,6 +335,7 @@ config IGC
        tristate "Intel(R) Ethernet Controller I225-LM/I225-V support"
        default n
        depends on PCI
+       depends on PTP_1588_CLOCK_OPTIONAL
        help
          This driver supports Intel(R) Ethernet Controller I225-LM/I225-V
          family of adapters.
index eadcb99..3c4f08d 100644 (file)
@@ -695,6 +695,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
 {
        if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
                set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+               set_bit(ICE_FLAG_AUX_ENA, pf->flags);
                ice_plug_aux_dev(pf);
        }
 }
@@ -707,5 +708,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
 {
        ice_unplug_aux_dev(pf);
        clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+       clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
 }
 #endif /* _ICE_H_ */
index 1f2afdf..adcc9a2 100644 (file)
@@ -271,6 +271,12 @@ int ice_plug_aux_dev(struct ice_pf *pf)
        struct auxiliary_device *adev;
        int ret;
 
+       /* if this PF doesn't support a technology that requires auxiliary
+        * devices, then gracefully exit
+        */
+       if (!ice_is_aux_ena(pf))
+               return 0;
+
        iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
        if (!iadev)
                return -ENOMEM;
index b877efa..0e19b4d 100644 (file)
@@ -6350,7 +6350,9 @@ static int igc_probe(struct pci_dev *pdev,
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
 
-       netdev->vlan_features |= netdev->features;
+       netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+       netdev->mpls_features |= NETIF_F_HW_CSUM;
+       netdev->hw_enc_features |= netdev->vlan_features;
 
        /* MTU range: 68 - 9216 */
        netdev->min_mtu = ETH_MIN_MTU;
index b5f68f6..7bb1f20 100644 (file)
@@ -186,6 +186,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
        int hash;
        int i;
 
+       if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
+               return -EEXIST;
+
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
                struct flow_match_meta match;
 
index a2f61a8..8af7f28 100644 (file)
@@ -372,6 +372,9 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
        int nhoff = skb_network_offset(skb);
        int ret = 0;
 
+       if (skb->encapsulation)
+               return -EPROTONOSUPPORT;
+
        if (skb->protocol != htons(ETH_P_IP))
                return -EPROTONOSUPPORT;
 
@@ -1269,7 +1272,6 @@ static void mlx4_en_do_set_rx_mode(struct work_struct *work)
        if (!netif_carrier_ok(dev)) {
                if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
                        if (priv->port_state.link_state) {
-                               priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
                                netif_carrier_on(dev);
                                en_dbg(LINK, priv, "Link Up\n");
                        }
@@ -1557,26 +1559,36 @@ static void mlx4_en_service_task(struct work_struct *work)
        mutex_unlock(&mdev->state_lock);
 }
 
-static void mlx4_en_linkstate(struct work_struct *work)
+static void mlx4_en_linkstate(struct mlx4_en_priv *priv)
+{
+       struct mlx4_en_port_state *port_state = &priv->port_state;
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct net_device *dev = priv->dev;
+       bool up;
+
+       if (mlx4_en_QUERY_PORT(mdev, priv->port))
+               port_state->link_state = MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN;
+
+       up = port_state->link_state == MLX4_PORT_STATE_DEV_EVENT_PORT_UP;
+       if (up == netif_carrier_ok(dev))
+               netif_carrier_event(dev);
+       if (!up) {
+               en_info(priv, "Link Down\n");
+               netif_carrier_off(dev);
+       } else {
+               en_info(priv, "Link Up\n");
+               netif_carrier_on(dev);
+       }
+}
+
+static void mlx4_en_linkstate_work(struct work_struct *work)
 {
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 linkstate_task);
        struct mlx4_en_dev *mdev = priv->mdev;
-       int linkstate = priv->link_state;
 
        mutex_lock(&mdev->state_lock);
-       /* If observable port state changed set carrier state and
-        * report to system log */
-       if (priv->last_link_state != linkstate) {
-               if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
-                       en_info(priv, "Link Down\n");
-                       netif_carrier_off(priv->dev);
-               } else {
-                       en_info(priv, "Link Up\n");
-                       netif_carrier_on(priv->dev);
-               }
-       }
-       priv->last_link_state = linkstate;
+       mlx4_en_linkstate(priv);
        mutex_unlock(&mdev->state_lock);
 }
 
@@ -2079,9 +2091,11 @@ static int mlx4_en_open(struct net_device *dev)
        mlx4_en_clear_stats(dev);
 
        err = mlx4_en_start_port(dev);
-       if (err)
+       if (err) {
                en_err(priv, "Failed starting port:%d\n", priv->port);
-
+               goto out;
+       }
+       mlx4_en_linkstate(priv);
 out:
        mutex_unlock(&mdev->state_lock);
        return err;
@@ -3168,7 +3182,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        spin_lock_init(&priv->stats_lock);
        INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
        INIT_WORK(&priv->restart_task, mlx4_en_restart);
-       INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
+       INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate_work);
        INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
        INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
 #ifdef CONFIG_RFS_ACCEL
index f3d1a20..6bf558c 100644 (file)
@@ -552,7 +552,6 @@ struct mlx4_en_priv {
 
        struct mlx4_hwq_resources res;
        int link_state;
-       int last_link_state;
        bool port_up;
        int port;
        int registered;
index e84287f..dcf9f27 100644 (file)
@@ -658,11 +658,10 @@ static const struct devlink_param enable_rdma_param =
 
 static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
 {
-       struct mlx5_core_dev *dev = devlink_priv(devlink);
        union devlink_param_value value;
        int err;
 
-       if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
+       if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return 0;
 
        err = devlink_param_register(devlink, &enable_rdma_param);
@@ -679,9 +678,7 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
 
 static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink)
 {
-       struct mlx5_core_dev *dev = devlink_priv(devlink);
-
-       if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
+       if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
                return;
 
        devlink_param_unpublish(devlink, &enable_rdma_param);
index 3f8a980..f9cf9fb 100644 (file)
@@ -1007,7 +1007,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
        err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
        if (err) {
                mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
-               return err;
+               goto err_cancel_work;
        }
 
        err = mlx5_fw_tracer_create_mkey(tracer);
@@ -1031,6 +1031,7 @@ err_notifier_unregister:
        mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
 err_dealloc_pd:
        mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
+err_cancel_work:
        cancel_work_sync(&tracer->read_fw_strings_work);
        return err;
 }
index 669a75f..7b8c818 100644 (file)
@@ -922,7 +922,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work);
 
 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
-int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter);
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
                          u16 vid);
index 0c38c2e..b5ddaa8 100644 (file)
@@ -137,7 +137,7 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
        u16 vport_num, esw_owner_vhca_id;
        struct netlink_ext_ack *extack;
        int ifindex = upper->ifindex;
-       int err;
+       int err = 0;
 
        if (!netif_is_bridge_master(upper))
                return 0;
@@ -244,7 +244,7 @@ mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
        struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
        const struct switchdev_attr *attr = port_attr_info->attr;
        u16 vport_num, esw_owner_vhca_id;
-       int err;
+       int err = 0;
 
        if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
                                                             &esw_owner_vhca_id))
index 51a4d80..de03684 100644 (file)
@@ -300,9 +300,6 @@ mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
 {
        struct mlx5e_rep_indr_block_priv *cb_priv;
 
-       /* All callback list access should be protected by RTNL. */
-       ASSERT_RTNL();
-
        list_for_each_entry(cb_priv,
                            &rpriv->uplink_priv.tc_indr_block_priv_list,
                            list)
index bf0313e..13056cb 100644 (file)
@@ -572,7 +572,7 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
        if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
                u32 rqn;
 
-               if (mlx5e_channels_get_ptp_rqn(chs, &rqn))
+               if (!mlx5e_channels_get_ptp_rqn(chs, &rqn))
                        rqn = res->drop_rqn;
 
                err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
index 2cfd129..306fb5d 100644 (file)
@@ -1884,7 +1884,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
        return set_pflag_cqe_based_moder(netdev, enable, true);
 }
 
-int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val, bool rx_filter)
 {
        bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
        struct mlx5e_params new_params;
@@ -1896,8 +1896,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
        if (curr_val == new_val)
                return 0;
 
-       if (new_val && !priv->profile->rx_ptp_support &&
-           priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
+       if (new_val && !priv->profile->rx_ptp_support && rx_filter) {
                netdev_err(priv->netdev,
                           "Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
                return -EINVAL;
@@ -1905,7 +1904,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
 
        new_params = priv->channels.params;
        MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
-       if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
+       if (rx_filter)
                new_params.ptp_rx = new_val;
 
        if (new_params.ptp_rx == priv->channels.params.ptp_rx)
@@ -1928,12 +1927,14 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
+       bool rx_filter;
        int err;
 
        if (!MLX5_CAP_GEN(mdev, cqe_compression))
                return -EOPNOTSUPP;
 
-       err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
+       rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE;
+       err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter);
        if (err)
                return err;
 
index 47efd85..3fd515e 100644 (file)
@@ -3554,14 +3554,14 @@ static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filte
 
        if (!rx_filter)
                /* Reset CQE compression to Admin default */
-               return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def);
+               return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def, false);
 
        if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
                return 0;
 
        /* Disable CQE compression */
        netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
-       err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+       err = mlx5e_modify_rx_cqe_compression_locked(priv, false, true);
        if (err)
                netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
 
index 9fe8e3c..fe501ba 100644 (file)
@@ -1682,14 +1682,13 @@ static int build_match_list(struct match_list *match_head,
 
                curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
                if (!curr_match) {
+                       rcu_read_unlock();
                        free_match_list(match_head, ft_locked);
-                       err = -ENOMEM;
-                       goto out;
+                       return -ENOMEM;
                }
                curr_match->g = g;
                list_add_tail(&curr_match->list, &match_head->list);
        }
-out:
        rcu_read_unlock();
        return err;
 }
index 49ca57c..ca5690b 100644 (file)
@@ -927,9 +927,12 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
        struct mlx5_core_dev *dev1;
        struct mlx5_lag *ldev;
 
+       ldev = mlx5_lag_dev(dev);
+       if (!ldev)
+               return;
+
        mlx5_dev_list_lock();
 
-       ldev = mlx5_lag_dev(dev);
        dev0 = ldev->pf[MLX5_LAG_P1].dev;
        dev1 = ldev->pf[MLX5_LAG_P2].dev;
 
@@ -946,8 +949,11 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
 {
        struct mlx5_lag *ldev;
 
-       mlx5_dev_list_lock();
        ldev = mlx5_lag_dev(dev);
+       if (!ldev)
+               return;
+
+       mlx5_dev_list_lock();
        ldev->mode_changes_in_progress--;
        mlx5_dev_list_unlock();
        mlx5_queue_bond_work(ldev, 0);
index 3e85b17..6704f5c 100644 (file)
@@ -142,6 +142,13 @@ static int mlxbf_gige_open(struct net_device *netdev)
        err = mlxbf_gige_clean_port(priv);
        if (err)
                goto free_irqs;
+
+       /* Clear driver's valid_polarity to match hardware,
+        * since the above call to clean_port() resets the
+        * receive polarity used by hardware.
+        */
+       priv->valid_polarity = 0;
+
        err = mlxbf_gige_rx_init(priv);
        if (err)
                goto free_irqs;
index c1310ea..d5c485a 100644 (file)
@@ -398,9 +398,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
        int err;
        u16 i;
 
-       dma_buf = kzalloc(sizeof(*dma_buf) +
-                         q_depth * sizeof(struct hwc_work_request),
-                         GFP_KERNEL);
+       dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
        if (!dma_buf)
                return -ENOMEM;
 
index c581b95..559177e 100644 (file)
@@ -563,16 +563,6 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
        ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
                           DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
 
-       /* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of
-        * reset
-        */
-       ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(speed),
-                          DEV_CLOCK_CFG);
-
-       /* No PFC */
-       ocelot_write_gix(ocelot, ANA_PFC_PFC_CFG_FC_LINK_SPEED(speed),
-                        ANA_PFC_PFC_CFG, port);
-
        /* Core: Enable port for frame transfer */
        ocelot_fields_write(ocelot, port,
                            QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
@@ -1303,14 +1293,19 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond,
        return mask;
 }
 
-static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot,
+static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port,
                                      struct net_device *bridge)
 {
+       struct ocelot_port *ocelot_port = ocelot->ports[src_port];
        u32 mask = 0;
        int port;
 
+       if (!ocelot_port || ocelot_port->bridge != bridge ||
+           ocelot_port->stp_state != BR_STATE_FORWARDING)
+               return 0;
+
        for (port = 0; port < ocelot->num_phys_ports; port++) {
-               struct ocelot_port *ocelot_port = ocelot->ports[port];
+               ocelot_port = ocelot->ports[port];
 
                if (!ocelot_port)
                        continue;
@@ -1376,7 +1371,7 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
                        struct net_device *bridge = ocelot_port->bridge;
                        struct net_device *bond = ocelot_port->bond;
 
-                       mask = ocelot_get_bridge_fwd_mask(ocelot, bridge);
+                       mask = ocelot_get_bridge_fwd_mask(ocelot, port, bridge);
                        mask |= cpu_fwd_mask;
                        mask &= ~BIT(port);
                        if (bond) {
index edafbd3..b8737ef 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
-/* Copyright 2020-2021 NXP Semiconductors
+/* Copyright 2020-2021 NXP
  */
 #include <net/devlink.h>
 #include "ocelot.h"
index 08b481a..4b0941f 100644 (file)
@@ -2,7 +2,7 @@
 /* Microsemi Ocelot Switch driver
  *
  * Copyright (c) 2017, 2019 Microsemi Corporation
- * Copyright 2020-2021 NXP Semiconductors
+ * Copyright 2020-2021 NXP
  */
 
 #include <linux/if_bridge.h>
index c0c465a..e54b9fb 100644 (file)
@@ -5,7 +5,7 @@
  * mscc_ocelot_switch_lib.
  *
  * Copyright (c) 2017, 2019 Microsemi Corporation
- * Copyright 2020-2021 NXP Semiconductors
+ * Copyright 2020-2021 NXP
  */
 
 #include <linux/if_bridge.h>
index 556c349..64c0ef5 100644 (file)
@@ -1767,9 +1767,6 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
        struct nfp_flower_indr_block_cb_priv *cb_priv;
        struct nfp_flower_priv *priv = app->priv;
 
-       /* All callback list access should be protected by RTNL. */
-       ASSERT_RTNL();
-
        list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
                if (cb_priv->netdev == netdev)
                        return cb_priv;
index fc8b3e6..186d004 100644 (file)
@@ -1297,6 +1297,14 @@ qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
        prev_weight = weight;
 
        while (weight) {
+               /* If the HW device is during recovery, all resources are
+                * immediately reset without receiving a per-cid indication
+                * from HW. In this case we don't expect the cid_map to be
+                * cleared.
+                */
+               if (p_hwfn->cdev->recov_in_prog)
+                       return 0;
+
                msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
 
                weight = bitmap_weight(bmap->bitmap, bmap->max_count);
index 6e5a6cc..24cd415 100644 (file)
@@ -3367,6 +3367,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
                          struct qed_nvm_image_att *p_image_att)
 {
        enum nvm_image_type type;
+       int rc;
        u32 i;
 
        /* Translate image_id into MFW definitions */
@@ -3395,7 +3396,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
                return -EINVAL;
        }
 
-       qed_mcp_nvm_info_populate(p_hwfn);
+       rc = qed_mcp_nvm_info_populate(p_hwfn);
+       if (rc)
+               return rc;
+
        for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
                if (type == p_hwfn->nvm_info.image_att[i].image_type)
                        break;
index f16a157..cf5baa5 100644 (file)
@@ -77,6 +77,14 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
         * Beyond the added delay we clear the bitmap anyway.
         */
        while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+               /* If the HW device is during recovery, all resources are
+                * immediately reset without receiving a per-cid indication
+                * from HW. In this case we don't expect the cid bitmap to be
+                * cleared.
+                */
+               if (p_hwfn->cdev->recov_in_prog)
+                       return;
+
                msleep(100);
                if (wait_count++ > 20) {
                        DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
index 0a2f34f..27dffa2 100644 (file)
@@ -1354,10 +1354,10 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
        struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
        const struct firmware *fw = fw_info->fw;
        u32 dest, *p_cache, *temp;
-       int i, ret = -EIO;
        __le32 *temp_le;
        u8 data[16];
        size_t size;
+       int i, ret;
        u64 addr;
 
        temp = vzalloc(fw->size);
index 4b2eca5..01ef5ef 100644 (file)
 #define PHY_ST         0x8A    /* PHY status register */
 #define MAC_SM         0xAC    /* MAC status machine */
 #define  MAC_SM_RST    0x0002  /* MAC status machine reset */
+#define MD_CSC         0xb6    /* MDC speed control register */
+#define  MD_CSC_DEFAULT        0x0030
 #define MAC_ID         0xBE    /* Identifier register */
 
 #define TX_DCNT                0x80    /* TX descriptor count */
@@ -355,8 +357,9 @@ static void r6040_reset_mac(struct r6040_private *lp)
 {
        void __iomem *ioaddr = lp->base;
        int limit = MAC_DEF_TIMEOUT;
-       u16 cmd;
+       u16 cmd, md_csc;
 
+       md_csc = ioread16(ioaddr + MD_CSC);
        iowrite16(MAC_RST, ioaddr + MCR1);
        while (limit--) {
                cmd = ioread16(ioaddr + MCR1);
@@ -368,6 +371,10 @@ static void r6040_reset_mac(struct r6040_private *lp)
        iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
        iowrite16(0, ioaddr + MAC_SM);
        mdelay(5);
+
+       /* Restore MDIO clock frequency */
+       if (md_csc != MD_CSC_DEFAULT)
+               iowrite16(md_csc, ioaddr + MD_CSC);
 }
 
 static void r6040_init_mac_regs(struct net_device *dev)
index e5b0d79..3dbea02 100644 (file)
@@ -166,32 +166,46 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
         * We need a channel per event queue, plus a VI per tx queue.
         * This may be more pessimistic than it needs to be.
         */
-       if (n_channels + n_xdp_ev > max_channels) {
-               netif_err(efx, drv, efx->net_dev,
-                         "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
-                         n_xdp_ev, n_channels, max_channels);
-               netif_err(efx, drv, efx->net_dev,
-                         "XDP_TX and XDP_REDIRECT will not work on this interface");
-               efx->n_xdp_channels = 0;
-               efx->xdp_tx_per_channel = 0;
-               efx->xdp_tx_queue_count = 0;
+       if (n_channels >= max_channels) {
+               efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+               netif_warn(efx, drv, efx->net_dev,
+                          "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+                          n_xdp_ev, n_channels, max_channels);
+               netif_warn(efx, drv, efx->net_dev,
+                          "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
        } else if (n_channels + n_xdp_tx > efx->max_vis) {
-               netif_err(efx, drv, efx->net_dev,
-                         "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
-                         n_xdp_tx, n_channels, efx->max_vis);
-               netif_err(efx, drv, efx->net_dev,
-                         "XDP_TX and XDP_REDIRECT will not work on this interface");
-               efx->n_xdp_channels = 0;
-               efx->xdp_tx_per_channel = 0;
-               efx->xdp_tx_queue_count = 0;
+               efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+               netif_warn(efx, drv, efx->net_dev,
+                          "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
+                          n_xdp_tx, n_channels, efx->max_vis);
+               netif_warn(efx, drv, efx->net_dev,
+                          "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
+       } else if (n_channels + n_xdp_ev > max_channels) {
+               efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
+               netif_warn(efx, drv, efx->net_dev,
+                          "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+                          n_xdp_ev, n_channels, max_channels);
+
+               n_xdp_ev = max_channels - n_channels;
+               netif_warn(efx, drv, efx->net_dev,
+                          "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
+                          DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev));
        } else {
+               efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
+       }
+
+       if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
                efx->n_xdp_channels = n_xdp_ev;
                efx->xdp_tx_per_channel = tx_per_ev;
                efx->xdp_tx_queue_count = n_xdp_tx;
                n_channels += n_xdp_ev;
                netif_dbg(efx, drv, efx->net_dev,
                          "Allocating %d TX and %d event queues for XDP\n",
-                         n_xdp_tx, n_xdp_ev);
+                         n_xdp_ev * tx_per_ev, n_xdp_ev);
+       } else {
+               efx->n_xdp_channels = 0;
+               efx->xdp_tx_per_channel = 0;
+               efx->xdp_tx_queue_count = n_xdp_tx;
        }
 
        if (vec_count < n_channels) {
@@ -858,6 +872,20 @@ rollback:
        goto out;
 }
 
+static inline int
+efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
+                    struct efx_tx_queue *tx_queue)
+{
+       if (xdp_queue_number >= efx->xdp_tx_queue_count)
+               return -EINVAL;
+
+       netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
+                 tx_queue->channel->channel, tx_queue->label,
+                 xdp_queue_number, tx_queue->queue);
+       efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+       return 0;
+}
+
 int efx_set_channels(struct efx_nic *efx)
 {
        struct efx_tx_queue *tx_queue;
@@ -896,20 +924,9 @@ int efx_set_channels(struct efx_nic *efx)
                        if (efx_channel_is_xdp_tx(channel)) {
                                efx_for_each_channel_tx_queue(tx_queue, channel) {
                                        tx_queue->queue = next_queue++;
-
-                                       /* We may have a few left-over XDP TX
-                                        * queues owing to xdp_tx_queue_count
-                                        * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
-                                        * We still allocate and probe those
-                                        * TXQs, but never use them.
-                                        */
-                                       if (xdp_queue_number < efx->xdp_tx_queue_count) {
-                                               netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
-                                                         channel->channel, tx_queue->label,
-                                                         xdp_queue_number, tx_queue->queue);
-                                               efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+                                       rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+                                       if (rc == 0)
                                                xdp_queue_number++;
-                                       }
                                }
                        } else {
                                efx_for_each_channel_tx_queue(tx_queue, channel) {
@@ -918,10 +935,35 @@ int efx_set_channels(struct efx_nic *efx)
                                                  channel->channel, tx_queue->label,
                                                  tx_queue->queue);
                                }
+
+                               /* If XDP is borrowing queues from net stack, it must use the queue
+                                * with no csum offload, which is the first one of the channel
+                                * (note: channel->tx_queue_by_type is not initialized yet)
+                                */
+                               if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
+                                       tx_queue = &channel->tx_queue[0];
+                                       rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+                                       if (rc == 0)
+                                               xdp_queue_number++;
+                               }
                        }
                }
        }
-       WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);
+       WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
+               xdp_queue_number != efx->xdp_tx_queue_count);
+       WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
+               xdp_queue_number > efx->xdp_tx_queue_count);
+
+       /* If we have more CPUs than assigned XDP TX queues, assign the already
+        * existing queues to the exceeding CPUs
+        */
+       next_queue = 0;
+       while (xdp_queue_number < efx->xdp_tx_queue_count) {
+               tx_queue = efx->xdp_tx_queues[next_queue++];
+               rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+               if (rc == 0)
+                       xdp_queue_number++;
+       }
 
        rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
        if (rc)
index 9b4b257..f698181 100644 (file)
@@ -782,6 +782,12 @@ struct efx_async_filter_insertion {
 #define EFX_RPS_MAX_IN_FLIGHT  8
 #endif /* CONFIG_RFS_ACCEL */
 
+enum efx_xdp_tx_queues_mode {
+       EFX_XDP_TX_QUEUES_DEDICATED,    /* one queue per core, locking not needed */
+       EFX_XDP_TX_QUEUES_SHARED,       /* each queue used by more than 1 core */
+       EFX_XDP_TX_QUEUES_BORROWED      /* queues borrowed from net stack */
+};
+
 /**
  * struct efx_nic - an Efx NIC
  * @name: Device name (net device name or bus id before net device registered)
@@ -820,6 +826,7 @@ struct efx_async_filter_insertion {
  *     should be allocated for this NIC
  * @xdp_tx_queue_count: Number of entries in %xdp_tx_queues.
  * @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit.
+ * @xdp_txq_queues_mode: XDP TX queues sharing strategy.
  * @rxq_entries: Size of receive queues requested by user.
  * @txq_entries: Size of transmit queues requested by user.
  * @txq_stop_thresh: TX queue fill level at or above which we stop it.
@@ -979,6 +986,7 @@ struct efx_nic {
 
        unsigned int xdp_tx_queue_count;
        struct efx_tx_queue **xdp_tx_queues;
+       enum efx_xdp_tx_queues_mode xdp_txq_queues_mode;
 
        unsigned rxq_entries;
        unsigned txq_entries;
index 0c6650d..d16e031 100644 (file)
@@ -428,23 +428,32 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
        unsigned int len;
        int space;
        int cpu;
-       int i;
+       int i = 0;
 
-       cpu = raw_smp_processor_id();
+       if (unlikely(n && !xdpfs))
+               return -EINVAL;
+       if (unlikely(!n))
+               return 0;
 
-       if (!efx->xdp_tx_queue_count ||
-           unlikely(cpu >= efx->xdp_tx_queue_count))
+       cpu = raw_smp_processor_id();
+       if (unlikely(cpu >= efx->xdp_tx_queue_count))
                return -EINVAL;
 
        tx_queue = efx->xdp_tx_queues[cpu];
        if (unlikely(!tx_queue))
                return -EINVAL;
 
-       if (unlikely(n && !xdpfs))
-               return -EINVAL;
+       if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
+               HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
 
-       if (!n)
-               return 0;
+       /* If we're borrowing net stack queues we have to handle stop-restart
+        * or we might block the queue and it will be considered as frozen
+        */
+       if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
+               if (netif_tx_queue_stopped(tx_queue->core_txq))
+                       goto unlock;
+               efx_tx_maybe_stop_queue(tx_queue);
+       }
 
        /* Check for available space. We should never need multiple
         * descriptors per frame.
@@ -484,6 +493,10 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
        if (flush && i > 0)
                efx_nic_push_buffers(tx_queue);
 
+unlock:
+       if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
+               HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);
+
        return i == 0 ? -EIO : i;
 }
 
index ece02b3..553c440 100644 (file)
@@ -309,7 +309,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
                        priv->clk_csr = STMMAC_CSR_100_150M;
                else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
                        priv->clk_csr = STMMAC_CSR_150_250M;
-               else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
+               else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
                        priv->clk_csr = STMMAC_CSR_250_300M;
        }
 
@@ -7118,7 +7118,6 @@ int stmmac_suspend(struct device *dev)
        struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);
        u32 chan;
-       int ret;
 
        if (!ndev || !netif_running(ndev))
                return 0;
@@ -7150,13 +7149,6 @@ int stmmac_suspend(struct device *dev)
        } else {
                stmmac_mac_set(priv, priv->ioaddr, false);
                pinctrl_pm_select_sleep_state(priv->device);
-               /* Disable clock in case of PWM is off */
-               clk_disable_unprepare(priv->plat->clk_ptp_ref);
-               ret = pm_runtime_force_suspend(dev);
-               if (ret) {
-                       mutex_unlock(&priv->lock);
-                       return ret;
-               }
        }
 
        mutex_unlock(&priv->lock);
@@ -7242,12 +7234,6 @@ int stmmac_resume(struct device *dev)
                priv->irq_wake = 0;
        } else {
                pinctrl_pm_select_default_state(priv->device);
-               /* enable the clk previously disabled */
-               ret = pm_runtime_force_resume(dev);
-               if (ret)
-                       return ret;
-               if (priv->plat->clk_ptp_ref)
-                       clk_prepare_enable(priv->plat->clk_ptp_ref);
                /* reset the phy so that it's ready */
                if (priv->mii)
                        stmmac_mdio_reset(priv->mii);
index 5ca7108..62cec9b 100644 (file)
@@ -9,6 +9,7 @@
 *******************************************************************************/
 
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/module.h>
 #include <linux/io.h>
 #include <linux/of.h>
@@ -771,9 +772,52 @@ static int __maybe_unused stmmac_runtime_resume(struct device *dev)
        return stmmac_bus_clks_config(priv, true);
 }
 
+static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+       int ret;
+
+       if (!netif_running(ndev))
+               return 0;
+
+       if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+               /* Disable clock in case of PWM is off */
+               clk_disable_unprepare(priv->plat->clk_ptp_ref);
+
+               ret = pm_runtime_force_suspend(dev);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+       int ret;
+
+       if (!netif_running(ndev))
+               return 0;
+
+       if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+               /* enable the clk previously disabled */
+               ret = pm_runtime_force_resume(dev);
+               if (ret)
+                       return ret;
+
+               clk_prepare_enable(priv->plat->clk_ptp_ref);
+       }
+
+       return 0;
+}
+
 const struct dev_pm_ops stmmac_pltfr_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume)
        SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL)
+       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume)
 };
 EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
 
index 8fe8887..6192244 100644 (file)
@@ -68,9 +68,9 @@
 #define SIXP_DAMA_OFF          0
 
 /* default level 2 parameters */
-#define SIXP_TXDELAY                   (HZ/4)  /* in 1 s */
+#define SIXP_TXDELAY                   25      /* 250 ms */
 #define SIXP_PERSIST                   50      /* in 256ths */
-#define SIXP_SLOTTIME                  (HZ/10) /* in 1 s */
+#define SIXP_SLOTTIME                  10      /* 100 ms */
 #define SIXP_INIT_RESYNC_TIMEOUT       (3*HZ/2) /* in 1 s */
 #define SIXP_RESYNC_TIMEOUT            5*HZ    /* in 1 s */
 
index b50b7fa..f4c3efc 100644 (file)
@@ -973,7 +973,7 @@ static inline void tx_on(struct scc_priv *priv)
                flags = claim_dma_lock();
                set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
                set_dma_addr(priv->param.dma,
-                            (int) priv->tx_buf[priv->tx_tail] + n);
+                            virt_to_bus(priv->tx_buf[priv->tx_tail]) + n);
                set_dma_count(priv->param.dma,
                              priv->tx_len[priv->tx_tail] - n);
                release_dma_lock(flags);
@@ -1020,7 +1020,7 @@ static inline void rx_on(struct scc_priv *priv)
                flags = claim_dma_lock();
                set_dma_mode(priv->param.dma, DMA_MODE_READ);
                set_dma_addr(priv->param.dma,
-                            (int) priv->rx_buf[priv->rx_head]);
+                            virt_to_bus(priv->rx_buf[priv->rx_head]));
                set_dma_count(priv->param.dma, BUF_SIZE);
                release_dma_lock(flags);
                enable_dma(priv->param.dma);
@@ -1233,7 +1233,7 @@ static void special_condition(struct scc_priv *priv, int rc)
                if (priv->param.dma >= 0) {
                        flags = claim_dma_lock();
                        set_dma_addr(priv->param.dma,
-                                    (int) priv->rx_buf[priv->rx_head]);
+                                    virt_to_bus(priv->rx_buf[priv->rx_head]));
                        set_dma_count(priv->param.dma, BUF_SIZE);
                        release_dma_lock(flags);
                } else {
index 2324e1b..1da334f 100644 (file)
@@ -430,7 +430,8 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
         * table region determines the number of entries it has.
         */
        if (filter) {
-               count = hweight32(ipa->filter_map);
+               /* Include one extra "slot" to hold the filter map itself */
+               count = 1 + hweight32(ipa->filter_map);
                hash_count = hash_mem->size ? count : 0;
        } else {
                count = mem->size / sizeof(__le64);
index 984c9f7..d16fc58 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright 2021 NXP Semiconductors
+/* Copyright 2021 NXP
  */
 #include <linux/pcs/pcs-xpcs.h>
 #include "pcs-xpcs.h"
index 21aa24c..daae7fa 100644 (file)
@@ -5,7 +5,7 @@
 #ifndef HAVE_DP83640_REGISTERS
 #define HAVE_DP83640_REGISTERS
 
-#define PAGE0                     0x0000
+/* #define PAGE0                  0x0000 */
 #define PHYCR2                    0x001c /* PHY Control Register 2 */
 
 #define PAGE4                     0x0004
index c94cb53..250742f 100644 (file)
@@ -179,6 +179,16 @@ static int mdio_remove(struct device *dev)
        return 0;
 }
 
+static void mdio_shutdown(struct device *dev)
+{
+       struct mdio_device *mdiodev = to_mdio_device(dev);
+       struct device_driver *drv = mdiodev->dev.driver;
+       struct mdio_driver *mdiodrv = to_mdio_driver(drv);
+
+       if (mdiodrv->shutdown)
+               mdiodrv->shutdown(mdiodev);
+}
+
 /**
  * mdio_driver_register - register an mdio_driver with the MDIO layer
  * @drv: new mdio_driver to register
@@ -193,6 +203,7 @@ int mdio_driver_register(struct mdio_driver *drv)
        mdiodrv->driver.bus = &mdio_bus_type;
        mdiodrv->driver.probe = mdio_probe;
        mdiodrv->driver.remove = mdio_remove;
+       mdiodrv->driver.shutdown = mdio_shutdown;
 
        retval = driver_register(&mdiodrv->driver);
        if (retval) {
index 9e2891d..ba5ad86 100644 (file)
@@ -233,9 +233,11 @@ static DEFINE_MUTEX(phy_fixup_lock);
 
 static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
 {
+       struct device_driver *drv = phydev->mdio.dev.driver;
+       struct phy_driver *phydrv = to_phy_driver(drv);
        struct net_device *netdev = phydev->attached_dev;
 
-       if (!phydev->drv->suspend)
+       if (!drv || !phydrv->suspend)
                return false;
 
        /* PHY not attached? May suspend if the PHY has not already been
index a1464b7..0a0abe8 100644 (file)
@@ -1607,6 +1607,32 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
        if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
                return -EINVAL;
 
+       /* If this link is with an SFP, ensure that changes to advertised modes
+        * also cause the associated interface to be selected such that the
+        * link can be configured correctly.
+        */
+       if (pl->sfp_port && pl->sfp_bus) {
+               config.interface = sfp_select_interface(pl->sfp_bus,
+                                                       config.advertising);
+               if (config.interface == PHY_INTERFACE_MODE_NA) {
+                       phylink_err(pl,
+                                   "selection of interface failed, advertisement %*pb\n",
+                                   __ETHTOOL_LINK_MODE_MASK_NBITS,
+                                   config.advertising);
+                       return -EINVAL;
+               }
+
+               /* Revalidate with the selected interface */
+               linkmode_copy(support, pl->supported);
+               if (phylink_validate(pl, support, &config)) {
+                       phylink_err(pl, "validation of %s/%s with support %*pb failed\n",
+                                   phylink_an_mode_str(pl->cur_link_an_mode),
+                                   phy_modes(config.interface),
+                                   __ETHTOOL_LINK_MODE_MASK_NBITS, support);
+                       return -EINVAL;
+               }
+       }
+
        mutex_lock(&pl->state_mutex);
        pl->link_config.speed = config.speed;
        pl->link_config.duplex = config.duplex;
@@ -2186,7 +2212,9 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode,
        if (phy_interface_mode_is_8023z(iface) && pl->phydev)
                return -EINVAL;
 
-       changed = !linkmode_equal(pl->supported, support);
+       changed = !linkmode_equal(pl->supported, support) ||
+                 !linkmode_equal(pl->link_config.advertising,
+                                 config.advertising);
        if (changed) {
                linkmode_copy(pl->supported, support);
                linkmode_copy(pl->link_config.advertising, config.advertising);
index a57251b..f97813a 100644 (file)
@@ -2719,14 +2719,14 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
 
        serial = kzalloc(sizeof(*serial), GFP_KERNEL);
        if (!serial)
-               goto exit;
+               goto err_free_dev;
 
        hso_dev->port_data.dev_serial = serial;
        serial->parent = hso_dev;
 
        if (hso_serial_common_create
            (serial, 1, CTRL_URB_RX_SIZE, CTRL_URB_TX_SIZE))
-               goto exit;
+               goto err_free_serial;
 
        serial->tx_data_length--;
        serial->write_data = hso_mux_serial_write_data;
@@ -2742,11 +2742,9 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
        /* done, return it */
        return hso_dev;
 
-exit:
-       if (serial) {
-               tty_unregister_device(tty_drv, serial->minor);
-               kfree(serial);
-       }
+err_free_serial:
+       kfree(serial);
+err_free_dev:
        kfree(hso_dev);
        return NULL;
 
index 271d38c..79bd258 100644 (file)
@@ -423,6 +423,10 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 
                skb_reserve(skb, p - buf);
                skb_put(skb, len);
+
+               page = (struct page *)page->private;
+               if (page)
+                       give_pages(rq, page);
                goto ok;
        }
 
index 5a8df5a..141635a 100644 (file)
@@ -4756,12 +4756,12 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
        LIST_HEAD(list);
        unsigned int h;
 
-       rtnl_lock();
        list_for_each_entry(net, net_list, exit_list) {
                struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 
                unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
        }
+       rtnl_lock();
        list_for_each_entry(net, net_list, exit_list)
                vxlan_destroy_tunnels(net, &list);
 
index f6b92ef..480bcd1 100644 (file)
@@ -34,6 +34,8 @@ obj-$(CONFIG_SLIC_DS26522)    += slic_ds26522.o
 clean-files := wanxlfw.inc
 $(obj)/wanxl.o:        $(obj)/wanxlfw.inc
 
+CROSS_COMPILE_M68K = m68k-linux-gnu-
+
 ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y)
 ifeq ($(ARCH),m68k)
   M68KCC = $(CC)
index 39a01c2..32d5bc4 100644 (file)
@@ -499,7 +499,7 @@ check_frags:
                                 * the header's copy failed, and they are
                                 * sharing a slot, send an error
                                 */
-                               if (i == 0 && sharedslot)
+                               if (i == 0 && !first_shinfo && sharedslot)
                                        xenvif_idx_release(queue, pending_idx,
                                                           XEN_NETIF_RSP_ERROR);
                                else
index a620c34..0875b77 100644 (file)
@@ -278,6 +278,7 @@ static int st_nci_spi_remove(struct spi_device *dev)
 
 static struct spi_device_id st_nci_spi_id_table[] = {
        {ST_NCI_SPI_DRIVER_NAME, 0},
+       {"st21nfcb-spi", 0},
        {}
 };
 MODULE_DEVICE_TABLE(spi, st_nci_spi_id_table);
index 7efb31b..e486845 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/backing-dev.h>
-#include <linux/list_sort.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/pr.h>
@@ -3524,7 +3523,9 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
        lockdep_assert_held(&subsys->lock);
 
        list_for_each_entry(h, &subsys->nsheads, entry) {
-               if (h->ns_id == nsid && nvme_tryget_ns_head(h))
+               if (h->ns_id != nsid)
+                       continue;
+               if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
                        return h;
        }
 
@@ -3714,15 +3715,6 @@ out_unlock:
        return ret;
 }
 
-static int ns_cmp(void *priv, const struct list_head *a,
-               const struct list_head *b)
-{
-       struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
-       struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
-
-       return nsa->head->ns_id - nsb->head->ns_id;
-}
-
 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 {
        struct nvme_ns *ns, *ret = NULL;
@@ -3743,6 +3735,22 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 }
 EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
 
+/*
+ * Add the namespace to the controller list while keeping the list ordered.
+ */
+static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
+{
+       struct nvme_ns *tmp;
+
+       list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
+               if (tmp->head->ns_id < ns->head->ns_id) {
+                       list_add(&ns->list, &tmp->list);
+                       return;
+               }
+       }
+       list_add(&ns->list, &ns->ctrl->namespaces);
+}
+
 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
                struct nvme_ns_ids *ids)
 {
@@ -3793,9 +3801,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
                goto out_unlink_ns;
 
        down_write(&ctrl->namespaces_rwsem);
-       list_add_tail(&ns->list, &ctrl->namespaces);
+       nvme_ns_add_to_ctrl_list(ns);
        up_write(&ctrl->namespaces_rwsem);
-
        nvme_get_ctrl(ctrl);
 
        if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
@@ -3843,6 +3850,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 
        mutex_lock(&ns->ctrl->subsys->lock);
        list_del_rcu(&ns->siblings);
+       if (list_empty(&ns->head->list)) {
+               list_del_init(&ns->head->entry);
+               last_path = true;
+       }
        mutex_unlock(&ns->ctrl->subsys->lock);
 
        /* guarantee not available in head->list */
@@ -3856,20 +3867,11 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                nvme_cdev_del(&ns->cdev, &ns->cdev_device);
        del_gendisk(ns->disk);
        blk_cleanup_queue(ns->queue);
-       if (blk_get_integrity(ns->disk))
-               blk_integrity_unregister(ns->disk);
 
        down_write(&ns->ctrl->namespaces_rwsem);
        list_del_init(&ns->list);
        up_write(&ns->ctrl->namespaces_rwsem);
 
-       /* Synchronize with nvme_init_ns_head() */
-       mutex_lock(&ns->head->subsys->lock);
-       if (list_empty(&ns->head->list)) {
-               list_del_init(&ns->head->entry);
-               last_path = true;
-       }
-       mutex_unlock(&ns->head->subsys->lock);
        if (last_path)
                nvme_mpath_shutdown_disk(ns->head);
        nvme_put_ns(ns);
@@ -4083,10 +4085,6 @@ static void nvme_scan_work(struct work_struct *work)
        if (nvme_scan_ns_list(ctrl) != 0)
                nvme_scan_ns_sequential(ctrl);
        mutex_unlock(&ctrl->scan_lock);
-
-       down_write(&ctrl->namespaces_rwsem);
-       list_sort(NULL, &ctrl->namespaces, ns_cmp);
-       up_write(&ctrl->namespaces_rwsem);
 }
 
 /*
index b08a61c..aa14ad9 100644 (file)
@@ -2487,6 +2487,7 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
         */
        if (ctrl->ctrl.queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
+               nvme_sync_io_queues(&ctrl->ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                nvme_fc_terminate_exchange, &ctrl->ctrl);
                blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
@@ -2510,6 +2511,7 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
         * clean up the admin queue. Same thing as above.
         */
        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       blk_sync_queue(ctrl->ctrl.admin_q);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_fc_terminate_exchange, &ctrl->ctrl);
        blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
@@ -2951,6 +2953,13 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
        if (ctrl->ctrl.queue_count == 1)
                return 0;
 
+       if (prior_ioq_cnt != nr_io_queues) {
+               dev_info(ctrl->ctrl.device,
+                       "reconnect: revising io queue count from %d to %d\n",
+                       prior_ioq_cnt, nr_io_queues);
+               blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
+       }
+
        ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
        if (ret)
                goto out_free_io_queues;
@@ -2959,15 +2968,6 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
        if (ret)
                goto out_delete_hw_queues;
 
-       if (prior_ioq_cnt != nr_io_queues) {
-               dev_info(ctrl->ctrl.device,
-                       "reconnect: revising io queue count from %d to %d\n",
-                       prior_ioq_cnt, nr_io_queues);
-               nvme_wait_freeze(&ctrl->ctrl);
-               blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
-               nvme_unfreeze(&ctrl->ctrl);
-       }
-
        return 0;
 
 out_delete_hw_queues:
index 5d7bc58..e8ccdd3 100644 (file)
@@ -600,14 +600,17 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
 
        down_read(&ctrl->namespaces_rwsem);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
-               unsigned nsid = le32_to_cpu(desc->nsids[n]);
-
+               unsigned nsid;
+again:
+               nsid = le32_to_cpu(desc->nsids[n]);
                if (ns->head->ns_id < nsid)
                        continue;
                if (ns->head->ns_id == nsid)
                        nvme_update_ns_ana_state(desc, ns);
                if (++n == nr_nsids)
                        break;
+               if (ns->head->ns_id > nsid)
+                       goto again;
        }
        up_read(&ctrl->namespaces_rwsem);
        return 0;
index a68704e..042c594 100644 (file)
@@ -656,8 +656,8 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
        if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
                return;
 
-       nvme_rdma_destroy_queue_ib(queue);
        rdma_destroy_id(queue->cm_id);
+       nvme_rdma_destroy_queue_ib(queue);
        mutex_destroy(&queue->queue_lock);
 }
 
@@ -1815,14 +1815,10 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
        for (i = 0; i < queue->queue_size; i++) {
                ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
                if (ret)
-                       goto out_destroy_queue_ib;
+                       return ret;
        }
 
        return 0;
-
-out_destroy_queue_ib:
-       nvme_rdma_destroy_queue_ib(queue);
-       return ret;
 }
 
 static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
@@ -1916,14 +1912,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
        if (ret) {
                dev_err(ctrl->ctrl.device,
                        "rdma_connect_locked failed (%d).\n", ret);
-               goto out_destroy_queue_ib;
+               return ret;
        }
 
        return 0;
-
-out_destroy_queue_ib:
-       nvme_rdma_destroy_queue_ib(queue);
-       return ret;
 }
 
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
@@ -1954,8 +1946,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
        case RDMA_CM_EVENT_ROUTE_ERROR:
        case RDMA_CM_EVENT_CONNECT_ERROR:
        case RDMA_CM_EVENT_UNREACHABLE:
-               nvme_rdma_destroy_queue_ib(queue);
-               fallthrough;
        case RDMA_CM_EVENT_ADDR_ERROR:
                dev_dbg(queue->ctrl->ctrl.device,
                        "CM error event %d\n", ev->event);
index e2ab12f..3c1c29d 100644 (file)
@@ -274,6 +274,12 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
        } while (ret > 0);
 }
 
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+       return !list_empty(&queue->send_list) ||
+               !llist_empty(&queue->req_list) || queue->more_requests;
+}
+
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
                bool sync, bool last)
 {
@@ -294,9 +300,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
                nvme_tcp_send_all(queue);
                queue->more_requests = false;
                mutex_unlock(&queue->send_mutex);
-       } else if (last) {
-               queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
        }
+
+       if (last && nvme_tcp_queue_more(queue))
+               queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 }
 
 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
@@ -613,7 +620,7 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
                cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
        data->ttag = pdu->ttag;
        data->command_id = nvme_cid(rq);
-       data->data_offset = cpu_to_le32(req->data_sent);
+       data->data_offset = pdu->r2t_offset;
        data->data_length = cpu_to_le32(req->pdu_len);
        return 0;
 }
@@ -906,12 +913,6 @@ done:
        read_unlock_bh(&sk->sk_callback_lock);
 }
 
-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
-{
-       return !list_empty(&queue->send_list) ||
-               !llist_empty(&queue->req_list) || queue->more_requests;
-}
-
 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
 {
        queue->request = NULL;
@@ -952,7 +953,15 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
                        nvme_tcp_ddgst_update(queue->snd_hash, page,
                                        offset, ret);
 
-               /* fully successful last write*/
+               /*
+                * update the request iterator except for the last payload send
+                * in the request where we don't want to modify it as we may
+                * compete with the RX path completing the request.
+                */
+               if (req->data_sent + ret < req->data_len)
+                       nvme_tcp_advance_req(req, ret);
+
+               /* fully successful last send in current PDU */
                if (last && ret == len) {
                        if (queue->data_digest) {
                                nvme_tcp_ddgst_final(queue->snd_hash,
@@ -964,7 +973,6 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
                        }
                        return 1;
                }
-               nvme_tcp_advance_req(req, ret);
        }
        return -EAGAIN;
 }
@@ -1145,8 +1153,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
                                pending = true;
                        else if (unlikely(result < 0))
                                break;
-               } else
-                       pending = !llist_empty(&queue->req_list);
+               }
 
                result = nvme_tcp_try_recv(queue);
                if (result > 0)
index d784f3c..be5d824 100644 (file)
@@ -1067,7 +1067,7 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
 {
        struct nvmet_subsys *subsys = to_subsys(item);
 
-       return snprintf(page, PAGE_SIZE, "%*s\n",
+       return snprintf(page, PAGE_SIZE, "%.*s\n",
                        NVMET_SN_MAX_SIZE, subsys->serial);
 }
 
index 39854d4..da41461 100644 (file)
@@ -109,6 +109,7 @@ config MTK_EFUSE
 
 config NVMEM_NINTENDO_OTP
        tristate "Nintendo Wii and Wii U OTP Support"
+       depends on WII || COMPILE_TEST
        help
          This is a driver exposing the OTP of a Nintendo Wii or Wii U console.
 
index 5b043ee..b0800c2 100644 (file)
@@ -85,7 +85,11 @@ of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
                        break;
        }
 
-       if (i != count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
+       /*
+        * Attempt to initialize a restricted-dma-pool region if one was found.
+        * Note that count can hold a negative error code.
+        */
+       if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
                dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
 }
 
index 3fd74bb..a348348 100644 (file)
@@ -1291,7 +1291,6 @@ DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells")
 DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
 DEFINE_SIMPLE_PROP(leds, "leds", NULL)
 DEFINE_SIMPLE_PROP(backlight, "backlight", NULL)
-DEFINE_SIMPLE_PROP(phy_handle, "phy-handle", NULL)
 DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
 DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
 
@@ -1380,7 +1379,6 @@ static const struct supplier_bindings of_supplier_bindings[] = {
        { .parse_prop = parse_resets, },
        { .parse_prop = parse_leds, },
        { .parse_prop = parse_backlight, },
-       { .parse_prop = parse_phy_handle, },
        { .parse_prop = parse_gpio_compat, },
        { .parse_prop = parse_interrupts, },
        { .parse_prop = parse_regulators, },
index 0c473d7..43e615a 100644 (file)
@@ -110,7 +110,7 @@ config PCI_PF_STUB
 
 config XEN_PCIDEV_FRONTEND
        tristate "Xen PCI Frontend"
-       depends on X86 && XEN
+       depends on XEN_PV
        select PCI_XEN
        select XEN_XENBUS_FRONTEND
        default y
index a1b1e2a..0f40943 100644 (file)
@@ -937,7 +937,7 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev);
 
 void pci_set_acpi_fwnode(struct pci_dev *dev)
 {
-       if (!ACPI_COMPANION(&dev->dev) && !pci_dev_is_added(dev))
+       if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev))
                ACPI_COMPANION_SET(&dev->dev,
                                   acpi_pci_find_companion(&dev->dev));
 }
index e5089af..4537d1e 100644 (file)
@@ -5435,7 +5435,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
                              PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
 
 /*
- * Create device link for NVIDIA GPU with integrated USB xHCI Host
+ * Create device link for GPUs with integrated USB xHCI Host
  * controller to VGA.
  */
 static void quirk_gpu_usb(struct pci_dev *usb)
@@ -5444,9 +5444,11 @@ static void quirk_gpu_usb(struct pci_dev *usb)
 }
 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
                              PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+                             PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
 
 /*
- * Create device link for NVIDIA GPU with integrated Type-C UCSI controller
+ * Create device link for GPUs with integrated Type-C UCSI controller
  * to VGA. Currently there is no class code defined for UCSI device over PCI
  * so using UNKNOWN class for now and it will be updated when UCSI
  * over PCI gets a class code.
@@ -5459,6 +5461,9 @@ static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
                              PCI_CLASS_SERIAL_UNKNOWN, 8,
                              quirk_gpu_usb_typec_ucsi);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+                             PCI_CLASS_SERIAL_UNKNOWN, 8,
+                             quirk_gpu_usb_typec_ucsi);
 
 /*
  * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it
index 25557b2..4be2489 100644 (file)
@@ -99,6 +99,24 @@ error:
        return off ?: PCI_VPD_SZ_INVALID;
 }
 
+static bool pci_vpd_available(struct pci_dev *dev)
+{
+       struct pci_vpd *vpd = &dev->vpd;
+
+       if (!vpd->cap)
+               return false;
+
+       if (vpd->len == 0) {
+               vpd->len = pci_vpd_size(dev);
+               if (vpd->len == PCI_VPD_SZ_INVALID) {
+                       vpd->cap = 0;
+                       return false;
+               }
+       }
+
+       return true;
+}
+
 /*
  * Wait for last operation to complete.
  * This code has to spin since there is no other notification from the PCI
@@ -145,7 +163,7 @@ static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
        loff_t end = pos + count;
        u8 *buf = arg;
 
-       if (!vpd->cap)
+       if (!pci_vpd_available(dev))
                return -ENODEV;
 
        if (pos < 0)
@@ -206,7 +224,7 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
        loff_t end = pos + count;
        int ret = 0;
 
-       if (!vpd->cap)
+       if (!pci_vpd_available(dev))
                return -ENODEV;
 
        if (pos < 0 || (pos & 3) || (count & 3))
@@ -242,14 +260,11 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
 
 void pci_vpd_init(struct pci_dev *dev)
 {
+       if (dev->vpd.len == PCI_VPD_SZ_INVALID)
+               return;
+
        dev->vpd.cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
        mutex_init(&dev->vpd.lock);
-
-       if (!dev->vpd.len)
-               dev->vpd.len = pci_vpd_size(dev);
-
-       if (dev->vpd.len == PCI_VPD_SZ_INVALID)
-               dev->vpd.cap = 0;
 }
 
 static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
@@ -294,13 +309,14 @@ const struct attribute_group pci_dev_vpd_attr_group = {
 
 void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size)
 {
-       unsigned int len = dev->vpd.len;
+       unsigned int len;
        void *buf;
        int cnt;
 
-       if (!dev->vpd.cap)
+       if (!pci_vpd_available(dev))
                return ERR_PTR(-ENODEV);
 
+       len = dev->vpd.len;
        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);
index 3cbc3ba..295cc79 100644 (file)
@@ -952,6 +952,8 @@ int armpmu_register(struct arm_pmu *pmu)
                pmu->name, pmu->num_events,
                has_nmi ? ", using NMIs" : "");
 
+       kvm_host_pmu_init(pmu);
+
        return 0;
 
 out_destroy:
index 3481479..d6a7c89 100644 (file)
@@ -71,7 +71,7 @@
 #define AMD_CPU_ID_YC                  0x14B5
 
 #define PMC_MSG_DELAY_MIN_US           100
-#define RESPONSE_REGISTER_LOOP_MAX     200
+#define RESPONSE_REGISTER_LOOP_MAX     20000
 
 #define SOC_SUBSYSTEM_IP_MAX   12
 #define DELAY_MIN_US           2000
index 821aba3..42513ea 100644 (file)
@@ -166,8 +166,7 @@ config DELL_WMI
 
 config DELL_WMI_PRIVACY
        bool "Dell WMI Hardware Privacy Support"
-       depends on DELL_WMI
-       depends on LEDS_TRIGGER_AUDIO
+       depends on LEDS_TRIGGER_AUDIO = y || DELL_WMI = LEDS_TRIGGER_AUDIO
        help
          This option adds integration with the "Dell Hardware Privacy"
          feature of Dell laptops to the dell-wmi driver.
index 7f3a03f..d53634c 100644 (file)
@@ -144,6 +144,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550I AORUS PRO AX"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
index a33a582..0859894 100644 (file)
@@ -118,12 +118,30 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = {
        { }
 };
 
+/*
+ * Some devices, even non convertible ones, can send incorrect SW_TABLET_MODE
+ * reports. Accept such reports only from devices in this list.
+ */
+static const struct dmi_system_id dmi_auto_add_switch[] = {
+       {
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */),
+               },
+       },
+       {
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */),
+               },
+       },
+       {} /* Array terminator */
+};
+
 struct intel_hid_priv {
        struct input_dev *input_dev;
        struct input_dev *array;
        struct input_dev *switches;
        bool wakeup_mode;
-       bool dual_accel;
+       bool auto_add_switch;
 };
 
 #define HID_EVENT_FILTER_UUID  "eeec56b3-4442-408f-a792-4edd4d758054"
@@ -452,10 +470,8 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
         * Some convertible have unreliable VGBS return which could cause incorrect
         * SW_TABLET_MODE report, in these cases we enable support when receiving
         * the first event instead of during driver setup.
-        *
-        * See dual_accel_detect.h for more info on the dual_accel check.
         */
-       if (!priv->switches && !priv->dual_accel && (event == 0xcc || event == 0xcd)) {
+       if (!priv->switches && priv->auto_add_switch && (event == 0xcc || event == 0xcd)) {
                dev_info(&device->dev, "switch event received, enable switches supports\n");
                err = intel_hid_switches_setup(device);
                if (err)
@@ -596,7 +612,8 @@ static int intel_hid_probe(struct platform_device *device)
                return -ENOMEM;
        dev_set_drvdata(&device->dev, priv);
 
-       priv->dual_accel = dual_accel_detect();
+       /* See dual_accel_detect.h for more info on the dual_accel check. */
+       priv->auto_add_switch = dmi_check_system(dmi_auto_add_switch) && !dual_accel_detect();
 
        err = intel_hid_input_setup(device);
        if (err) {
index f58b854..66bb39f 100644 (file)
@@ -8,7 +8,6 @@
  * which provide mailbox interface for power management usage.
  */
 
-#include <linux/acpi.h>
 #include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/device.h>
@@ -319,7 +318,7 @@ static struct platform_driver intel_punit_ipc_driver = {
        .remove = intel_punit_ipc_remove,
        .driver = {
                .name = "intel_punit_ipc",
-               .acpi_match_table = ACPI_PTR(punit_ipc_acpi_ids),
+               .acpi_match_table = punit_ipc_acpi_ids,
        },
 };
 
index 3e520d5..88b551c 100644 (file)
@@ -655,7 +655,7 @@ static int acpi_add(struct acpi_device *device)
                goto out_platform_registered;
        }
        product = dmi_get_system_info(DMI_PRODUCT_NAME);
-       if (strlen(product) > 4)
+       if (product && strlen(product) > 4)
                switch (product[4]) {
                case '5':
                case '6':
index 0e1451b..033f797 100644 (file)
@@ -100,10 +100,10 @@ static const struct ts_dmi_data chuwi_hi10_air_data = {
 };
 
 static const struct property_entry chuwi_hi10_plus_props[] = {
-       PROPERTY_ENTRY_U32("touchscreen-min-x", 0),
-       PROPERTY_ENTRY_U32("touchscreen-min-y", 5),
-       PROPERTY_ENTRY_U32("touchscreen-size-x", 1914),
-       PROPERTY_ENTRY_U32("touchscreen-size-y", 1283),
+       PROPERTY_ENTRY_U32("touchscreen-min-x", 12),
+       PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1908),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1270),
        PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10plus.fw"),
        PROPERTY_ENTRY_U32("silead,max-fingers", 10),
        PROPERTY_ENTRY_BOOL("silead,home-button"),
@@ -111,6 +111,15 @@ static const struct property_entry chuwi_hi10_plus_props[] = {
 };
 
 static const struct ts_dmi_data chuwi_hi10_plus_data = {
+       .embedded_fw = {
+               .name   = "silead/gsl1680-chuwi-hi10plus.fw",
+               .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+               .length = 34056,
+               .sha256 = { 0xfd, 0x0a, 0x08, 0x08, 0x3c, 0xa6, 0x34, 0x4e,
+                           0x2c, 0x49, 0x9c, 0xcd, 0x7d, 0x44, 0x9d, 0x38,
+                           0x10, 0x68, 0xb5, 0xbd, 0xb7, 0x2a, 0x63, 0xb5,
+                           0x67, 0x0b, 0x96, 0xbd, 0x89, 0x67, 0x85, 0x09 },
+       },
        .acpi_name      = "MSSL0017:00",
        .properties     = chuwi_hi10_plus_props,
 };
@@ -141,6 +150,33 @@ static const struct ts_dmi_data chuwi_hi10_pro_data = {
        .properties     = chuwi_hi10_pro_props,
 };
 
+static const struct property_entry chuwi_hibook_props[] = {
+       PROPERTY_ENTRY_U32("touchscreen-min-x", 30),
+       PROPERTY_ENTRY_U32("touchscreen-min-y", 4),
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1892),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1276),
+       PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+       PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hibook.fw"),
+       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+       PROPERTY_ENTRY_BOOL("silead,home-button"),
+       { }
+};
+
+static const struct ts_dmi_data chuwi_hibook_data = {
+       .embedded_fw = {
+               .name   = "silead/gsl1680-chuwi-hibook.fw",
+               .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+               .length = 40392,
+               .sha256 = { 0xf7, 0xc0, 0xe8, 0x5a, 0x6c, 0xf2, 0xeb, 0x8d,
+                           0x12, 0xc4, 0x45, 0xbf, 0x55, 0x13, 0x4c, 0x1a,
+                           0x13, 0x04, 0x31, 0x08, 0x65, 0x73, 0xf7, 0xa8,
+                           0x1b, 0x7d, 0x59, 0xc9, 0xe6, 0x97, 0xf7, 0x38 },
+       },
+       .acpi_name      = "MSSL0017:00",
+       .properties     = chuwi_hibook_props,
+};
+
 static const struct property_entry chuwi_vi8_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
        PROPERTY_ENTRY_U32("touchscreen-min-y", 6),
@@ -980,6 +1016,16 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                },
        },
        {
+               /* Chuwi HiBook (CWI514) */
+               .driver_data = (void *)&chuwi_hibook_data,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+                       DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+                       /* Above matches are too generic, add bios-date match */
+                       DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
+               },
+       },
+       {
                /* Chuwi Vi8 (CWI506) */
                .driver_data = (void *)&chuwi_vi8_data,
                .matches = {
index f02bedf..458218f 100644 (file)
@@ -174,6 +174,7 @@ config PTP_1588_CLOCK_OCP
        depends on I2C && MTD
        depends on SERIAL_8250
        depends on !S390
+       depends on COMMON_CLK
        select NET_DEVLINK
        help
          This driver adds support for an OpenCompute time card.
index 1d78b45..e34face 100644 (file)
@@ -269,5 +269,3 @@ module_exit(max14577_regulator_exit);
 MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
 MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:max14577-regulator");
-MODULE_ALIAS("platform:max77836-regulator");
index 6cca910..7f458d5 100644 (file)
@@ -991,7 +991,7 @@ static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = {
        RPMH_VREG("ldo4",   "ldo%s4",  &pmic5_nldo,      "vdd-l4"),
        RPMH_VREG("ldo5",   "ldo%s5",  &pmic5_pldo,      "vdd-l5-l6"),
        RPMH_VREG("ldo6",   "ldo%s6",  &pmic5_pldo,      "vdd-l5-l6"),
-       RPMH_VREG("ldo7",   "ldo%s6",  &pmic5_pldo_lv,   "vdd-l7"),
+       RPMH_VREG("ldo7",   "ldo%s7",  &pmic5_pldo_lv,   "vdd-l7"),
        {}
 };
 
index eb15067..4eb5341 100644 (file)
@@ -1047,7 +1047,9 @@ static void cmos_check_wkalrm(struct device *dev)
         * ACK the rtc irq here
         */
        if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) {
+               local_irq_disable();
                cmos_interrupt(0, (void *)cmos->rtc);
+               local_irq_enable();
                return;
        }
 
index 2f3515f..f3d5c7f 100644 (file)
@@ -45,13 +45,14 @@ static void __init sclp_early_facilities_detect(void)
        sclp.has_gisaf = !!(sccb->fac118 & 0x08);
        sclp.has_hvs = !!(sccb->fac119 & 0x80);
        sclp.has_kss = !!(sccb->fac98 & 0x01);
-       sclp.has_sipl = !!(sccb->cbl & 0x4000);
        if (sccb->fac85 & 0x02)
                S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
        if (sccb->fac91 & 0x40)
                S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
        if (sccb->cpuoff > 134)
                sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
+       if (sccb->cpuoff > 137)
+               sclp.has_sipl = !!(sccb->cbl & 0x4000);
        sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
        sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
        sclp.rzm <<= 20;
index 2ec7411..f053860 100644 (file)
@@ -77,12 +77,13 @@ EXPORT_SYMBOL(ccwgroup_set_online);
 /**
  * ccwgroup_set_offline() - disable a ccwgroup device
  * @gdev: target ccwgroup device
+ * @call_gdrv: Call the registered gdrv set_offline function
  *
  * This function attempts to put the ccwgroup device into the offline state.
  * Returns:
  *  %0 on success and a negative error value on failure.
  */
-int ccwgroup_set_offline(struct ccwgroup_device *gdev)
+int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv)
 {
        struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
        int ret = -EINVAL;
@@ -91,11 +92,16 @@ int ccwgroup_set_offline(struct ccwgroup_device *gdev)
                return -EAGAIN;
        if (gdev->state == CCWGROUP_OFFLINE)
                goto out;
+       if (!call_gdrv) {
+               ret = 0;
+               goto offline;
+       }
        if (gdrv->set_offline)
                ret = gdrv->set_offline(gdev);
        if (ret)
                goto out;
 
+offline:
        gdev->state = CCWGROUP_OFFLINE;
 out:
        atomic_set(&gdev->onoff, 0);
@@ -124,7 +130,7 @@ static ssize_t ccwgroup_online_store(struct device *dev,
        if (value == 1)
                ret = ccwgroup_set_online(gdev);
        else if (value == 0)
-               ret = ccwgroup_set_offline(gdev);
+               ret = ccwgroup_set_offline(gdev, true);
        else
                ret = -EINVAL;
 out:
index f433428..d9b8049 100644 (file)
@@ -213,7 +213,6 @@ static inline int ap_fetch_qci_info(struct ap_config_info *info)
  * ap_init_qci_info(): Allocate and query qci config info.
  * Does also update the static variables ap_max_domain_id
  * and ap_max_adapter_id if this info is available.
-
  */
 static void __init ap_init_qci_info(void)
 {
@@ -439,6 +438,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
 /**
  * ap_interrupt_handler() - Schedule ap_tasklet on interrupt
  * @airq: pointer to adapter interrupt descriptor
+ * @floating: ignored
  */
 static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
 {
@@ -1786,6 +1786,7 @@ static inline void ap_scan_adapter(int ap)
 /**
  * ap_scan_bus(): Scan the AP bus for new devices
  * Runs periodically, workqueue timer (ap_config_time)
+ * @unused: Unused pointer.
  */
 static void ap_scan_bus(struct work_struct *unused)
 {
index d70c4d3..9ea48bf 100644 (file)
@@ -20,7 +20,7 @@ static void __ap_flush_queue(struct ap_queue *aq);
 
 /**
  * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
- * @qid: The AP queue number
+ * @aq: The AP queue
  * @ind: the notification indicator byte
  *
  * Enables interruption on AP queue via ap_aqic(). Based on the return
@@ -311,7 +311,7 @@ static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
 
 /**
  * ap_sm_reset(): Reset an AP queue.
- * @qid: The AP queue number
+ * @aq: The AP queue
  *
  * Submit the Reset command to an AP queue.
  */
index 535a60b..a5aa0bd 100644 (file)
@@ -858,7 +858,6 @@ struct qeth_card {
        struct napi_struct napi;
        struct qeth_rx rx;
        struct delayed_work buffer_reclaim_work;
-       struct work_struct close_dev_work;
 };
 
 static inline bool qeth_card_hw_is_reachable(struct qeth_card *card)
index 41ca627..e9807d2 100644 (file)
@@ -70,15 +70,6 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
 static int qeth_qdio_establish(struct qeth_card *);
 static void qeth_free_qdio_queues(struct qeth_card *card);
 
-static void qeth_close_dev_handler(struct work_struct *work)
-{
-       struct qeth_card *card;
-
-       card = container_of(work, struct qeth_card, close_dev_work);
-       QETH_CARD_TEXT(card, 2, "cldevhdl");
-       ccwgroup_set_offline(card->gdev);
-}
-
 static const char *qeth_get_cardname(struct qeth_card *card)
 {
        if (IS_VM_NIC(card)) {
@@ -202,6 +193,9 @@ static void qeth_clear_working_pool_list(struct qeth_card *card)
                                 &card->qdio.in_buf_pool.entry_list, list)
                list_del(&pool_entry->list);
 
+       if (!queue)
+               return;
+
        for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
                queue->bufs[i].pool_entry = NULL;
 }
@@ -792,10 +786,12 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
        case IPA_CMD_STOPLAN:
                if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
                        dev_err(&card->gdev->dev,
-                               "Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
+                               "Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
                                netdev_name(card->dev));
-                       schedule_work(&card->close_dev_work);
+                       /* Set offline, then probably fail to set online: */
+                       qeth_schedule_recovery(card);
                } else {
+                       /* stay online for subsequent STARTLAN */
                        dev_warn(&card->gdev->dev,
                                 "The link for interface %s on CHPID 0x%X failed\n",
                                 netdev_name(card->dev), card->info.chpid);
@@ -1537,7 +1533,6 @@ static void qeth_setup_card(struct qeth_card *card)
        INIT_LIST_HEAD(&card->ipato.entries);
        qeth_init_qdio_info(card);
        INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
-       INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
        hash_init(card->rx_mode_addrs);
        hash_init(card->local_addrs4);
        hash_init(card->local_addrs6);
@@ -5519,7 +5514,8 @@ static int qeth_do_reset(void *data)
                dev_info(&card->gdev->dev,
                         "Device successfully recovered!\n");
        } else {
-               ccwgroup_set_offline(card->gdev);
+               qeth_set_offline(card, disc, true);
+               ccwgroup_set_offline(card->gdev, false);
                dev_warn(&card->gdev->dev,
                         "The qeth device driver failed to recover an error on the device\n");
        }
index 72e84ff..dc6c007 100644 (file)
@@ -2307,7 +2307,6 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
        if (gdev->state == CCWGROUP_ONLINE)
                qeth_set_offline(card, card->discipline, false);
 
-       cancel_work_sync(&card->close_dev_work);
        if (card->dev->reg_state == NETREG_REGISTERED) {
                priv = netdev_priv(card->dev);
                if (priv->brport_features & BR_LEARNING_SYNC) {
index 3a523e7..6fd3e28 100644 (file)
@@ -1969,7 +1969,6 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
        if (cgdev->state == CCWGROUP_ONLINE)
                qeth_set_offline(card, card->discipline, false);
 
-       cancel_work_sync(&card->close_dev_work);
        if (card->dev->reg_state == NETREG_REGISTERED)
                unregister_netdev(card->dev);
 
index f34badc..9f64133 100644 (file)
@@ -10,17 +10,6 @@ config SCSI_ACORNSCSI_3
          This enables support for the Acorn SCSI card (aka30). If you have an
          Acorn system with one of these, say Y. If unsure, say N.
 
-config SCSI_ACORNSCSI_TAGGED_QUEUE
-       bool "Support SCSI 2 Tagged queueing"
-       depends on SCSI_ACORNSCSI_3
-       help
-         Say Y here to enable tagged queuing support on the Acorn SCSI card.
-
-         This is a feature of SCSI-2 which improves performance: the host
-         adapter can send several SCSI commands to a device's queue even if
-         previous commands haven't finished yet. Some SCSI devices don't
-         implement this properly, so the safe answer is N.
-
 config SCSI_ACORNSCSI_SYNC
        bool "Support SCSI 2 Synchronous Transfers"
        depends on SCSI_ACORNSCSI_3
index 4a84599..b4cb5fb 100644 (file)
  * You can tell if you have a device that supports tagged queueing my
  * cating (eg) /proc/scsi/acornscsi/0 and see if the SCSI revision is reported
  * as '2 TAG'.
- *
- * Also note that CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE is normally set in the config
- * scripts, but disabled here.  Once debugged, remove the #undef, otherwise to debug,
- * comment out the undef.
  */
-#undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
+
 /*
  * SCSI-II Synchronous transfer support.
  *
@@ -171,7 +167,7 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
                           unsigned int result);
 static int acornscsi_reconnect_finish(AS_Host *host);
 static void acornscsi_dma_cleanup(AS_Host *host);
-static void acornscsi_abortcmd(AS_Host *host, unsigned char tag);
+static void acornscsi_abortcmd(AS_Host *host);
 
 /* ====================================================================================
  * Miscellaneous
@@ -741,17 +737,6 @@ intr_ret_t acornscsi_kick(AS_Host *host)
 #endif
 
     if (from_queue) {
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-       /*
-        * tagged queueing - allocate a new tag to this command
-        */
-       if (SCpnt->device->simple_tags) {
-           SCpnt->device->current_tag += 1;
-           if (SCpnt->device->current_tag == 0)
-               SCpnt->device->current_tag = 1;
-           SCpnt->tag = SCpnt->device->current_tag;
-       } else
-#endif
            set_bit(SCpnt->device->id * 8 +
                    (u8)(SCpnt->device->lun & 0x07), host->busyluns);
 
@@ -1192,7 +1177,7 @@ void acornscsi_dma_intr(AS_Host *host)
         * the device recognises the attention.
         */
        if (dmac_read(host, DMAC_STATUS) & STATUS_RQ0) {
-           acornscsi_abortcmd(host, host->SCpnt->tag);
+           acornscsi_abortcmd(host);
 
            dmac_write(host, DMAC_TXCNTLO, 0);
            dmac_write(host, DMAC_TXCNTHI, 0);
@@ -1560,23 +1545,6 @@ void acornscsi_message(AS_Host *host)
            acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
 
        switch (host->scsi.last_message) {
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-       case HEAD_OF_QUEUE_TAG:
-       case ORDERED_QUEUE_TAG:
-       case SIMPLE_QUEUE_TAG:
-           /*
-            * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.17)
-            *  If a target does not implement tagged queuing and a queue tag
-            *  message is received, it shall respond with a MESSAGE REJECT
-            *  message and accept the I/O process as if it were untagged.
-            */
-           printk(KERN_NOTICE "scsi%d.%c: disabling tagged queueing\n",
-                   host->host->host_no, acornscsi_target(host));
-           host->SCpnt->device->simple_tags = 0;
-           set_bit(host->SCpnt->device->id * 8 +
-                   (u8)(host->SCpnt->device->lun & 0x7), host->busyluns);
-           break;
-#endif
        case EXTENDED_MESSAGE | (EXTENDED_SDTR << 8):
            /*
             * Target can't handle synchronous transfers
@@ -1687,24 +1655,11 @@ void acornscsi_buildmessages(AS_Host *host)
 #if 0
     /* does the device need the current command aborted */
     if (cmd_aborted) {
-       acornscsi_abortcmd(host->SCpnt->tag);
+       acornscsi_abortcmd(host);
        return;
     }
 #endif
 
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-    if (host->SCpnt->tag) {
-       unsigned int tag_type;
-
-       if (host->SCpnt->cmnd[0] == REQUEST_SENSE ||
-           host->SCpnt->cmnd[0] == TEST_UNIT_READY ||
-           host->SCpnt->cmnd[0] == INQUIRY)
-           tag_type = HEAD_OF_QUEUE_TAG;
-       else
-           tag_type = SIMPLE_QUEUE_TAG;
-       msgqueue_addmsg(&host->scsi.msgs, 2, tag_type, host->SCpnt->tag);
-    }
-#endif
 
 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC
     if (host->device[host->SCpnt->device->id].sync_state == SYNC_NEGOCIATE) {
@@ -1798,7 +1753,7 @@ int acornscsi_reconnect(AS_Host *host)
                "to reconnect with\n",
                host->host->host_no, '0' + target);
        acornscsi_dumplog(host, target);
-       acornscsi_abortcmd(host, 0);
+       acornscsi_abortcmd(host);
        if (host->SCpnt) {
            queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt);
            host->SCpnt = NULL;
@@ -1821,7 +1776,7 @@ int acornscsi_reconnect_finish(AS_Host *host)
        host->scsi.disconnectable = 0;
        if (host->SCpnt->device->id  == host->scsi.reconnected.target &&
            host->SCpnt->device->lun == host->scsi.reconnected.lun &&
-           host->SCpnt->tag         == host->scsi.reconnected.tag) {
+           scsi_cmd_to_tag(host->SCpnt) == host->scsi.reconnected.tag) {
 #if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
            DBG(host->SCpnt, printk("scsi%d.%c: reconnected",
                    host->host->host_no, acornscsi_target(host)));
@@ -1848,7 +1803,7 @@ int acornscsi_reconnect_finish(AS_Host *host)
     }
 
     if (!host->SCpnt)
-       acornscsi_abortcmd(host, host->scsi.reconnected.tag);
+       acornscsi_abortcmd(host);
     else {
        /*
         * Restore data pointer from SAVED pointers.
@@ -1889,21 +1844,15 @@ void acornscsi_disconnect_unexpected(AS_Host *host)
  * Function: void acornscsi_abortcmd(AS_host *host, unsigned char tag)
  * Purpose : abort a currently executing command
  * Params  : host - host with connected command to abort
- *          tag  - tag to abort
  */
 static
-void acornscsi_abortcmd(AS_Host *host, unsigned char tag)
+void acornscsi_abortcmd(AS_Host *host)
 {
     host->scsi.phase = PHASE_ABORTED;
     sbic_arm_write(host, SBIC_CMND, CMND_ASSERTATN);
 
     msgqueue_flush(&host->scsi.msgs);
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-    if (tag)
-       msgqueue_addmsg(&host->scsi.msgs, 2, ABORT_TAG, tag);
-    else
-#endif
-       msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
+    msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
 }
 
 /* ==========================================================================================
@@ -1993,7 +1942,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
            printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTING, SSR %02X?\n",
                    host->host->host_no, acornscsi_target(host), ssr);
            acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
-           acornscsi_abortcmd(host, host->SCpnt->tag);
+           acornscsi_abortcmd(host);
        }
        return INTR_PROCESSING;
 
@@ -2029,7 +1978,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
            printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTED, SSR %02X?\n",
                    host->host->host_no, acornscsi_target(host), ssr);
            acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
-           acornscsi_abortcmd(host, host->SCpnt->tag);
+           acornscsi_abortcmd(host);
        }
        return INTR_PROCESSING;
 
@@ -2075,20 +2024,20 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
        case 0x18:                      /* -> PHASE_DATAOUT                             */
            /* COMMAND -> DATA OUT */
            if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len)
-               acornscsi_abortcmd(host, host->SCpnt->tag);
+               acornscsi_abortcmd(host);
            acornscsi_dma_setup(host, DMA_OUT);
            if (!acornscsi_starttransfer(host))
-               acornscsi_abortcmd(host, host->SCpnt->tag);
+               acornscsi_abortcmd(host);
            host->scsi.phase = PHASE_DATAOUT;
            return INTR_IDLE;
 
        case 0x19:                      /* -> PHASE_DATAIN                              */
            /* COMMAND -> DATA IN */
            if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len)
-               acornscsi_abortcmd(host, host->SCpnt->tag);
+               acornscsi_abortcmd(host);
            acornscsi_dma_setup(host, DMA_IN);
            if (!acornscsi_starttransfer(host))
-               acornscsi_abortcmd(host, host->SCpnt->tag);
+               acornscsi_abortcmd(host);
            host->scsi.phase = PHASE_DATAIN;
            return INTR_IDLE;
 
@@ -2156,7 +2105,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
            /* MESSAGE IN -> DATA OUT */
            acornscsi_dma_setup(host, DMA_OUT);
            if (!acornscsi_starttransfer(host))
-               acornscsi_abortcmd(host, host->SCpnt->tag);
+               acornscsi_abortcmd(host);
            host->scsi.phase = PHASE_DATAOUT;
            return INTR_IDLE;
 
@@ -2165,7 +2114,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
            /* MESSAGE IN -> DATA IN */
            acornscsi_dma_setup(host, DMA_IN);
            if (!acornscsi_starttransfer(host))
-               acornscsi_abortcmd(host, host->SCpnt->tag);
+               acornscsi_abortcmd(host);
            host->scsi.phase = PHASE_DATAIN;
            return INTR_IDLE;
 
@@ -2206,7 +2155,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
        switch (ssr) {
        case 0x19:                      /* -> PHASE_DATAIN                              */
        case 0x89:                      /* -> PHASE_DATAIN                              */
-           acornscsi_abortcmd(host, host->SCpnt->tag);
+           acornscsi_abortcmd(host);
            return INTR_IDLE;
 
        case 0x1b:                      /* -> PHASE_STATUSIN                            */
@@ -2255,7 +2204,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
        switch (ssr) {
        case 0x18:                      /* -> PHASE_DATAOUT                             */
        case 0x88:                      /* -> PHASE_DATAOUT                             */
-           acornscsi_abortcmd(host, host->SCpnt->tag);
+           acornscsi_abortcmd(host);
            return INTR_IDLE;
 
        case 0x1b:                      /* -> PHASE_STATUSIN                            */
@@ -2482,7 +2431,6 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
     SCpnt->scsi_done = done;
     SCpnt->host_scribble = NULL;
     SCpnt->result = 0;
-    SCpnt->tag = 0;
     SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);
     SCpnt->SCp.sent_command = 0;
     SCpnt->SCp.scsi_xferred = 0;
@@ -2581,7 +2529,7 @@ static enum res_abort acornscsi_do_abort(AS_Host *host, struct scsi_cmnd *SCpnt)
                        break;
 
                default:
-                       acornscsi_abortcmd(host, host->SCpnt->tag);
+                       acornscsi_abortcmd(host);
                        res = res_snooze;
                }
                local_irq_restore(flags);
@@ -2747,9 +2695,6 @@ char *acornscsi_info(struct Scsi_Host *host)
 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC
     " SYNC"
 #endif
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-    " TAG"
-#endif
 #if (DEBUG & DEBUG_NO_WRITE)
     " NOWRITE (" __stringify(NO_WRITE) ")"
 #endif
@@ -2770,9 +2715,6 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC
     " SYNC"
 #endif
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-    " TAG"
-#endif
 #if (DEBUG & DEBUG_NO_WRITE)
     " NOWRITE (" __stringify(NO_WRITE) ")"
 #endif
@@ -2827,9 +2769,8 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
        seq_printf(m, "Device/Lun TaggedQ      Sync\n");
        seq_printf(m, "     %d/%llu   ", scd->id, scd->lun);
        if (scd->tagged_supported)
-               seq_printf(m, "%3sabled(%3d) ",
-                            scd->simple_tags ? "en" : "dis",
-                            scd->current_tag);
+               seq_printf(m, "%3sabled ",
+                            scd->simple_tags ? "en" : "dis");
        else
                seq_printf(m, "unsupported  ");
 
index 9c4458a..cf71ef4 100644 (file)
@@ -77,7 +77,6 @@
  *  I was thinking that this was a good chip until I found this restriction ;(
  */
 #define SCSI2_SYNC
-#undef  SCSI2_TAG
 
 #undef DEBUG_CONNECT
 #undef DEBUG_MESSAGES
@@ -990,7 +989,7 @@ fas216_reselected_intr(FAS216_Info *info)
                info->scsi.disconnectable = 0;
                if (info->SCpnt->device->id  == target &&
                    info->SCpnt->device->lun == lun &&
-                   info->SCpnt->tag         == tag) {
+                   scsi_cmd_to_rq(info->SCpnt)->tag == tag) {
                        fas216_log(info, LOG_CONNECT, "reconnected previously executing command");
                } else {
                        queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt);
@@ -1791,8 +1790,9 @@ static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
        /*
         * add tag message if required
         */
-       if (SCpnt->tag)
-               msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG, SCpnt->tag);
+       if (SCpnt->device->simple_tags)
+               msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG,
+                               scsi_cmd_to_rq(SCpnt)->tag);
 
        do {
 #ifdef SCSI2_SYNC
@@ -1815,20 +1815,8 @@ static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
 
 static void fas216_allocate_tag(FAS216_Info *info, struct scsi_cmnd *SCpnt)
 {
-#ifdef SCSI2_TAG
-       /*
-        * tagged queuing - allocate a new tag to this command
-        */
-       if (SCpnt->device->simple_tags && SCpnt->cmnd[0] != REQUEST_SENSE &&
-           SCpnt->cmnd[0] != INQUIRY) {
-           SCpnt->device->current_tag += 1;
-               if (SCpnt->device->current_tag == 0)
-                   SCpnt->device->current_tag = 1;
-                       SCpnt->tag = SCpnt->device->current_tag;
-       } else
-#endif
-               set_bit(SCpnt->device->id * 8 +
-                       (u8)(SCpnt->device->lun & 0x7), info->busyluns);
+       set_bit(SCpnt->device->id * 8 +
+               (u8)(SCpnt->device->lun & 0x7), info->busyluns);
 
        info->stats.removes += 1;
        switch (SCpnt->cmnd[0]) {
@@ -2117,7 +2105,6 @@ request_sense:
        init_SCp(SCpnt);
        SCpnt->SCp.Message = 0;
        SCpnt->SCp.Status = 0;
-       SCpnt->tag = 0;
        SCpnt->host_scribble = (void *)fas216_rq_sns_done;
 
        /*
@@ -2223,7 +2210,6 @@ static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt,
        init_SCp(SCpnt);
 
        info->stats.queues += 1;
-       SCpnt->tag = 0;
 
        spin_lock(&info->host_lock);
 
@@ -3003,9 +2989,8 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m)
                dev = &info->device[scd->id];
                seq_printf(m, "     %d/%llu   ", scd->id, scd->lun);
                if (scd->tagged_supported)
-                       seq_printf(m, "%3sabled(%3d) ",
-                                    scd->simple_tags ? "en" : "dis",
-                                    scd->current_tag);
+                       seq_printf(m, "%3sabled ",
+                                    scd->simple_tags ? "en" : "dis");
                else
                        seq_puts(m, "unsupported   ");
 
index e5559f2..c6f71a7 100644 (file)
@@ -214,7 +214,7 @@ struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target, int lun,
        list_for_each(l, &queue->head) {
                QE_t *q = list_entry(l, QE_t, list);
                if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun &&
-                   q->SCpnt->tag == tag) {
+                   scsi_cmd_to_rq(q->SCpnt)->tag == tag) {
                        SCpnt = __queue_remove(queue, l);
                        break;
                }
index bb3b460..4d73e92 100644 (file)
@@ -880,11 +880,11 @@ efct_lio_npiv_drop_nport(struct se_wwn *wwn)
        struct efct *efct = lio_vport->efct;
        unsigned long flags = 0;
 
-       spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
-
        if (lio_vport->fc_vport)
                fc_vport_terminate(lio_vport->fc_vport);
 
+       spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
+
        list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list,
                                 list_entry) {
                if (vport->lio_vport == lio_vport) {
index 725ca2a..52be013 100644 (file)
@@ -928,22 +928,21 @@ __efc_d_wait_topology_notify(struct efc_sm_ctx *ctx,
                break;
 
        case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: {
-               enum efc_nport_topology topology =
-                                       (enum efc_nport_topology)arg;
+               enum efc_nport_topology *topology = arg;
 
                WARN_ON(node->nport->domain->attached);
 
                WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
 
                node_printf(node, "topology notification, topology=%d\n",
-                           topology);
+                           *topology);
 
                /* At the time the PLOGI was received, the topology was unknown,
                 * so we didn't know which node would perform the domain attach:
                 * 1. The node from which the PLOGI was sent (p2p) or
                 * 2. The node to which the FLOGI was sent (fabric).
                 */
-               if (topology == EFC_NPORT_TOPO_P2P) {
+               if (*topology == EFC_NPORT_TOPO_P2P) {
                        /* if this is p2p, need to attach to the domain using
                         * the d_id from the PLOGI received
                         */
index d397220..3270ce4 100644 (file)
@@ -107,7 +107,6 @@ void
 efc_fabric_notify_topology(struct efc_node *node)
 {
        struct efc_node *tmp_node;
-       enum efc_nport_topology topology = node->nport->topology;
        unsigned long index;
 
        /*
@@ -118,7 +117,7 @@ efc_fabric_notify_topology(struct efc_node *node)
                if (tmp_node != node) {
                        efc_node_post_event(tmp_node,
                                            EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
-                                           (void *)topology);
+                                           &node->nport->topology);
                }
        }
 }
index b35bf70..ebe4179 100644 (file)
@@ -285,11 +285,8 @@ buffer_done:
                                "6312 Catching potential buffer "
                                "overflow > PAGE_SIZE = %lu bytes\n",
                                PAGE_SIZE);
-               strscpy(buf + PAGE_SIZE - 1 -
-                       strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1),
-                       LPFC_INFO_MORE_STR,
-                       strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1)
-                       + 1);
+               strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
+                       LPFC_INFO_MORE_STR, sizeof(LPFC_INFO_MORE_STR) + 1);
        }
        return len;
 }
@@ -6204,7 +6201,8 @@ lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
        len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d  total SGEs: %d\n",
                       phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
 
-       len += scnprintf(buf + len, PAGE_SIZE, "Cfg: %d  SCSI: %d  NVME: %d\n",
+       len += scnprintf(buf + len, PAGE_SIZE - len,
+                       "Cfg: %d  SCSI: %d  NVME: %d\n",
                        phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
                        phba->cfg_nvme_seg_cnt);
        return len;
index 1254a57..052c0e5 100644 (file)
@@ -4015,11 +4015,11 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                be32_to_cpu(pcgd->desc_tag),
                                be32_to_cpu(pcgd->desc_len),
                                be32_to_cpu(pcgd->xmt_signal_capability),
-                               be32_to_cpu(pcgd->xmt_signal_frequency.count),
-                               be32_to_cpu(pcgd->xmt_signal_frequency.units),
+                               be16_to_cpu(pcgd->xmt_signal_frequency.count),
+                               be16_to_cpu(pcgd->xmt_signal_frequency.units),
                                be32_to_cpu(pcgd->rcv_signal_capability),
-                               be32_to_cpu(pcgd->rcv_signal_frequency.count),
-                               be32_to_cpu(pcgd->rcv_signal_frequency.units));
+                               be16_to_cpu(pcgd->rcv_signal_frequency.count),
+                               be16_to_cpu(pcgd->rcv_signal_frequency.units));
 
                        /* Compare driver and Fport capabilities and choose
                         * least common.
@@ -9387,7 +9387,7 @@ lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt)
                /* Extract the next WWPN from the payload */
                wwn = *wwnlist++;
                wwpn = be64_to_cpu(wwn);
-               len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ,
+               len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len,
                                 " %016llx", wwpn);
 
                /* Log a message if we are on the last WWPN
index 79a4872..7359505 100644 (file)
@@ -1167,7 +1167,7 @@ struct lpfc_mbx_read_object {  /* Version 0 */
 #define lpfc_mbx_rd_object_rlen_MASK   0x00FFFFFF
 #define lpfc_mbx_rd_object_rlen_WORD   word0
                        uint32_t rd_object_offset;
-                       uint32_t rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
+                       __le32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
 #define LPFC_OBJ_NAME_SZ 104   /* 26 x sizeof(uint32_t) is 104. */
                        uint32_t rd_object_cnt;
                        struct lpfc_mbx_host_buf rd_object_hbuf[4];
index 0ec322f..195169b 100644 (file)
@@ -5518,7 +5518,7 @@ lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
        if (phba->cgn_fpin_frequency &&
            phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
                value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
-               cp->cgn_stat_npm = cpu_to_le32(value);
+               cp->cgn_stat_npm = value;
        }
        value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
                                    LPFC_CGN_CRC32_SEED);
@@ -5547,9 +5547,9 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
        uint32_t mbps;
        uint32_t dvalue, wvalue, lvalue, avalue;
        uint64_t latsum;
-       uint16_t *ptr;
-       uint32_t *lptr;
-       uint16_t *mptr;
+       __le16 *ptr;
+       __le32 *lptr;
+       __le16 *mptr;
 
        /* Make sure we have a congestion info buffer */
        if (!phba->cgn_i)
@@ -5570,7 +5570,7 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
        if (phba->cgn_fpin_frequency &&
            phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
                value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
-               cp->cgn_stat_npm = cpu_to_le32(value);
+               cp->cgn_stat_npm = value;
        }
 
        /* Read and clear the latency counters for this minute */
@@ -5753,7 +5753,7 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
                        dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
                        wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
                        lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
-                       mbps += le32_to_cpu(cp->cgn_bw_hr[i]);
+                       mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
                        avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
                }
                if (lvalue)             /* Avg of latency averages */
@@ -8277,11 +8277,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        return 0;
 
 out_free_hba_hdwq_info:
-       free_percpu(phba->sli4_hba.c_stat);
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+       free_percpu(phba->sli4_hba.c_stat);
 out_free_hba_idle_stat:
-       kfree(phba->sli4_hba.idle_stat);
 #endif
+       kfree(phba->sli4_hba.idle_stat);
 out_free_hba_eq_info:
        free_percpu(phba->sli4_hba.eq_info);
 out_free_hba_cpu_map:
@@ -13411,8 +13411,8 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba)
 
        /* last used Index initialized to 0xff already */
 
-       cp->cgn_warn_freq = LPFC_FPIN_INIT_FREQ;
-       cp->cgn_alarm_freq = LPFC_FPIN_INIT_FREQ;
+       cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
+       cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
        crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
        cp->cgn_info_crc = cpu_to_le32(crc);
 
index 73a3568..479b3ee 100644 (file)
@@ -1489,9 +1489,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
        struct lpfc_nvme_qhandle *lpfc_queue_info;
        struct lpfc_nvme_fcpreq_priv *freqpriv;
        struct nvme_common_command *sqe;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint64_t start = 0;
-#endif
 
        /* Validate pointers. LLDD fault handling with transport does
         * have timing races.
index 0fde1e8..befdf86 100644 (file)
@@ -1495,7 +1495,6 @@ static int
 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                uint8_t *txop, uint8_t *rxop)
 {
-       uint8_t ret = 0;
 
        if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
                switch (scsi_get_prot_op(sc)) {
@@ -1548,7 +1547,7 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                }
        }
 
-       return ret;
+       return 0;
 }
 #endif
 
@@ -5578,12 +5577,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
        struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
        int err, idx;
        u8 *uuid = NULL;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-       uint64_t start = 0L;
+       uint64_t start;
 
-       if (phba->ktime_on)
-               start = ktime_get_ns();
-#endif
        start = ktime_get_ns();
        rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
 
index ffd8a14..78ce38d 100644 (file)
@@ -22090,6 +22090,7 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
        struct lpfc_dmabuf *pcmd;
+       u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
 
        /* sanity check on queue memory */
        if (!datap)
@@ -22113,10 +22114,10 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
 
        memset((void *)read_object->u.request.rd_object_name, 0,
               LPFC_OBJ_NAME_SZ);
-       sprintf((uint8_t *)read_object->u.request.rd_object_name, rdobject);
+       scnprintf((char *)rd_object_name, sizeof(rd_object_name), rdobject);
        for (j = 0; j < strlen(rdobject); j++)
                read_object->u.request.rd_object_name[j] =
-                       cpu_to_le32(read_object->u.request.rd_object_name[j]);
+                       cpu_to_le32(rd_object_name[j]);
 
        pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
        if (pcmd)
index e4298bf..39d8754 100644 (file)
@@ -1916,7 +1916,7 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
                raid = MR_LdRaidGet(ld, local_map_ptr);
 
                if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
-               blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+                       blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
 
                mr_device_priv_data->is_tm_capable =
                        raid->capability.tmCapable;
@@ -8033,7 +8033,7 @@ skip_firing_dcmds:
 
        if (instance->adapter_type != MFI_SERIES) {
                megasas_release_fusion(instance);
-                       pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
+               pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
                                (sizeof(struct MR_PD_CFG_SEQ) *
                                        (MAX_PHYSICAL_DEVICES - 1));
                for (i = 0; i < 2 ; i++) {
@@ -8773,8 +8773,7 @@ int megasas_update_device_list(struct megasas_instance *instance,
 
                if (event_type & SCAN_VD_CHANNEL) {
                        if (!instance->requestorId ||
-                           (instance->requestorId &&
-                            megasas_get_ld_vf_affiliation(instance, 0))) {
+                       megasas_get_ld_vf_affiliation(instance, 0)) {
                                dcmd_ret = megasas_ld_list_query(instance,
                                                MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
                                if (dcmd_ret != DCMD_SUCCESS)
index 6c82435..27eb652 100644 (file)
@@ -1582,8 +1582,10 @@ mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc)
         * wait for current poll to complete.
         */
        for (qid = 0; qid < iopoll_q_count; qid++) {
-               while (atomic_read(&ioc->io_uring_poll_queues[qid].busy))
+               while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) {
+                       cpu_relax();
                        udelay(500);
+               }
        }
 }
 
index 770b241..1b79f01 100644 (file)
@@ -2178,7 +2178,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
                mpt3sas_check_cmd_timeout(ioc,
                    ioc->ctl_cmds.status, mpi_request,
                    sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed);
-                *issue_reset = reset_needed;
+               *issue_reset = reset_needed;
                rc = -EFAULT;
                goto out;
        }
index 2f82b1e..d383d4a 100644 (file)
@@ -10749,8 +10749,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
        case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
                _scsih_pcie_topology_change_event(ioc, fw_event);
                ioc->current_event = NULL;
-                       return;
-       break;
+               return;
        }
 out:
        fw_event_work_put(fw_event);
index 7a4f5d4..2b8c6fa 100644 (file)
@@ -1939,11 +1939,8 @@ static   void    ncr_start_next_ccb (struct ncb *np, struct lcb * lp, int maxn);
 static void    ncr_put_start_queue(struct ncb *np, struct ccb *cp);
 
 static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd);
-static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd);
 static void process_waiting_list(struct ncb *np, int sts);
 
-#define remove_from_waiting_list(np, cmd) \
-               retrieve_from_waiting_list(1, (np), (cmd))
 #define requeue_waiting_list(np) process_waiting_list((np), DID_OK)
 #define reset_waiting_list(np) process_waiting_list((np), DID_RESET)
 
@@ -7997,26 +7994,6 @@ static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd)
        }
 }
 
-static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd)
-{
-       struct scsi_cmnd **pcmd = &np->waiting_list;
-
-       while (*pcmd) {
-               if (cmd == *pcmd) {
-                       if (to_remove) {
-                               *pcmd = (struct scsi_cmnd *) cmd->next_wcmd;
-                               cmd->next_wcmd = NULL;
-                       }
-#ifdef DEBUG_WAITING_LIST
-       printk("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd);
-#endif
-                       return cmd;
-               }
-               pcmd = (struct scsi_cmnd **) &(*pcmd)->next_wcmd;
-       }
-       return NULL;
-}
-
 static void process_waiting_list(struct ncb *np, int sts)
 {
        struct scsi_cmnd *waiting_list, *wcmd;
index 1e4e3e8..5fc7697 100644 (file)
@@ -7169,7 +7169,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
                                return 0;
                        break;
                case QLA2XXX_INI_MODE_DUAL:
-                       if (!qla_dual_mode_enabled(vha))
+                       if (!qla_dual_mode_enabled(vha) &&
+                           !qla_ini_mode_enabled(vha))
                                return 0;
                        break;
                case QLA2XXX_INI_MODE_ENABLED:
index d8b05d8..922e4c7 100644 (file)
@@ -441,9 +441,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
        struct iscsi_transport *t = iface->transport;
        int param = -1;
 
-       if (attr == &dev_attr_iface_enabled.attr)
-               param = ISCSI_NET_PARAM_IFACE_ENABLE;
-       else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
+       if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
                param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
        else if (attr == &dev_attr_iface_header_digest.attr)
                param = ISCSI_IFACE_PARAM_HDRDGST_EN;
@@ -483,7 +481,9 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
        if (param != -1)
                return t->attr_is_visible(ISCSI_IFACE_PARAM, param);
 
-       if (attr == &dev_attr_iface_vlan_id.attr)
+       if (attr == &dev_attr_iface_enabled.attr)
+               param = ISCSI_NET_PARAM_IFACE_ENABLE;
+       else if (attr == &dev_attr_iface_vlan_id.attr)
                param = ISCSI_NET_PARAM_VLAN_ID;
        else if (attr == &dev_attr_iface_vlan_priority.attr)
                param = ISCSI_NET_PARAM_VLAN_PRIORITY;
index cbd9999..523bf2f 100644 (file)
@@ -2124,6 +2124,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
                retries = 0;
 
                do {
+                       bool media_was_present = sdkp->media_present;
+
                        cmd[0] = TEST_UNIT_READY;
                        memset((void *) &cmd[1], 0, 9);
 
@@ -2138,7 +2140,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
                         * with any more polling.
                         */
                        if (media_not_present(sdkp, &sshdr)) {
-                               sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
+                               if (media_was_present)
+                                       sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
                                return;
                        }
 
@@ -3401,15 +3404,16 @@ static int sd_probe(struct device *dev)
        }
 
        device_initialize(&sdkp->dev);
-       sdkp->dev.parent = dev;
+       sdkp->dev.parent = get_device(dev);
        sdkp->dev.class = &sd_disk_class;
        dev_set_name(&sdkp->dev, "%s", dev_name(dev));
 
        error = device_add(&sdkp->dev);
-       if (error)
-               goto out_free_index;
+       if (error) {
+               put_device(&sdkp->dev);
+               goto out;
+       }
 
-       get_device(dev);
        dev_set_drvdata(dev, sdkp);
 
        gd->major = sd_major((index & 0xf0) >> 4);
index b9757f2..ed06798 100644 (file)
@@ -154,8 +154,8 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
 
        /*
         * Report zone buffer size should be at most 64B times the number of
-        * zones requested plus the 64B reply header, but should be at least
-        * SECTOR_SIZE for ATA devices.
+        * zones requested plus the 64B reply header, but should be aligned
+        * to SECTOR_SIZE for ATA devices.
         * Make sure that this size does not exceed the hardware capabilities.
         * Furthermore, since the report zone command cannot be split, make
         * sure that the allocated buffer can always be mapped by limiting the
@@ -174,7 +174,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
                        *buflen = bufsize;
                        return buf;
                }
-               bufsize >>= 1;
+               bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);
        }
 
        return NULL;
@@ -280,7 +280,7 @@ static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
 {
        struct scsi_disk *sdkp;
        unsigned long flags;
-       unsigned int zno;
+       sector_t zno;
        int ret;
 
        sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
index c2afba2..43e6822 100644 (file)
@@ -87,9 +87,16 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
                0
        };
        unsigned char recv_page_code;
+       unsigned int retries = SES_RETRIES;
+       struct scsi_sense_hdr sshdr;
+
+       do {
+               ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
+                                      &sshdr, SES_TIMEOUT, 1, NULL);
+       } while (ret > 0 && --retries && scsi_sense_valid(&sshdr) &&
+                (sshdr.sense_key == NOT_READY ||
+                 (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
 
-       ret =  scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
-                               NULL, SES_TIMEOUT, SES_RETRIES, NULL);
        if (unlikely(ret))
                return ret;
 
@@ -121,9 +128,16 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
                bufflen & 0xff,
                0
        };
+       struct scsi_sense_hdr sshdr;
+       unsigned int retries = SES_RETRIES;
+
+       do {
+               result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
+                                         &sshdr, SES_TIMEOUT, 1, NULL);
+       } while (result > 0 && --retries && scsi_sense_valid(&sshdr) &&
+                (sshdr.sense_key == NOT_READY ||
+                 (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
 
-       result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
-                                 NULL, SES_TIMEOUT, SES_RETRIES, NULL);
        if (result)
                sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
                            result);
index 79d9aa2..ddd00ef 100644 (file)
@@ -523,7 +523,7 @@ static int sr_read_sector(Scsi_CD *cd, int lba, int blksize, unsigned char *dest
                        return rc;
                cd->readcd_known = 0;
                sr_printk(KERN_INFO, cd,
-                         "CDROM does'nt support READ CD (0xbe) command\n");
+                         "CDROM doesn't support READ CD (0xbe) command\n");
                /* fall & retry the other way */
        }
        /* ... if this fails, we switch the blocksize using MODE SELECT */
index 9d04929..ae8636d 100644 (file)
@@ -3823,6 +3823,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
        case CDROM_SEND_PACKET:
                if (!capable(CAP_SYS_RAWIO))
                        return -EPERM;
+               break;
        default:
                break;
        }
index b3bcc5c..149c1aa 100644 (file)
@@ -128,6 +128,81 @@ static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
        return err;
 }
 
+static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
+{
+       struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
+       int ret;
+
+       pwr_info.lane_rx = lanes;
+       pwr_info.lane_tx = lanes;
+       ret = ufshcd_config_pwr_mode(hba, &pwr_info);
+       if (ret)
+               dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
+                       __func__, lanes, ret);
+       return ret;
+}
+
+static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
+                               enum ufs_notify_change_status status,
+                               struct ufs_pa_layer_attr *dev_max_params,
+                               struct ufs_pa_layer_attr *dev_req_params)
+{
+       int err = 0;
+
+       switch (status) {
+       case PRE_CHANGE:
+               if (ufshcd_is_hs_mode(dev_max_params) &&
+                   (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
+                       ufs_intel_set_lanes(hba, 2);
+               memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
+               break;
+       case POST_CHANGE:
+               if (ufshcd_is_hs_mode(dev_req_params)) {
+                       u32 peer_granularity;
+
+                       usleep_range(1000, 1250);
+                       err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+                                                 &peer_granularity);
+               }
+               break;
+       default:
+               break;
+       }
+
+       return err;
+}
+
+static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
+{
+       u32 granularity, peer_granularity;
+       u32 pa_tactivate, peer_pa_tactivate;
+       int ret;
+
+       ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
+       if (ret)
+               goto out;
+
+       ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
+       if (ret)
+               goto out;
+
+       ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+       if (ret)
+               goto out;
+
+       ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
+       if (ret)
+               goto out;
+
+       if (granularity == peer_granularity) {
+               u32 new_peer_pa_tactivate = pa_tactivate + 2;
+
+               ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
+       }
+out:
+       return ret;
+}
+
 #define INTEL_ACTIVELTR                0x804
 #define INTEL_IDLELTR          0x808
 
@@ -351,6 +426,7 @@ static int ufs_intel_lkf_init(struct ufs_hba *hba)
        struct ufs_host *ufs_host;
        int err;
 
+       hba->nop_out_timeout = 200;
        hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
        hba->caps |= UFSHCD_CAP_CRYPTO;
        err = ufs_intel_common_init(hba);
@@ -381,6 +457,8 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
        .exit                   = ufs_intel_common_exit,
        .hce_enable_notify      = ufs_intel_hce_enable_notify,
        .link_startup_notify    = ufs_intel_link_startup_notify,
+       .pwr_change_notify      = ufs_intel_lkf_pwr_change_notify,
+       .apply_dev_quirks       = ufs_intel_lkf_apply_dev_quirks,
        .resume                 = ufs_intel_resume,
        .device_reset           = ufs_intel_device_reset,
 };
index 3841ab4..029c963 100644 (file)
@@ -17,8 +17,6 @@
 #include <linux/blk-pm.h>
 #include <linux/blkdev.h>
 #include <scsi/scsi_driver.h>
-#include <scsi/scsi_transport.h>
-#include "../scsi_transport_api.h"
 #include "ufshcd.h"
 #include "ufs_quirks.h"
 #include "unipro.h"
@@ -237,6 +235,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
 static irqreturn_t ufshcd_intr(int irq, void *__hba);
 static int ufshcd_change_power_mode(struct ufs_hba *hba,
                             struct ufs_pa_layer_attr *pwr_mode);
+static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
@@ -2759,8 +2758,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 out:
        up_read(&hba->clk_scaling_lock);
 
-       if (ufs_trigger_eh())
-               scsi_schedule_eh(hba->host);
+       if (ufs_trigger_eh()) {
+               unsigned long flags;
+
+               spin_lock_irqsave(hba->host->host_lock, flags);
+               ufshcd_schedule_eh_work(hba);
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+       }
 
        return err;
 }
@@ -3919,35 +3923,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
 
-static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
-{
-       lockdep_assert_held(hba->host->host_lock);
-
-       return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
-              (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
-}
-
-static void ufshcd_schedule_eh(struct ufs_hba *hba)
-{
-       bool schedule_eh = false;
-       unsigned long flags;
-
-       spin_lock_irqsave(hba->host->host_lock, flags);
-       /* handle fatal errors only when link is not in error state */
-       if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
-               if (hba->force_reset || ufshcd_is_link_broken(hba) ||
-                   ufshcd_is_saved_err_fatal(hba))
-                       hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
-               else
-                       hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
-               schedule_eh = true;
-       }
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-       if (schedule_eh)
-               scsi_schedule_eh(hba->host);
-}
-
 /**
  * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
  * state) and waits for it to take effect.
@@ -3968,7 +3943,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 {
        DECLARE_COMPLETION_ONSTACK(uic_async_done);
        unsigned long flags;
-       bool schedule_eh = false;
        u8 status;
        int ret;
        bool reenable_intr = false;
@@ -4038,14 +4012,10 @@ out:
                ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
        if (ret) {
                ufshcd_set_link_broken(hba);
-               schedule_eh = true;
+               ufshcd_schedule_eh_work(hba);
        }
-
 out_unlock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-       if (schedule_eh)
-               ufshcd_schedule_eh(hba);
        mutex_unlock(&hba->uic_cmd_mutex);
 
        return ret;
@@ -4776,7 +4746,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
        mutex_lock(&hba->dev_cmd.lock);
        for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
                err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
-                                              NOP_OUT_TIMEOUT);
+                                         hba->nop_out_timeout);
 
                if (!err || err == -ETIMEDOUT)
                        break;
@@ -5911,6 +5881,27 @@ out:
        return err_handling;
 }
 
+/* host lock must be held before calling this func */
+static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
+{
+       return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
+              (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
+}
+
+/* host lock must be held before calling this func */
+static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
+{
+       /* handle fatal errors only when link is not in error state */
+       if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
+               if (hba->force_reset || ufshcd_is_link_broken(hba) ||
+                   ufshcd_is_saved_err_fatal(hba))
+                       hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
+               else
+                       hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
+               queue_work(hba->eh_wq, &hba->eh_work);
+       }
+}
+
 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
 {
        down_write(&hba->clk_scaling_lock);
@@ -6044,11 +6035,11 @@ static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
 
 /**
  * ufshcd_err_handler - handle UFS errors that require s/w attention
- * @host: SCSI host pointer
+ * @work: pointer to work structure
  */
-static void ufshcd_err_handler(struct Scsi_Host *host)
+static void ufshcd_err_handler(struct work_struct *work)
 {
-       struct ufs_hba *hba = shost_priv(host);
+       struct ufs_hba *hba;
        unsigned long flags;
        bool err_xfer = false;
        bool err_tm = false;
@@ -6056,9 +6047,10 @@ static void ufshcd_err_handler(struct Scsi_Host *host)
        int tag;
        bool needs_reset = false, needs_restore = false;
 
+       hba = container_of(work, struct ufs_hba, eh_work);
+
        down(&hba->host_sem);
        spin_lock_irqsave(hba->host->host_lock, flags);
-       hba->host->host_eh_scheduled = 0;
        if (ufshcd_err_handling_should_stop(hba)) {
                if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
                        hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
@@ -6371,6 +6363,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
                                         "host_regs: ");
                        ufshcd_print_pwr_info(hba);
                }
+               ufshcd_schedule_eh_work(hba);
                retval |= IRQ_HANDLED;
        }
        /*
@@ -6382,10 +6375,6 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
        hba->errors = 0;
        hba->uic_error = 0;
        spin_unlock(hba->host->host_lock);
-
-       if (queue_eh_work)
-               ufshcd_schedule_eh(hba);
-
        return retval;
 }
 
@@ -6876,7 +6865,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
                        err = ufshcd_clear_cmd(hba, pos);
                        if (err)
                                break;
-                       __ufshcd_transfer_req_compl(hba, pos, /*retry_requests=*/true);
+                       __ufshcd_transfer_req_compl(hba, 1U << pos, false);
                }
        }
 
@@ -7048,17 +7037,15 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
         * will be to send LU reset which, again, is a spec violation.
         * To avoid these unnecessary/illegal steps, first we clean up
         * the lrb taken by this cmd and re-set it in outstanding_reqs,
-        * then queue the error handler and bail.
+        * then queue the eh_work and bail.
         */
        if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
                ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
 
                spin_lock_irqsave(host->host_lock, flags);
                hba->force_reset = true;
+               ufshcd_schedule_eh_work(hba);
                spin_unlock_irqrestore(host->host_lock, flags);
-
-               ufshcd_schedule_eh(hba);
-
                goto release;
        }
 
@@ -7191,10 +7178,11 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
 
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->force_reset = true;
+       ufshcd_schedule_eh_work(hba);
        dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-       ufshcd_err_handler(hba->host);
+       flush_work(&hba->eh_work);
 
        spin_lock_irqsave(hba->host->host_lock, flags);
        if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
@@ -8604,6 +8592,8 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
        if (hba->is_powered) {
                ufshcd_exit_clk_scaling(hba);
                ufshcd_exit_clk_gating(hba);
+               if (hba->eh_wq)
+                       destroy_workqueue(hba->eh_wq);
                ufs_debugfs_hba_exit(hba);
                ufshcd_variant_hba_exit(hba);
                ufshcd_setup_vreg(hba, false);
@@ -9448,10 +9438,6 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
        return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
 }
 
-static struct scsi_transport_template ufshcd_transport_template = {
-       .eh_strategy_handler = ufshcd_err_handler,
-};
-
 /**
  * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
  * @dev: pointer to device handle
@@ -9478,11 +9464,11 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
                err = -ENOMEM;
                goto out_error;
        }
-       host->transportt = &ufshcd_transport_template;
        hba = shost_priv(host);
        hba->host = host;
        hba->dev = dev;
        hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
+       hba->nop_out_timeout = NOP_OUT_TIMEOUT;
        INIT_LIST_HEAD(&hba->clk_list_head);
        spin_lock_init(&hba->outstanding_lock);
 
@@ -9517,6 +9503,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        int err;
        struct Scsi_Host *host = hba->host;
        struct device *dev = hba->dev;
+       char eh_wq_name[sizeof("ufs_eh_wq_00")];
 
        if (!mmio_base) {
                dev_err(hba->dev,
@@ -9570,6 +9557,17 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
        hba->max_pwr_info.is_valid = false;
 
+       /* Initialize work queues */
+       snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
+                hba->host->host_no);
+       hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
+       if (!hba->eh_wq) {
+               dev_err(hba->dev, "%s: failed to create eh workqueue\n",
+                       __func__);
+               err = -ENOMEM;
+               goto out_disable;
+       }
+       INIT_WORK(&hba->eh_work, ufshcd_err_handler);
        INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
 
        sema_init(&hba->host_sem, 1);
index 52ea6f3..f0da5d3 100644 (file)
@@ -741,6 +741,8 @@ struct ufs_hba_monitor {
  * @is_powered: flag to check if HBA is powered
  * @shutting_down: flag to check if shutdown has been invoked
  * @host_sem: semaphore used to serialize concurrent contexts
+ * @eh_wq: Workqueue that eh_work works on
+ * @eh_work: Worker to handle UFS errors that require s/w attention
  * @eeh_work: Worker to handle exception events
  * @errors: HBA errors
  * @uic_error: UFS interconnect layer error status
@@ -843,6 +845,8 @@ struct ufs_hba {
        struct semaphore host_sem;
 
        /* Work Queues */
+       struct workqueue_struct *eh_wq;
+       struct work_struct eh_work;
        struct work_struct eeh_work;
 
        /* HBA Errors */
@@ -858,6 +862,7 @@ struct ufs_hba {
        /* Device management request data */
        struct ufs_dev_cmd dev_cmd;
        ktime_t last_dme_cmd_tstamp;
+       int nop_out_timeout;
 
        /* Keeps information of the UFS device connected to this host */
        struct ufs_dev_info dev_info;
index 02fb51a..589af5f 100644 (file)
@@ -333,9 +333,8 @@ ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
 }
 
 static void
-ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshpb_lu *hpb,
-                           struct ufshcd_lrb *lrbp, u32 lpn, __be64 ppn,
-                           u8 transfer_len, int read_id)
+ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+                           __be64 ppn, u8 transfer_len, int read_id)
 {
        unsigned char *cdb = lrbp->cmd->cmnd;
        __be64 ppn_tmp = ppn;
@@ -703,8 +702,7 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
                }
        }
 
-       ufshpb_set_hpb_read_to_upiu(hba, hpb, lrbp, lpn, ppn, transfer_len,
-                                   read_id);
+       ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len, read_id);
 
        hpb->stats.hit_cnt++;
        return 0;
index 540861c..553b6b9 100644 (file)
@@ -600,6 +600,12 @@ static int rockchip_spi_transfer_one(
        int ret;
        bool use_dma;
 
+       /* Zero length transfers won't trigger an interrupt on completion */
+       if (!xfer->len) {
+               spi_finalize_current_transfer(ctlr);
+               return 1;
+       }
+
        WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
                (readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
 
index ebd27f8..8ce840c 100644 (file)
@@ -204,9 +204,6 @@ struct tegra_slink_data {
        struct dma_async_tx_descriptor          *tx_dma_desc;
 };
 
-static int tegra_slink_runtime_suspend(struct device *dev);
-static int tegra_slink_runtime_resume(struct device *dev);
-
 static inline u32 tegra_slink_readl(struct tegra_slink_data *tspi,
                unsigned long reg)
 {
@@ -1185,6 +1182,7 @@ static int tegra_slink_resume(struct device *dev)
 }
 #endif
 
+#ifdef CONFIG_PM
 static int tegra_slink_runtime_suspend(struct device *dev)
 {
        struct spi_master *master = dev_get_drvdata(dev);
@@ -1210,6 +1208,7 @@ static int tegra_slink_runtime_resume(struct device *dev)
        }
        return 0;
 }
+#endif /* CONFIG_PM */
 
 static const struct dev_pm_ops slink_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra_slink_runtime_suspend,
index 57e2499..aea037c 100644 (file)
@@ -58,10 +58,6 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
        const struct spi_device *spi = to_spi_device(dev);
        int len;
 
-       len = of_device_modalias(dev, buf, PAGE_SIZE);
-       if (len != -ENODEV)
-               return len;
-
        len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
        if (len != -ENODEV)
                return len;
@@ -367,10 +363,6 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
        const struct spi_device         *spi = to_spi_device(dev);
        int rc;
 
-       rc = of_device_uevent_modalias(dev, env);
-       if (rc != -ENODEV)
-               return rc;
-
        rc = acpi_device_uevent_modalias(dev, env);
        if (rc != -ENODEV)
                return rc;
index e6d860a..dc4ed0f 100644 (file)
@@ -761,6 +761,17 @@ out:
        gbphy_runtime_put_autosuspend(gb_tty->gbphy_dev);
 }
 
+static void gb_tty_port_destruct(struct tty_port *port)
+{
+       struct gb_tty *gb_tty = container_of(port, struct gb_tty, port);
+
+       if (gb_tty->minor != GB_NUM_MINORS)
+               release_minor(gb_tty);
+       kfifo_free(&gb_tty->write_fifo);
+       kfree(gb_tty->buffer);
+       kfree(gb_tty);
+}
+
 static const struct tty_operations gb_ops = {
        .install =              gb_tty_install,
        .open =                 gb_tty_open,
@@ -786,6 +797,7 @@ static const struct tty_port_operations gb_port_ops = {
        .dtr_rts =              gb_tty_dtr_rts,
        .activate =             gb_tty_port_activate,
        .shutdown =             gb_tty_port_shutdown,
+       .destruct =             gb_tty_port_destruct,
 };
 
 static int gb_uart_probe(struct gbphy_device *gbphy_dev,
@@ -798,17 +810,11 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
        int retval;
        int minor;
 
-       gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL);
-       if (!gb_tty)
-               return -ENOMEM;
-
        connection = gb_connection_create(gbphy_dev->bundle,
                                          le16_to_cpu(gbphy_dev->cport_desc->id),
                                          gb_uart_request_handler);
-       if (IS_ERR(connection)) {
-               retval = PTR_ERR(connection);
-               goto exit_tty_free;
-       }
+       if (IS_ERR(connection))
+               return PTR_ERR(connection);
 
        max_payload = gb_operation_get_payload_size_max(connection);
        if (max_payload < sizeof(struct gb_uart_send_data_request)) {
@@ -816,13 +822,23 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
                goto exit_connection_destroy;
        }
 
+       gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL);
+       if (!gb_tty) {
+               retval = -ENOMEM;
+               goto exit_connection_destroy;
+       }
+
+       tty_port_init(&gb_tty->port);
+       gb_tty->port.ops = &gb_port_ops;
+       gb_tty->minor = GB_NUM_MINORS;
+
        gb_tty->buffer_payload_max = max_payload -
                        sizeof(struct gb_uart_send_data_request);
 
        gb_tty->buffer = kzalloc(gb_tty->buffer_payload_max, GFP_KERNEL);
        if (!gb_tty->buffer) {
                retval = -ENOMEM;
-               goto exit_connection_destroy;
+               goto exit_put_port;
        }
 
        INIT_WORK(&gb_tty->tx_work, gb_uart_tx_write_work);
@@ -830,7 +846,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
        retval = kfifo_alloc(&gb_tty->write_fifo, GB_UART_WRITE_FIFO_SIZE,
                             GFP_KERNEL);
        if (retval)
-               goto exit_buf_free;
+               goto exit_put_port;
 
        gb_tty->credits = GB_UART_FIRMWARE_CREDITS;
        init_completion(&gb_tty->credits_complete);
@@ -844,7 +860,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
                } else {
                        retval = minor;
                }
-               goto exit_kfifo_free;
+               goto exit_put_port;
        }
 
        gb_tty->minor = minor;
@@ -853,9 +869,6 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
        init_waitqueue_head(&gb_tty->wioctl);
        mutex_init(&gb_tty->mutex);
 
-       tty_port_init(&gb_tty->port);
-       gb_tty->port.ops = &gb_port_ops;
-
        gb_tty->connection = connection;
        gb_tty->gbphy_dev = gbphy_dev;
        gb_connection_set_data(connection, gb_tty);
@@ -863,7 +876,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
 
        retval = gb_connection_enable_tx(connection);
        if (retval)
-               goto exit_release_minor;
+               goto exit_put_port;
 
        send_control(gb_tty, gb_tty->ctrlout);
 
@@ -890,16 +903,10 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
 
 exit_connection_disable:
        gb_connection_disable(connection);
-exit_release_minor:
-       release_minor(gb_tty);
-exit_kfifo_free:
-       kfifo_free(&gb_tty->write_fifo);
-exit_buf_free:
-       kfree(gb_tty->buffer);
+exit_put_port:
+       tty_port_put(&gb_tty->port);
 exit_connection_destroy:
        gb_connection_destroy(connection);
-exit_tty_free:
-       kfree(gb_tty);
 
        return retval;
 }
@@ -930,15 +937,10 @@ static void gb_uart_remove(struct gbphy_device *gbphy_dev)
        gb_connection_disable_rx(connection);
        tty_unregister_device(gb_tty_driver, gb_tty->minor);
 
-       /* FIXME - free transmit / receive buffers */
-
        gb_connection_disable(connection);
-       tty_port_destroy(&gb_tty->port);
        gb_connection_destroy(connection);
-       release_minor(gb_tty);
-       kfifo_free(&gb_tty->write_fifo);
-       kfree(gb_tty->buffer);
-       kfree(gb_tty);
+
+       tty_port_put(&gb_tty->port);
 }
 
 static int gb_tty_init(void)
index 8a2edd6..20e5081 100644 (file)
@@ -919,7 +919,7 @@ static int hantro_probe(struct platform_device *pdev)
                if (!vpu->variant->irqs[i].handler)
                        continue;
 
-               if (vpu->variant->num_clocks > 1) {
+               if (vpu->variant->num_irqs > 1) {
                        irq_name = vpu->variant->irqs[i].name;
                        irq = platform_get_irq_byname(vpu->pdev, irq_name);
                } else {
index c589fe9..825af5f 100644 (file)
@@ -135,7 +135,7 @@ void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
                sizeimage = bytesperline * height;
 
                /* Chroma plane size. */
-               sizeimage += bytesperline * height / 2;
+               sizeimage += bytesperline * ALIGN(height, 64) / 2;
 
                break;
 
index 81d4255..1fd3750 100644 (file)
@@ -5372,8 +5372,8 @@ static int rtw_mp_read_reg(struct net_device *dev,
 
                        pnext++;
                        if (*pnext != '\0') {
-                                 strtout = simple_strtoul(pnext, &ptmp, 16);
-                                 sprintf(extra, "%s %d", extra, strtout);
+                               strtout = simple_strtoul(pnext, &ptmp, 16);
+                               sprintf(extra + strlen(extra), " %d", strtout);
                        } else {
                                  break;
                        }
@@ -5405,7 +5405,7 @@ static int rtw_mp_read_reg(struct net_device *dev,
                        pnext++;
                        if (*pnext != '\0') {
                                strtout = simple_strtoul(pnext, &ptmp, 16);
-                               sprintf(extra, "%s %d", extra, strtout);
+                               sprintf(extra + strlen(extra), " %d", strtout);
                        } else {
                                break;
                        }
@@ -5512,7 +5512,7 @@ static int rtw_mp_read_rf(struct net_device *dev,
                pnext++;
                if (*pnext != '\0') {
                          strtou = simple_strtoul(pnext, &ptmp, 16);
-                         sprintf(extra, "%s %d", extra, strtou);
+                         sprintf(extra + strlen(extra), " %d", strtou);
                } else {
                          break;
                }
index 102ec64..023bd45 100644 (file)
@@ -1110,20 +1110,24 @@ static ssize_t alua_support_store(struct config_item *item,
 {
        struct se_dev_attrib *da = to_attrib(item);
        struct se_device *dev = da->da_dev;
-       bool flag;
+       bool flag, oldflag;
        int ret;
 
+       ret = strtobool(page, &flag);
+       if (ret < 0)
+               return ret;
+
+       oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA);
+       if (flag == oldflag)
+               return count;
+
        if (!(dev->transport->transport_flags_changeable &
              TRANSPORT_FLAG_PASSTHROUGH_ALUA)) {
                pr_err("dev[%p]: Unable to change SE Device alua_support:"
                        " alua_support has fixed value\n", dev);
-               return -EINVAL;
+               return -ENOSYS;
        }
 
-       ret = strtobool(page, &flag);
-       if (ret < 0)
-               return ret;
-
        if (flag)
                dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA;
        else
@@ -1145,20 +1149,24 @@ static ssize_t pgr_support_store(struct config_item *item,
 {
        struct se_dev_attrib *da = to_attrib(item);
        struct se_device *dev = da->da_dev;
-       bool flag;
+       bool flag, oldflag;
        int ret;
 
+       ret = strtobool(page, &flag);
+       if (ret < 0)
+               return ret;
+
+       oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR);
+       if (flag == oldflag)
+               return count;
+
        if (!(dev->transport->transport_flags_changeable &
              TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
                pr_err("dev[%p]: Unable to change SE Device pgr_support:"
                        " pgr_support has fixed value\n", dev);
-               return -EINVAL;
+               return -ENOSYS;
        }
 
-       ret = strtobool(page, &flag);
-       if (ret < 0)
-               return ret;
-
        if (flag)
                dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
        else
index 4b94b08..3829b61 100644 (file)
@@ -269,7 +269,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
        spin_lock(&dev->dev_reservation_lock);
        if (dev->reservation_holder &&
            dev->reservation_holder->se_node_acl != sess->se_node_acl) {
-               pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
+               pr_err("SCSI-2 RESERVATION CONFLICT for %s fabric\n",
                        tpg->se_tpg_tfo->fabric_name);
                pr_err("Original reserver LUN: %llu %s\n",
                        cmd->se_lun->unpacked_lun,
index 0f0038a..fb64acf 100644 (file)
@@ -107,7 +107,7 @@ static int tcc_offset_update(unsigned int tcc)
        return 0;
 }
 
-static unsigned int tcc_offset_save;
+static int tcc_offset_save = -1;
 
 static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
                                struct device_attribute *attr, const char *buf,
@@ -352,7 +352,8 @@ int proc_thermal_resume(struct device *dev)
        proc_dev = dev_get_drvdata(dev);
        proc_thermal_read_ppcc(proc_dev);
 
-       tcc_offset_update(tcc_offset_save);
+       if (tcc_offset_save >= 0)
+               tcc_offset_update(tcc_offset_save);
 
        return 0;
 }
index 4c7ebd1..b1162e5 100644 (file)
@@ -417,7 +417,7 @@ static irqreturn_t tsens_critical_irq_thread(int irq, void *data)
                const struct tsens_sensor *s = &priv->sensor[i];
                u32 hw_id = s->hw_id;
 
-               if (IS_ERR(s->tzd))
+               if (!s->tzd)
                        continue;
                if (!tsens_threshold_violated(priv, hw_id, &d))
                        continue;
@@ -467,7 +467,7 @@ static irqreturn_t tsens_irq_thread(int irq, void *data)
                const struct tsens_sensor *s = &priv->sensor[i];
                u32 hw_id = s->hw_id;
 
-               if (IS_ERR(s->tzd))
+               if (!s->tzd)
                        continue;
                if (!tsens_threshold_violated(priv, hw_id, &d))
                        continue;
index 97ef9b0..51374f4 100644 (file)
@@ -222,15 +222,14 @@ int thermal_build_list_of_policies(char *buf)
 {
        struct thermal_governor *pos;
        ssize_t count = 0;
-       ssize_t size = PAGE_SIZE;
 
        mutex_lock(&thermal_governor_lock);
 
        list_for_each_entry(pos, &thermal_governor_list, governor_list) {
-               size = PAGE_SIZE - count;
-               count += scnprintf(buf + count, size, "%s ", pos->name);
+               count += scnprintf(buf + count, PAGE_SIZE - count, "%s ",
+                                  pos->name);
        }
-       count += scnprintf(buf + count, size, "\n");
+       count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
 
        mutex_unlock(&thermal_governor_lock);
 
index 891fd83..73e5f1d 100644 (file)
 #define UART_OMAP_EFR2_TIMEOUT_BEHAVE  BIT(6)
 
 /* RX FIFO occupancy indicator */
-#define UART_OMAP_RX_LVL               0x64
+#define UART_OMAP_RX_LVL               0x19
 
 struct omap8250_priv {
        int line;
index 231de29..ab226da 100644 (file)
@@ -163,7 +163,7 @@ static unsigned int mvebu_uart_tx_empty(struct uart_port *port)
        st = readl(port->membase + UART_STAT);
        spin_unlock_irqrestore(&port->lock, flags);
 
-       return (st & STAT_TX_FIFO_EMP) ? TIOCSER_TEMT : 0;
+       return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
 }
 
 static unsigned int mvebu_uart_get_mctrl(struct uart_port *port)
index a9acd93..25c558e 100644 (file)
@@ -438,8 +438,8 @@ static void reset_tbufs(struct slgt_info *info);
 static void tdma_reset(struct slgt_info *info);
 static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);
 
-static void get_signals(struct slgt_info *info);
-static void set_signals(struct slgt_info *info);
+static void get_gtsignals(struct slgt_info *info);
+static void set_gtsignals(struct slgt_info *info);
 static void set_rate(struct slgt_info *info, u32 data_rate);
 
 static void bh_transmit(struct slgt_info *info);
@@ -720,7 +720,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
        if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
                info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
                spin_lock_irqsave(&info->lock,flags);
-               set_signals(info);
+               set_gtsignals(info);
                spin_unlock_irqrestore(&info->lock,flags);
        }
 
@@ -730,7 +730,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
                if (!C_CRTSCTS(tty) || !tty_throttled(tty))
                        info->signals |= SerialSignal_RTS;
                spin_lock_irqsave(&info->lock,flags);
-               set_signals(info);
+               set_gtsignals(info);
                spin_unlock_irqrestore(&info->lock,flags);
        }
 
@@ -1181,7 +1181,7 @@ static inline void line_info(struct seq_file *m, struct slgt_info *info)
 
        /* output current serial signal states */
        spin_lock_irqsave(&info->lock,flags);
-       get_signals(info);
+       get_gtsignals(info);
        spin_unlock_irqrestore(&info->lock,flags);
 
        stat_buf[0] = 0;
@@ -1281,7 +1281,7 @@ static void throttle(struct tty_struct * tty)
        if (C_CRTSCTS(tty)) {
                spin_lock_irqsave(&info->lock,flags);
                info->signals &= ~SerialSignal_RTS;
-               set_signals(info);
+               set_gtsignals(info);
                spin_unlock_irqrestore(&info->lock,flags);
        }
 }
@@ -1306,7 +1306,7 @@ static void unthrottle(struct tty_struct * tty)
        if (C_CRTSCTS(tty)) {
                spin_lock_irqsave(&info->lock,flags);
                info->signals |= SerialSignal_RTS;
-               set_signals(info);
+               set_gtsignals(info);
                spin_unlock_irqrestore(&info->lock,flags);
        }
 }
@@ -1477,7 +1477,7 @@ static int hdlcdev_open(struct net_device *dev)
 
        /* inform generic HDLC layer of current DCD status */
        spin_lock_irqsave(&info->lock, flags);
-       get_signals(info);
+       get_gtsignals(info);
        spin_unlock_irqrestore(&info->lock, flags);
        if (info->signals & SerialSignal_DCD)
                netif_carrier_on(dev);
@@ -2229,7 +2229,7 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
                if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) {
                        info->signals &= ~SerialSignal_RTS;
                        info->drop_rts_on_tx_done = false;
-                       set_signals(info);
+                       set_gtsignals(info);
                }
 
 #if SYNCLINK_GENERIC_HDLC
@@ -2394,7 +2394,7 @@ static void shutdown(struct slgt_info *info)
 
        if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
                info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
-               set_signals(info);
+               set_gtsignals(info);
        }
 
        flush_cond_wait(&info->gpio_wait_q);
@@ -2422,7 +2422,7 @@ static void program_hw(struct slgt_info *info)
        else
                async_mode(info);
 
-       set_signals(info);
+       set_gtsignals(info);
 
        info->dcd_chkcount = 0;
        info->cts_chkcount = 0;
@@ -2430,7 +2430,7 @@ static void program_hw(struct slgt_info *info)
        info->dsr_chkcount = 0;
 
        slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR | IRQ_RI);
-       get_signals(info);
+       get_gtsignals(info);
 
        if (info->netcount ||
            (info->port.tty && info->port.tty->termios.c_cflag & CREAD))
@@ -2667,7 +2667,7 @@ static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr)
        spin_lock_irqsave(&info->lock,flags);
 
        /* return immediately if state matches requested events */
-       get_signals(info);
+       get_gtsignals(info);
        s = info->signals;
 
        events = mask &
@@ -3085,7 +3085,7 @@ static int tiocmget(struct tty_struct *tty)
        unsigned long flags;
 
        spin_lock_irqsave(&info->lock,flags);
-       get_signals(info);
+       get_gtsignals(info);
        spin_unlock_irqrestore(&info->lock,flags);
 
        result = ((info->signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
@@ -3124,7 +3124,7 @@ static int tiocmset(struct tty_struct *tty,
                info->signals &= ~SerialSignal_DTR;
 
        spin_lock_irqsave(&info->lock,flags);
-       set_signals(info);
+       set_gtsignals(info);
        spin_unlock_irqrestore(&info->lock,flags);
        return 0;
 }
@@ -3135,7 +3135,7 @@ static int carrier_raised(struct tty_port *port)
        struct slgt_info *info = container_of(port, struct slgt_info, port);
 
        spin_lock_irqsave(&info->lock,flags);
-       get_signals(info);
+       get_gtsignals(info);
        spin_unlock_irqrestore(&info->lock,flags);
        return (info->signals & SerialSignal_DCD) ? 1 : 0;
 }
@@ -3150,7 +3150,7 @@ static void dtr_rts(struct tty_port *port, int on)
                info->signals |= SerialSignal_RTS | SerialSignal_DTR;
        else
                info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
-       set_signals(info);
+       set_gtsignals(info);
        spin_unlock_irqrestore(&info->lock,flags);
 }
 
@@ -3948,10 +3948,10 @@ static void tx_start(struct slgt_info *info)
 
                if (info->params.mode != MGSL_MODE_ASYNC) {
                        if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
-                               get_signals(info);
+                               get_gtsignals(info);
                                if (!(info->signals & SerialSignal_RTS)) {
                                        info->signals |= SerialSignal_RTS;
-                                       set_signals(info);
+                                       set_gtsignals(info);
                                        info->drop_rts_on_tx_done = true;
                                }
                        }
@@ -4005,7 +4005,7 @@ static void reset_port(struct slgt_info *info)
        rx_stop(info);
 
        info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
-       set_signals(info);
+       set_gtsignals(info);
 
        slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
 }
@@ -4427,7 +4427,7 @@ static void tx_set_idle(struct slgt_info *info)
 /*
  * get state of V24 status (input) signals
  */
-static void get_signals(struct slgt_info *info)
+static void get_gtsignals(struct slgt_info *info)
 {
        unsigned short status = rd_reg16(info, SSR);
 
@@ -4489,7 +4489,7 @@ static void msc_set_vcr(struct slgt_info *info)
 /*
  * set state of V24 control (output) signals
  */
-static void set_signals(struct slgt_info *info)
+static void set_gtsignals(struct slgt_info *info)
 {
        unsigned char val = rd_reg8(info, VCR);
        if (info->signals & SerialSignal_DTR)
index 756a4bf..3e4e0b2 100644 (file)
@@ -812,7 +812,6 @@ void tty_ldisc_release(struct tty_struct *tty)
 
        tty_ldisc_debug(tty, "released\n");
 }
-EXPORT_SYMBOL_GPL(tty_ldisc_release);
 
 /**
  *     tty_ldisc_init          -       ldisc setup for new tty
index 5d8c982..1f3b4a1 100644 (file)
@@ -1100,6 +1100,19 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
        return 0;
 }
 
+static void cdns3_rearm_drdy_if_needed(struct cdns3_endpoint *priv_ep)
+{
+       struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+       if (priv_dev->dev_ver < DEV_VER_V3)
+               return;
+
+       if (readl(&priv_dev->regs->ep_sts) & EP_STS_TRBERR) {
+               writel(EP_STS_TRBERR, &priv_dev->regs->ep_sts);
+               writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+       }
+}
+
 /**
  * cdns3_ep_run_transfer - start transfer on no-default endpoint hardware
  * @priv_ep: endpoint object
@@ -1351,6 +1364,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
                /*clearing TRBERR and EP_STS_DESCMIS before seting DRDY*/
                writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
                writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+               cdns3_rearm_drdy_if_needed(priv_ep);
                trace_cdns3_doorbell_epx(priv_ep->name,
                                         readl(&priv_dev->regs->ep_traddr));
        }
index 8bbd8e2..4e2f155 100644 (file)
@@ -726,7 +726,8 @@ static void acm_port_destruct(struct tty_port *port)
 {
        struct acm *acm = container_of(port, struct acm, port);
 
-       acm_release_minor(acm);
+       if (acm->minor != ACM_MINOR_INVALID)
+               acm_release_minor(acm);
        usb_put_intf(acm->control);
        kfree(acm->country_codes);
        kfree(acm);
@@ -1323,8 +1324,10 @@ made_compressed_probe:
        usb_get_intf(acm->control); /* undone in destruct() */
 
        minor = acm_alloc_minor(acm);
-       if (minor < 0)
+       if (minor < 0) {
+               acm->minor = ACM_MINOR_INVALID;
                goto err_put_port;
+       }
 
        acm->minor = minor;
        acm->dev = usb_dev;
index 8aef5eb..3aa7f0a 100644 (file)
@@ -22,6 +22,8 @@
 #define ACM_TTY_MAJOR          166
 #define ACM_TTY_MINORS         256
 
+#define ACM_MINOR_INVALID      ACM_TTY_MINORS
+
 /*
  * Requests.
  */
index 0f8b7c9..7ee6e4c 100644 (file)
@@ -2761,6 +2761,26 @@ static void usb_put_invalidate_rhdev(struct usb_hcd *hcd)
 }
 
 /**
+ * usb_stop_hcd - Halt the HCD
+ * @hcd: the usb_hcd that has to be halted
+ *
+ * Stop the root-hub polling timer and invoke the HCD's ->stop callback.
+ */
+static void usb_stop_hcd(struct usb_hcd *hcd)
+{
+       hcd->rh_pollable = 0;
+       clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+       del_timer_sync(&hcd->rh_timer);
+
+       hcd->driver->stop(hcd);
+       hcd->state = HC_STATE_HALT;
+
+       /* In case the HCD restarted the timer, stop it again. */
+       clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+       del_timer_sync(&hcd->rh_timer);
+}
+
+/**
  * usb_add_hcd - finish generic HCD structure initialization and register
  * @hcd: the usb_hcd structure to initialize
  * @irqnum: Interrupt line to allocate
@@ -2775,6 +2795,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
 {
        int retval;
        struct usb_device *rhdev;
+       struct usb_hcd *shared_hcd;
 
        if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
                hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
@@ -2935,24 +2956,31 @@ int usb_add_hcd(struct usb_hcd *hcd,
                goto err_hcd_driver_start;
        }
 
+       /* starting here, usbcore will pay attention to the shared HCD roothub */
+       shared_hcd = hcd->shared_hcd;
+       if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) {
+               retval = register_root_hub(shared_hcd);
+               if (retval != 0)
+                       goto err_register_root_hub;
+
+               if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd))
+                       usb_hcd_poll_rh_status(shared_hcd);
+       }
+
        /* starting here, usbcore will pay attention to this root hub */
-       retval = register_root_hub(hcd);
-       if (retval != 0)
-               goto err_register_root_hub;
+       if (!HCD_DEFER_RH_REGISTER(hcd)) {
+               retval = register_root_hub(hcd);
+               if (retval != 0)
+                       goto err_register_root_hub;
 
-       if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
-               usb_hcd_poll_rh_status(hcd);
+               if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
+                       usb_hcd_poll_rh_status(hcd);
+       }
 
        return retval;
 
 err_register_root_hub:
-       hcd->rh_pollable = 0;
-       clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-       del_timer_sync(&hcd->rh_timer);
-       hcd->driver->stop(hcd);
-       hcd->state = HC_STATE_HALT;
-       clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-       del_timer_sync(&hcd->rh_timer);
+       usb_stop_hcd(hcd);
 err_hcd_driver_start:
        if (usb_hcd_is_primary_hcd(hcd) && hcd->irq > 0)
                free_irq(irqnum, hcd);
@@ -2985,6 +3013,7 @@ EXPORT_SYMBOL_GPL(usb_add_hcd);
 void usb_remove_hcd(struct usb_hcd *hcd)
 {
        struct usb_device *rhdev = hcd->self.root_hub;
+       bool rh_registered;
 
        dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
 
@@ -2995,6 +3024,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
 
        dev_dbg(hcd->self.controller, "roothub graceful disconnect\n");
        spin_lock_irq (&hcd_root_hub_lock);
+       rh_registered = hcd->rh_registered;
        hcd->rh_registered = 0;
        spin_unlock_irq (&hcd_root_hub_lock);
 
@@ -3004,7 +3034,8 @@ void usb_remove_hcd(struct usb_hcd *hcd)
        cancel_work_sync(&hcd->died_work);
 
        mutex_lock(&usb_bus_idr_lock);
-       usb_disconnect(&rhdev);         /* Sets rhdev to NULL */
+       if (rh_registered)
+               usb_disconnect(&rhdev);         /* Sets rhdev to NULL */
        mutex_unlock(&usb_bus_idr_lock);
 
        /*
@@ -3022,16 +3053,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
         * interrupt occurs), but usb_hcd_poll_rh_status() won't invoke
         * the hub_status_data() callback.
         */
-       hcd->rh_pollable = 0;
-       clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-       del_timer_sync(&hcd->rh_timer);
-
-       hcd->driver->stop(hcd);
-       hcd->state = HC_STATE_HALT;
-
-       /* In case the HCD restarted the timer, stop it again. */
-       clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-       del_timer_sync(&hcd->rh_timer);
+       usb_stop_hcd(hcd);
 
        if (usb_hcd_is_primary_hcd(hcd)) {
                if (hcd->irq > 0)
index 837237e..11d85a6 100644 (file)
@@ -115,10 +115,16 @@ static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
  */
 static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
 {
+       struct dwc2_hsotg *hsotg = hs_ep->parent;
+       u16 limit = DSTS_SOFFN_LIMIT;
+
+       if (hsotg->gadget.speed != USB_SPEED_HIGH)
+               limit >>= 3;
+
        hs_ep->target_frame += hs_ep->interval;
-       if (hs_ep->target_frame > DSTS_SOFFN_LIMIT) {
+       if (hs_ep->target_frame > limit) {
                hs_ep->frame_overrun = true;
-               hs_ep->target_frame &= DSTS_SOFFN_LIMIT;
+               hs_ep->target_frame &= limit;
        } else {
                hs_ep->frame_overrun = false;
        }
@@ -136,10 +142,16 @@ static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
  */
 static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
 {
+       struct dwc2_hsotg *hsotg = hs_ep->parent;
+       u16 limit = DSTS_SOFFN_LIMIT;
+
+       if (hsotg->gadget.speed != USB_SPEED_HIGH)
+               limit >>= 3;
+
        if (hs_ep->target_frame)
                hs_ep->target_frame -= 1;
        else
-               hs_ep->target_frame = DSTS_SOFFN_LIMIT;
+               hs_ep->target_frame = limit;
 }
 
 /**
@@ -1018,6 +1030,12 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
        dwc2_writel(hsotg, ctrl, depctl);
 }
 
+static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep);
+static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
+                                       struct dwc2_hsotg_ep *hs_ep,
+                                      struct dwc2_hsotg_req *hs_req,
+                                      int result);
+
 /**
  * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
  * @hsotg: The controller state.
@@ -1170,14 +1188,19 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
                }
        }
 
-       if (hs_ep->isochronous && hs_ep->interval == 1) {
-               hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
-               dwc2_gadget_incr_frame_num(hs_ep);
-
-               if (hs_ep->target_frame & 0x1)
-                       ctrl |= DXEPCTL_SETODDFR;
-               else
-                       ctrl |= DXEPCTL_SETEVENFR;
+       if (hs_ep->isochronous) {
+               if (!dwc2_gadget_target_frame_elapsed(hs_ep)) {
+                       if (hs_ep->interval == 1) {
+                               if (hs_ep->target_frame & 0x1)
+                                       ctrl |= DXEPCTL_SETODDFR;
+                               else
+                                       ctrl |= DXEPCTL_SETEVENFR;
+                       }
+                       ctrl |= DXEPCTL_CNAK;
+               } else {
+                       dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
+                       return;
+               }
        }
 
        ctrl |= DXEPCTL_EPENA;  /* ensure ep enabled */
@@ -1325,12 +1348,16 @@ static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
        u32 target_frame = hs_ep->target_frame;
        u32 current_frame = hsotg->frame_number;
        bool frame_overrun = hs_ep->frame_overrun;
+       u16 limit = DSTS_SOFFN_LIMIT;
+
+       if (hsotg->gadget.speed != USB_SPEED_HIGH)
+               limit >>= 3;
 
        if (!frame_overrun && current_frame >= target_frame)
                return true;
 
        if (frame_overrun && current_frame >= target_frame &&
-           ((current_frame - target_frame) < DSTS_SOFFN_LIMIT / 2))
+           ((current_frame - target_frame) < limit / 2))
                return true;
 
        return false;
@@ -1713,11 +1740,9 @@ static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
  */
 static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
 {
-       u32 mask;
        struct dwc2_hsotg *hsotg = hs_ep->parent;
        int dir_in = hs_ep->dir_in;
        struct dwc2_hsotg_req *hs_req;
-       u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
 
        if (!list_empty(&hs_ep->queue)) {
                hs_req = get_ep_head(hs_ep);
@@ -1733,9 +1758,6 @@ static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
        } else {
                dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
                        __func__);
-               mask = dwc2_readl(hsotg, epmsk_reg);
-               mask |= DOEPMSK_OUTTKNEPDISMSK;
-               dwc2_writel(hsotg, mask, epmsk_reg);
        }
 }
 
@@ -2306,19 +2328,6 @@ static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
        dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
 }
 
-static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
-                                           u32 epctl_reg)
-{
-       u32 ctrl;
-
-       ctrl = dwc2_readl(hsotg, epctl_reg);
-       if (ctrl & DXEPCTL_EOFRNUM)
-               ctrl |= DXEPCTL_SETEVENFR;
-       else
-               ctrl |= DXEPCTL_SETODDFR;
-       dwc2_writel(hsotg, ctrl, epctl_reg);
-}
-
 /*
  * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc
  * @hs_ep - The endpoint on which transfer went
@@ -2439,20 +2448,11 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
                        dwc2_hsotg_ep0_zlp(hsotg, true);
        }
 
-       /*
-        * Slave mode OUT transfers do not go through XferComplete so
-        * adjust the ISOC parity here.
-        */
-       if (!using_dma(hsotg)) {
-               if (hs_ep->isochronous && hs_ep->interval == 1)
-                       dwc2_hsotg_change_ep_iso_parity(hsotg, DOEPCTL(epnum));
-               else if (hs_ep->isochronous && hs_ep->interval > 1)
-                       dwc2_gadget_incr_frame_num(hs_ep);
-       }
-
        /* Set actual frame number for completed transfers */
-       if (!using_desc_dma(hsotg) && hs_ep->isochronous)
-               req->frame_number = hsotg->frame_number;
+       if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
+               req->frame_number = hs_ep->target_frame;
+               dwc2_gadget_incr_frame_num(hs_ep);
+       }
 
        dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
 }
@@ -2766,6 +2766,12 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
                return;
        }
 
+       /* Set actual frame number for completed transfers */
+       if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
+               hs_req->req.frame_number = hs_ep->target_frame;
+               dwc2_gadget_incr_frame_num(hs_ep);
+       }
+
        dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
 }
 
@@ -2826,23 +2832,18 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
 
                dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
 
-               if (hs_ep->isochronous) {
-                       dwc2_hsotg_complete_in(hsotg, hs_ep);
-                       return;
-               }
-
                if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
                        int dctl = dwc2_readl(hsotg, DCTL);
 
                        dctl |= DCTL_CGNPINNAK;
                        dwc2_writel(hsotg, dctl, DCTL);
                }
-               return;
-       }
+       } else {
 
-       if (dctl & DCTL_GOUTNAKSTS) {
-               dctl |= DCTL_CGOUTNAK;
-               dwc2_writel(hsotg, dctl, DCTL);
+               if (dctl & DCTL_GOUTNAKSTS) {
+                       dctl |= DCTL_CGOUTNAK;
+                       dwc2_writel(hsotg, dctl, DCTL);
+               }
        }
 
        if (!hs_ep->isochronous)
@@ -2863,8 +2864,6 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
                /* Update current frame number value. */
                hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
        } while (dwc2_gadget_target_frame_elapsed(hs_ep));
-
-       dwc2_gadget_start_next_request(hs_ep);
 }
 
 /**
@@ -2881,8 +2880,8 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
 static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
 {
        struct dwc2_hsotg *hsotg = ep->parent;
+       struct dwc2_hsotg_req *hs_req;
        int dir_in = ep->dir_in;
-       u32 doepmsk;
 
        if (dir_in || !ep->isochronous)
                return;
@@ -2896,28 +2895,39 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
                return;
        }
 
-       if (ep->interval > 1 &&
-           ep->target_frame == TARGET_FRAME_INITIAL) {
+       if (ep->target_frame == TARGET_FRAME_INITIAL) {
                u32 ctrl;
 
                ep->target_frame = hsotg->frame_number;
-               dwc2_gadget_incr_frame_num(ep);
+               if (ep->interval > 1) {
+                       ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
+                       if (ep->target_frame & 0x1)
+                               ctrl |= DXEPCTL_SETODDFR;
+                       else
+                               ctrl |= DXEPCTL_SETEVENFR;
 
-               ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
-               if (ep->target_frame & 0x1)
-                       ctrl |= DXEPCTL_SETODDFR;
-               else
-                       ctrl |= DXEPCTL_SETEVENFR;
+                       dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
+               }
+       }
+
+       while (dwc2_gadget_target_frame_elapsed(ep)) {
+               hs_req = get_ep_head(ep);
+               if (hs_req)
+                       dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
 
-               dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
+               dwc2_gadget_incr_frame_num(ep);
+               /* Update current frame number value. */
+               hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
        }
 
-       dwc2_gadget_start_next_request(ep);
-       doepmsk = dwc2_readl(hsotg, DOEPMSK);
-       doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK;
-       dwc2_writel(hsotg, doepmsk, DOEPMSK);
+       if (!ep->req)
+               dwc2_gadget_start_next_request(ep);
+
 }
 
+static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
+                                  struct dwc2_hsotg_ep *hs_ep);
+
 /**
  * dwc2_gadget_handle_nak - handle NAK interrupt
  * @hs_ep: The endpoint on which interrupt is asserted.
@@ -2935,7 +2945,9 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
 static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
 {
        struct dwc2_hsotg *hsotg = hs_ep->parent;
+       struct dwc2_hsotg_req *hs_req;
        int dir_in = hs_ep->dir_in;
+       u32 ctrl;
 
        if (!dir_in || !hs_ep->isochronous)
                return;
@@ -2977,13 +2989,29 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
 
                        dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
                }
-
-               dwc2_hsotg_complete_request(hsotg, hs_ep,
-                                           get_ep_head(hs_ep), 0);
        }
 
-       if (!using_desc_dma(hsotg))
+       if (using_desc_dma(hsotg))
+               return;
+
+       ctrl = dwc2_readl(hsotg, DIEPCTL(hs_ep->index));
+       if (ctrl & DXEPCTL_EPENA)
+               dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
+       else
+               dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
+
+       while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
+               hs_req = get_ep_head(hs_ep);
+               if (hs_req)
+                       dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
+
                dwc2_gadget_incr_frame_num(hs_ep);
+               /* Update current frame number value. */
+               hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
+       }
+
+       if (!hs_ep->req)
+               dwc2_gadget_start_next_request(hs_ep);
 }
 
 /**
@@ -3039,21 +3067,15 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
 
                /* In DDMA handle isochronous requests separately */
                if (using_desc_dma(hsotg) && hs_ep->isochronous) {
-                       /* XferCompl set along with BNA */
-                       if (!(ints & DXEPINT_BNAINTR))
-                               dwc2_gadget_complete_isoc_request_ddma(hs_ep);
+                       dwc2_gadget_complete_isoc_request_ddma(hs_ep);
                } else if (dir_in) {
                        /*
                         * We get OutDone from the FIFO, so we only
                         * need to look at completing IN requests here
                         * if operating slave mode
                         */
-                       if (hs_ep->isochronous && hs_ep->interval > 1)
-                               dwc2_gadget_incr_frame_num(hs_ep);
-
-                       dwc2_hsotg_complete_in(hsotg, hs_ep);
-                       if (ints & DXEPINT_NAKINTRPT)
-                               ints &= ~DXEPINT_NAKINTRPT;
+                       if (!hs_ep->isochronous || !(ints & DXEPINT_NAKINTRPT))
+                               dwc2_hsotg_complete_in(hsotg, hs_ep);
 
                        if (idx == 0 && !hs_ep->req)
                                dwc2_hsotg_enqueue_setup(hsotg);
@@ -3062,10 +3084,8 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
                         * We're using DMA, we need to fire an OutDone here
                         * as we ignore the RXFIFO.
                         */
-                       if (hs_ep->isochronous && hs_ep->interval > 1)
-                               dwc2_gadget_incr_frame_num(hs_ep);
-
-                       dwc2_hsotg_handle_outdone(hsotg, idx);
+                       if (!hs_ep->isochronous || !(ints & DXEPINT_OUTTKNEPDIS))
+                               dwc2_hsotg_handle_outdone(hsotg, idx);
                }
        }
 
@@ -4085,6 +4105,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
                        mask |= DIEPMSK_NAKMSK;
                        dwc2_writel(hsotg, mask, DIEPMSK);
                } else {
+                       epctrl |= DXEPCTL_SNAK;
                        mask = dwc2_readl(hsotg, DOEPMSK);
                        mask |= DOEPMSK_OUTTKNEPDISMSK;
                        dwc2_writel(hsotg, mask, DOEPMSK);
index 2a78289..a215ec9 100644 (file)
@@ -5191,6 +5191,10 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
        hcd->has_tt = 1;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               retval = -EINVAL;
+               goto error1;
+       }
        hcd->rsrc_start = res->start;
        hcd->rsrc_len = resource_size(res);
 
index 01866dc..0104a80 100644 (file)
@@ -264,19 +264,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
 {
        u32             reg;
        int             retries = 1000;
-       int             ret;
-
-       usb_phy_init(dwc->usb2_phy);
-       usb_phy_init(dwc->usb3_phy);
-       ret = phy_init(dwc->usb2_generic_phy);
-       if (ret < 0)
-               return ret;
-
-       ret = phy_init(dwc->usb3_generic_phy);
-       if (ret < 0) {
-               phy_exit(dwc->usb2_generic_phy);
-               return ret;
-       }
 
        /*
         * We're resetting only the device side because, if we're in host mode,
@@ -310,9 +297,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
                        udelay(1);
        } while (--retries);
 
-       phy_exit(dwc->usb3_generic_phy);
-       phy_exit(dwc->usb2_generic_phy);
-
        return -ETIMEDOUT;
 
 done:
@@ -982,9 +966,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
                dwc->phys_ready = true;
        }
 
+       usb_phy_init(dwc->usb2_phy);
+       usb_phy_init(dwc->usb3_phy);
+       ret = phy_init(dwc->usb2_generic_phy);
+       if (ret < 0)
+               goto err0a;
+
+       ret = phy_init(dwc->usb3_generic_phy);
+       if (ret < 0) {
+               phy_exit(dwc->usb2_generic_phy);
+               goto err0a;
+       }
+
        ret = dwc3_core_soft_reset(dwc);
        if (ret)
-               goto err0a;
+               goto err1;
 
        if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
            !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
index 3c34995..be86456 100644 (file)
@@ -406,6 +406,14 @@ static struct usb_endpoint_descriptor ss_epin_fback_desc = {
        .bInterval = 4,
 };
 
+static struct usb_ss_ep_comp_descriptor ss_epin_fback_desc_comp = {
+       .bLength                = sizeof(ss_epin_fback_desc_comp),
+       .bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+       .bMaxBurst              = 0,
+       .bmAttributes           = 0,
+       .wBytesPerInterval      = cpu_to_le16(4),
+};
+
 
 /* Audio Streaming IN Interface - Alt0 */
 static struct usb_interface_descriptor std_as_in_if0_desc = {
@@ -597,6 +605,7 @@ static struct usb_descriptor_header *ss_audio_desc[] = {
        (struct usb_descriptor_header *)&ss_epout_desc_comp,
        (struct usb_descriptor_header *)&as_iso_out_desc,
        (struct usb_descriptor_header *)&ss_epin_fback_desc,
+       (struct usb_descriptor_header *)&ss_epin_fback_desc_comp,
 
        (struct usb_descriptor_header *)&std_as_in_if0_desc,
        (struct usb_descriptor_header *)&std_as_in_if1_desc,
@@ -705,6 +714,7 @@ static void setup_headers(struct f_uac2_opts *opts,
 {
        struct usb_ss_ep_comp_descriptor *epout_desc_comp = NULL;
        struct usb_ss_ep_comp_descriptor *epin_desc_comp = NULL;
+       struct usb_ss_ep_comp_descriptor *epin_fback_desc_comp = NULL;
        struct usb_endpoint_descriptor *epout_desc;
        struct usb_endpoint_descriptor *epin_desc;
        struct usb_endpoint_descriptor *epin_fback_desc;
@@ -730,6 +740,7 @@ static void setup_headers(struct f_uac2_opts *opts,
                epout_desc_comp = &ss_epout_desc_comp;
                epin_desc_comp = &ss_epin_desc_comp;
                epin_fback_desc = &ss_epin_fback_desc;
+               epin_fback_desc_comp = &ss_epin_fback_desc_comp;
                ep_int_desc = &ss_ep_int_desc;
        }
 
@@ -773,8 +784,11 @@ static void setup_headers(struct f_uac2_opts *opts,
 
                headers[i++] = USBDHDR(&as_iso_out_desc);
 
-               if (EPOUT_FBACK_IN_EN(opts))
+               if (EPOUT_FBACK_IN_EN(opts)) {
                        headers[i++] = USBDHDR(epin_fback_desc);
+                       if (epin_fback_desc_comp)
+                               headers[i++] = USBDHDR(epin_fback_desc_comp);
+               }
        }
 
        if (EPIN_EN(opts)) {
@@ -1164,6 +1178,9 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
        agdev->out_ep_maxpsize = max_t(u16, agdev->out_ep_maxpsize,
                                le16_to_cpu(ss_epout_desc.wMaxPacketSize));
 
+       ss_epin_desc_comp.wBytesPerInterval = ss_epin_desc.wMaxPacketSize;
+       ss_epout_desc_comp.wBytesPerInterval = ss_epout_desc.wMaxPacketSize;
+
        // HS and SS endpoint addresses are copied from autoconfigured FS descriptors
        hs_ep_int_desc.bEndpointAddress = fs_ep_int_desc.bEndpointAddress;
        hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
index 32ef228..ad16163 100644 (file)
@@ -96,11 +96,13 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
 };
 
 static void u_audio_set_fback_frequency(enum usb_device_speed speed,
+                                       struct usb_ep *out_ep,
                                        unsigned long long freq,
                                        unsigned int pitch,
                                        void *buf)
 {
        u32 ff = 0;
+       const struct usb_endpoint_descriptor *ep_desc;
 
        /*
         * Because the pitch base is 1000000, the final divider here
@@ -128,8 +130,13 @@ static void u_audio_set_fback_frequency(enum usb_device_speed speed,
                 * byte fromat (that is Q16.16)
                 *
                 * ff = (freq << 16) / 8000
+                *
+                * Win10 and OSX UAC2 drivers require number of samples per packet
+                * in order to honor the feedback value.
+                * Linux snd-usb-audio detects the applied bit-shift automatically.
                 */
-               freq <<= 4;
+               ep_desc = out_ep->desc;
+               freq <<= 4 + (ep_desc->bInterval - 1);
        }
 
        ff = DIV_ROUND_CLOSEST_ULL((freq * pitch), 1953125);
@@ -267,7 +274,7 @@ static void u_audio_iso_fback_complete(struct usb_ep *ep,
                pr_debug("%s: iso_complete status(%d) %d/%d\n",
                        __func__, status, req->actual, req->length);
 
-       u_audio_set_fback_frequency(audio_dev->gadget->speed,
+       u_audio_set_fback_frequency(audio_dev->gadget->speed, audio_dev->out_ep,
                                    params->c_srate, prm->pitch,
                                    req->buf);
 
@@ -526,7 +533,7 @@ int u_audio_start_capture(struct g_audio *audio_dev)
         * be meauserd at start of playback
         */
        prm->pitch = 1000000;
-       u_audio_set_fback_frequency(audio_dev->gadget->speed,
+       u_audio_set_fback_frequency(audio_dev->gadget->speed, ep,
                                    params->c_srate, prm->pitch,
                                    req_fback->buf);
 
index 65cae48..38e4d6b 100644 (file)
@@ -1250,7 +1250,7 @@ static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
                        do {
                                tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
                                udelay(1);
-                       } while (tmp != CS_IDST || timeout-- > 0);
+                       } while (tmp != CS_IDST && timeout-- > 0);
 
                        if (tmp == CS_IDST)
                                r8a66597_bset(r8a66597,
index 337b425..2df52f7 100644 (file)
@@ -406,12 +406,9 @@ static int bcma_hcd_probe(struct bcma_device *core)
                return -ENOMEM;
        usb_dev->core = core;
 
-       if (core->dev.of_node) {
+       if (core->dev.of_node)
                usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc",
                                                    GPIOD_OUT_HIGH);
-               if (IS_ERR(usb_dev->gpio_desc))
-                       return PTR_ERR(usb_dev->gpio_desc);
-       }
 
        switch (core->id.id) {
        case BCMA_CORE_USB20_HOST:
index 6bdc6d6..1776c05 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/moduleparam.h>
 #include <linux/dma-mapping.h>
 #include <linux/debugfs.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
 
 #include <asm/byteorder.h>
@@ -1278,29 +1279,39 @@ MODULE_LICENSE ("GPL");
 
 #ifdef CONFIG_USB_EHCI_SH
 #include "ehci-sh.c"
-#define PLATFORM_DRIVER                ehci_hcd_sh_driver
 #endif
 
 #ifdef CONFIG_PPC_PS3
 #include "ehci-ps3.c"
-#define        PS3_SYSTEM_BUS_DRIVER   ps3_ehci_driver
 #endif
 
 #ifdef CONFIG_USB_EHCI_HCD_PPC_OF
 #include "ehci-ppc-of.c"
-#define OF_PLATFORM_DRIVER     ehci_hcd_ppc_of_driver
 #endif
 
 #ifdef CONFIG_XPS_USB_HCD_XILINX
 #include "ehci-xilinx-of.c"
-#define XILINX_OF_PLATFORM_DRIVER      ehci_hcd_xilinx_of_driver
 #endif
 
 #ifdef CONFIG_SPARC_LEON
 #include "ehci-grlib.c"
-#define PLATFORM_DRIVER                ehci_grlib_driver
 #endif
 
+static struct platform_driver * const platform_drivers[] = {
+#ifdef CONFIG_USB_EHCI_SH
+       &ehci_hcd_sh_driver,
+#endif
+#ifdef CONFIG_USB_EHCI_HCD_PPC_OF
+       &ehci_hcd_ppc_of_driver,
+#endif
+#ifdef CONFIG_XPS_USB_HCD_XILINX
+       &ehci_hcd_xilinx_of_driver,
+#endif
+#ifdef CONFIG_SPARC_LEON
+       &ehci_grlib_driver,
+#endif
+};
+
 static int __init ehci_hcd_init(void)
 {
        int retval = 0;
@@ -1324,47 +1335,23 @@ static int __init ehci_hcd_init(void)
        ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
 #endif
 
-#ifdef PLATFORM_DRIVER
-       retval = platform_driver_register(&PLATFORM_DRIVER);
+       retval = platform_register_drivers(platform_drivers, ARRAY_SIZE(platform_drivers));
        if (retval < 0)
                goto clean0;
-#endif
-
-#ifdef PS3_SYSTEM_BUS_DRIVER
-       retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
-       if (retval < 0)
-               goto clean2;
-#endif
 
-#ifdef OF_PLATFORM_DRIVER
-       retval = platform_driver_register(&OF_PLATFORM_DRIVER);
+#ifdef CONFIG_PPC_PS3
+       retval = ps3_ehci_driver_register(&ps3_ehci_driver);
        if (retval < 0)
-               goto clean3;
+               goto clean1;
 #endif
 
-#ifdef XILINX_OF_PLATFORM_DRIVER
-       retval = platform_driver_register(&XILINX_OF_PLATFORM_DRIVER);
-       if (retval < 0)
-               goto clean4;
-#endif
-       return retval;
+       return 0;
 
-#ifdef XILINX_OF_PLATFORM_DRIVER
-       /* platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER); */
-clean4:
-#endif
-#ifdef OF_PLATFORM_DRIVER
-       platform_driver_unregister(&OF_PLATFORM_DRIVER);
-clean3:
-#endif
-#ifdef PS3_SYSTEM_BUS_DRIVER
-       ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
-clean2:
+#ifdef CONFIG_PPC_PS3
+clean1:
 #endif
-#ifdef PLATFORM_DRIVER
-       platform_driver_unregister(&PLATFORM_DRIVER);
+       platform_unregister_drivers(platform_drivers, ARRAY_SIZE(platform_drivers));
 clean0:
-#endif
 #ifdef CONFIG_DYNAMIC_DEBUG
        debugfs_remove(ehci_debug_root);
        ehci_debug_root = NULL;
@@ -1376,18 +1363,10 @@ module_init(ehci_hcd_init);
 
 static void __exit ehci_hcd_cleanup(void)
 {
-#ifdef XILINX_OF_PLATFORM_DRIVER
-       platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER);
-#endif
-#ifdef OF_PLATFORM_DRIVER
-       platform_driver_unregister(&OF_PLATFORM_DRIVER);
-#endif
-#ifdef PLATFORM_DRIVER
-       platform_driver_unregister(&PLATFORM_DRIVER);
-#endif
-#ifdef PS3_SYSTEM_BUS_DRIVER
-       ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
+#ifdef CONFIG_PPC_PS3
+       ps3_ehci_driver_unregister(&ps3_ehci_driver);
 #endif
+       platform_unregister_drivers(platform_drivers, ARRAY_SIZE(platform_drivers));
 #ifdef CONFIG_DYNAMIC_DEBUG
        debugfs_remove(ehci_debug_root);
 #endif
index f3dabd0..93c38b5 100644 (file)
@@ -692,6 +692,7 @@ int xhci_run(struct usb_hcd *hcd)
                if (ret)
                        xhci_free_command(xhci, command);
        }
+       set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "Finished xhci_run for USB2 roothub");
 
index c429376..c968ecd 100644 (file)
@@ -190,6 +190,7 @@ tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len)
        }
        if (len > 0) {
                /* Write the rest 1 - 3 bytes to FIFO */
+               val = 0;
                memcpy(&val, buf, len);
                musb_writel(fifo, 0, val);
        }
index 66a6ac5..1892798 100644 (file)
@@ -233,6 +233,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
        { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
        { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
+       { USB_DEVICE(0x2184, 0x0030) }, /* GW Instek GDM-834x Digital Multimeter */
        { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
        { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
        { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
@@ -258,6 +259,7 @@ struct cp210x_serial_private {
        speed_t                 max_speed;
        bool                    use_actual_rate;
        bool                    no_flow_control;
+       bool                    no_event_mode;
 };
 
 enum cp210x_event_state {
@@ -1113,12 +1115,16 @@ static void cp210x_change_speed(struct tty_struct *tty,
 
 static void cp210x_enable_event_mode(struct usb_serial_port *port)
 {
+       struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
        struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
        int ret;
 
        if (port_priv->event_mode)
                return;
 
+       if (priv->no_event_mode)
+               return;
+
        port_priv->event_state = ES_DATA;
        port_priv->event_mode = true;
 
@@ -2074,6 +2080,33 @@ static void cp210x_init_max_speed(struct usb_serial *serial)
        priv->use_actual_rate = use_actual_rate;
 }
 
+static void cp2102_determine_quirks(struct usb_serial *serial)
+{
+       struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+       u8 *buf;
+       int ret;
+
+       buf = kmalloc(2, GFP_KERNEL);
+       if (!buf)
+               return;
+       /*
+        * Some (possibly counterfeit) CP2102 do not support event-insertion
+        * mode and respond differently to malformed vendor requests.
+        * Specifically, they return one instead of two bytes when sent a
+        * two-byte part-number request.
+        */
+       ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+                       CP210X_VENDOR_SPECIFIC, REQTYPE_DEVICE_TO_HOST,
+                       CP210X_GET_PARTNUM, 0, buf, 2, USB_CTRL_GET_TIMEOUT);
+       if (ret == 1) {
+               dev_dbg(&serial->interface->dev,
+                               "device does not support event-insertion mode\n");
+               priv->no_event_mode = true;
+       }
+
+       kfree(buf);
+}
+
 static int cp210x_get_fw_version(struct usb_serial *serial, u16 value)
 {
        struct cp210x_serial_private *priv = usb_get_serial_data(serial);
@@ -2108,7 +2141,12 @@ static void cp210x_determine_type(struct usb_serial *serial)
                return;
        }
 
+       dev_dbg(&serial->interface->dev, "partnum = 0x%02x\n", priv->partnum);
+
        switch (priv->partnum) {
+       case CP210X_PARTNUM_CP2102:
+               cp2102_determine_quirks(serial);
+               break;
        case CP210X_PARTNUM_CP2105:
        case CP210X_PARTNUM_CP2108:
                cp210x_get_fw_version(serial, CP210X_GET_FW_VER);
index d7fe33c..925067a 100644 (file)
 #define BANDB_DEVICE_ID_USOPTL4_2P       0xBC02
 #define BANDB_DEVICE_ID_USOPTL4_4        0xAC44
 #define BANDB_DEVICE_ID_USOPTL4_4P       0xBC03
-#define BANDB_DEVICE_ID_USOPTL2_4        0xAC24
 
 /* Interrupt Routine Defines    */
 
@@ -186,7 +185,6 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P) },
        { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4) },
        { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P) },
-       { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4) },
        {}                      /* terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, id_table);
index 29c765c..6cfb5d3 100644 (file)
@@ -1205,6 +1205,14 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff),    /* Telit FD980 */
          .driver_info = NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1060, 0xff),    /* Telit LN920 (rmnet) */
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1061, 0xff),    /* Telit LN920 (MBIM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1062, 0xff),    /* Telit LN920 (RNDIS) */
+         .driver_info = NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff),    /* Telit LN920 (ECM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1650,7 +1658,6 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
@@ -2068,6 +2075,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
        { USB_DEVICE(0x0489, 0xe0b5),                                           /* Foxconn T77W968 ESIM */
          .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+       { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff),                     /* Foxconn T99W265 MBIM */
+         .driver_info = RSVD(3) },
        { USB_DEVICE(0x1508, 0x1001),                                           /* Fibocom NL668 (IOT version) */
          .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
        { USB_DEVICE(0x2cb7, 0x0104),                                           /* Fibocom NL678 series */
index efa972b..c6b3fcf 100644 (file)
@@ -416,9 +416,16 @@ UNUSUAL_DEV(  0x04cb, 0x0100, 0x0000, 0x2210,
                USB_SC_UFI, USB_PR_DEVICE, NULL, US_FL_FIX_INQUIRY | US_FL_SINGLE_LUN),
 
 /*
- * Reported by Ondrej Zary <linux@rainbow-software.org>
+ * Reported by Ondrej Zary <linux@zary.sk>
  * The device reports one sector more and breaks when that sector is accessed
+ * Firmwares older than 2.6c (the latest one and the only that claims Linux
+ * support) have also broken tag handling
  */
+UNUSUAL_DEV(  0x04ce, 0x0002, 0x0000, 0x026b,
+               "ScanLogic",
+               "SL11R-IDE",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_FIX_CAPACITY | US_FL_BULK_IGNORE_TAG),
 UNUSUAL_DEV(  0x04ce, 0x0002, 0x026c, 0x026c,
                "ScanLogic",
                "SL11R-IDE",
index c35a6db..4051c8c 100644 (file)
@@ -50,7 +50,7 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
                "LaCie",
                "Rugged USB3-FW",
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
-               US_FL_IGNORE_UAS),
+               US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
 
 /*
  * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
index 294ba05..bd56de7 100644 (file)
@@ -1714,6 +1714,9 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
        struct mlx5_vdpa_virtqueue *mvq;
 
+       if (!mvdev->actual_features)
+               return;
+
        if (!is_index_valid(mvdev, idx))
                return;
 
@@ -2145,6 +2148,8 @@ static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
 
        for (i = 0; i < ndev->mvdev.max_vqs; i++)
                ndev->vqs[i].ready = false;
+
+       ndev->mvdev.cvq.ready = false;
 }
 
 static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
index 29a38ec..26e3d90 100644 (file)
@@ -665,13 +665,11 @@ static void vduse_vdpa_set_config(struct vdpa_device *vdpa, unsigned int offset,
 static int vduse_vdpa_reset(struct vdpa_device *vdpa)
 {
        struct vduse_dev *dev = vdpa_to_vduse(vdpa);
-
-       if (vduse_dev_set_status(dev, 0))
-               return -EIO;
+       int ret = vduse_dev_set_status(dev, 0);
 
        vduse_dev_reset(dev);
 
-       return 0;
+       return ret;
 }
 
 static u32 vduse_vdpa_get_generation(struct vdpa_device *vdpa)
@@ -1593,8 +1591,10 @@ static int vduse_init(void)
 
        vduse_irq_wq = alloc_workqueue("vduse-irq",
                                WQ_HIGHPRI | WQ_SYSFS | WQ_UNBOUND, 0);
-       if (!vduse_irq_wq)
+       if (!vduse_irq_wq) {
+               ret = -ENOMEM;
                goto err_wq;
+       }
 
        ret = vduse_domain_init();
        if (ret)
index 3a249ee..28ef323 100644 (file)
@@ -467,7 +467,7 @@ static void vhost_tx_batch(struct vhost_net *net,
                .num = nvq->batched_xdp,
                .ptr = nvq->xdp,
        };
-       int err;
+       int i, err;
 
        if (nvq->batched_xdp == 0)
                goto signal_used;
@@ -476,6 +476,15 @@ static void vhost_tx_batch(struct vhost_net *net,
        err = sock->ops->sendmsg(sock, msghdr, 0);
        if (unlikely(err < 0)) {
                vq_err(&nvq->vq, "Fail to batch sending packets\n");
+
+               /* free pages owned by XDP; since this is an unlikely error path,
+                * keep it simple and avoid more complex bulk update for the
+                * used pages
+                */
+               for (i = 0; i < nvq->batched_xdp; ++i)
+                       put_page(virt_to_head_page(nvq->xdp[i].data));
+               nvq->batched_xdp = 0;
+               nvq->done_idx = 0;
                return;
        }
 
index f41d081..35927ce 100644 (file)
@@ -640,7 +640,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
        u64 offset, map_size, map_iova = iova;
        struct vdpa_map_file *map_file;
        struct vm_area_struct *vma;
-       int ret;
+       int ret = 0;
 
        mmap_read_lock(dev->mm);
 
index d33c5cd..b26b79d 100644 (file)
@@ -582,7 +582,9 @@ config FB_HP300
 
 config FB_TGA
        tristate "TGA/SFB+ framebuffer support"
-       depends on FB && (ALPHA || TC)
+       depends on FB
+       depends on PCI || TC
+       depends on ALPHA || TC
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
index 588e02f..0a5b540 100644 (file)
@@ -345,8 +345,13 @@ static int virtio_device_of_init(struct virtio_device *dev)
        ret = snprintf(compat, sizeof(compat), "virtio,device%x", dev->id.device);
        BUG_ON(ret >= sizeof(compat));
 
+       /*
+        * On powerpc/pseries virtio devices are PCI devices so PCI
+        * vendor/device ids play the role of the "compatible" property.
+        * Simply don't init of_node in this case.
+        */
        if (!of_device_is_compatible(np, compat)) {
-               ret = -EINVAL;
+               ret = 0;
                goto out;
        }
 
index b81fe4f..bf59fae 100644 (file)
@@ -1666,7 +1666,7 @@ config WDT_MTX1
 
 config SIBYTE_WDOG
        tristate "Sibyte SoC hardware watchdog"
-       depends on CPU_SB1 || (MIPS && COMPILE_TEST)
+       depends on CPU_SB1
        help
          Watchdog driver for the built in watchdog hardware in Sibyte
          SoC processors.  There are apparently two watchdog timers
index 5f1ce59..22f5aff 100644 (file)
@@ -177,6 +177,7 @@ config XEN_GRANT_DMA_ALLOC
 
 config SWIOTLB_XEN
        def_bool y
+       depends on XEN_PV || ARM || ARM64
        select DMA_OPS
        select SWIOTLB
 
@@ -214,7 +215,7 @@ config XEN_PVCALLS_FRONTEND
          implements them.
 
 config XEN_PVCALLS_BACKEND
-       bool "XEN PV Calls backend driver"
+       tristate "XEN PV Calls backend driver"
        depends on INET && XEN && XEN_BACKEND
        help
          Experimental backend for the Xen PV Calls protocol
index 671c712..43ebfe3 100644 (file)
@@ -43,6 +43,8 @@
 #include <linux/sched.h>
 #include <linux/cred.h>
 #include <linux/errno.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
 #include <linux/mm.h>
 #include <linux/memblock.h>
 #include <linux/pagemap.h>
@@ -115,7 +117,7 @@ static struct ctl_table xen_root[] = {
 #define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
 
 /*
- * balloon_process() state:
+ * balloon_thread() state:
  *
  * BP_DONE: done or nothing to do,
  * BP_WAIT: wait to be rescheduled,
@@ -130,6 +132,8 @@ enum bp_state {
        BP_ECANCELED
 };
 
+/* Main waiting point for xen-balloon thread. */
+static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);
 
 static DEFINE_MUTEX(balloon_mutex);
 
@@ -144,10 +148,6 @@ static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
 static LIST_HEAD(ballooned_pages);
 static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
 
-/* Main work function, always executed in process context. */
-static void balloon_process(struct work_struct *work);
-static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
-
 /* When ballooning out (allocating memory to return to Xen) we don't really
    want the kernel to try too hard since that can trigger the oom killer. */
 #define GFP_BALLOON \
@@ -366,7 +366,7 @@ static void xen_online_page(struct page *page, unsigned int order)
 static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
 {
        if (val == MEM_ONLINE)
-               schedule_delayed_work(&balloon_worker, 0);
+               wake_up(&balloon_thread_wq);
 
        return NOTIFY_OK;
 }
@@ -491,18 +491,43 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 }
 
 /*
- * As this is a work item it is guaranteed to run as a single instance only.
+ * Stop waiting if either state is not BP_EAGAIN and ballooning action is
+ * needed, or if the credit has changed while state is BP_EAGAIN.
+ */
+static bool balloon_thread_cond(enum bp_state state, long credit)
+{
+       if (state != BP_EAGAIN)
+               credit = 0;
+
+       return current_credit() != credit || kthread_should_stop();
+}
+
+/*
+ * As this is a kthread it is guaranteed to run as a single instance only.
  * We may of course race updates of the target counts (which are protected
  * by the balloon lock), or with changes to the Xen hard limit, but we will
  * recover from these in time.
  */
-static void balloon_process(struct work_struct *work)
+static int balloon_thread(void *unused)
 {
        enum bp_state state = BP_DONE;
        long credit;
+       unsigned long timeout;
+
+       set_freezable();
+       for (;;) {
+               if (state == BP_EAGAIN)
+                       timeout = balloon_stats.schedule_delay * HZ;
+               else
+                       timeout = 3600 * HZ;
+               credit = current_credit();
 
+               wait_event_freezable_timeout(balloon_thread_wq,
+                       balloon_thread_cond(state, credit), timeout);
+
+               if (kthread_should_stop())
+                       return 0;
 
-       do {
                mutex_lock(&balloon_mutex);
 
                credit = current_credit();
@@ -529,12 +554,7 @@ static void balloon_process(struct work_struct *work)
                mutex_unlock(&balloon_mutex);
 
                cond_resched();
-
-       } while (credit && state == BP_DONE);
-
-       /* Schedule more work if there is some still to be done. */
-       if (state == BP_EAGAIN)
-               schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
+       }
 }
 
 /* Resets the Xen limit, sets new target, and kicks off processing. */
@@ -542,7 +562,7 @@ void balloon_set_new_target(unsigned long target)
 {
        /* No need for lock. Not read-modify-write updates. */
        balloon_stats.target_pages = target;
-       schedule_delayed_work(&balloon_worker, 0);
+       wake_up(&balloon_thread_wq);
 }
 EXPORT_SYMBOL_GPL(balloon_set_new_target);
 
@@ -647,7 +667,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
 
        /* The balloon may be too large now. Shrink it if needed. */
        if (current_credit())
-               schedule_delayed_work(&balloon_worker, 0);
+               wake_up(&balloon_thread_wq);
 
        mutex_unlock(&balloon_mutex);
 }
@@ -679,6 +699,8 @@ static void __init balloon_add_region(unsigned long start_pfn,
 
 static int __init balloon_init(void)
 {
+       struct task_struct *task;
+
        if (!xen_domain())
                return -ENODEV;
 
@@ -722,6 +744,12 @@ static int __init balloon_init(void)
        }
 #endif
 
+       task = kthread_run(balloon_thread, NULL, "xen-balloon");
+       if (IS_ERR(task)) {
+               pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
+               return PTR_ERR(task);
+       }
+
        /* Init the xen-balloon driver. */
        xen_balloon_init();
 
index 1e7f6b1..fec1b65 100644 (file)
@@ -381,6 +381,14 @@ static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
                        map->unmap_ops[offset+i].handle,
                        map->unmap_ops[offset+i].status);
                map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
+               if (use_ptemod) {
+                       if (map->kunmap_ops[offset+i].status)
+                               err = -EINVAL;
+                       pr_debug("kunmap handle=%u st=%d\n",
+                                map->kunmap_ops[offset+i].handle,
+                                map->kunmap_ops[offset+i].status);
+                       map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
+               }
        }
        return err;
 }
index 643fe44..e56a5fa 100644 (file)
@@ -106,27 +106,26 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
 
 static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 {
-       int i, rc;
-       int dma_bits;
+       int rc;
+       unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
+       unsigned int i, dma_bits = order + PAGE_SHIFT;
        dma_addr_t dma_handle;
        phys_addr_t p = virt_to_phys(buf);
 
-       dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
+       BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
+       BUG_ON(nslabs % IO_TLB_SEGSIZE);
 
        i = 0;
        do {
-               int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);
-
                do {
                        rc = xen_create_contiguous_region(
-                               p + (i << IO_TLB_SHIFT),
-                               get_order(slabs << IO_TLB_SHIFT),
+                               p + (i << IO_TLB_SHIFT), order,
                                dma_bits, &dma_handle);
                } while (rc && dma_bits++ < MAX_DMA_BITS);
                if (rc)
                        return rc;
 
-               i += slabs;
+               i += IO_TLB_SEGSIZE;
        } while (i < nslabs);
        return 0;
 }
@@ -153,9 +152,7 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
        return "";
 }
 
-#define DEFAULT_NSLABS         ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
-
-int __ref xen_swiotlb_init(void)
+int xen_swiotlb_init(void)
 {
        enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
        unsigned long bytes = swiotlb_size_or_default();
@@ -185,7 +182,7 @@ retry:
                order--;
        }
        if (!start)
-               goto error;
+               goto exit;
        if (order != get_order(bytes)) {
                pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
                        (PAGE_SIZE << order) >> 20);
@@ -208,15 +205,15 @@ retry:
        swiotlb_set_max_segment(PAGE_SIZE);
        return 0;
 error:
-       if (repeat--) {
+       if (nslabs > 1024 && repeat--) {
                /* Min is 2MB */
-               nslabs = max(1024UL, (nslabs >> 1));
-               pr_info("Lowering to %luMB\n",
-                       (nslabs << IO_TLB_SHIFT) >> 20);
+               nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
+               bytes = nslabs << IO_TLB_SHIFT;
+               pr_info("Lowering to %luMB\n", bytes >> 20);
                goto retry;
        }
+exit:
        pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
-       free_pages((unsigned long)start, order);
        return rc;
 }
 
@@ -233,10 +230,11 @@ retry:
        /*
         * Get IO TLB memory from any location.
         */
-       start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
+       start = memblock_alloc(PAGE_ALIGN(bytes),
+                              IO_TLB_SEGSIZE << IO_TLB_SHIFT);
        if (!start)
-               panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-                     __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
+               panic("%s: Failed to allocate %lu bytes\n",
+                     __func__, PAGE_ALIGN(bytes));
 
        /*
         * And replace that memory with pages under 4GB.
@@ -244,9 +242,9 @@ retry:
        rc = xen_swiotlb_fixup(start, nslabs);
        if (rc) {
                memblock_free(__pa(start), PAGE_ALIGN(bytes));
-               if (repeat--) {
+               if (nslabs > 1024 && repeat--) {
                        /* Min is 2MB */
-                       nslabs = max(1024UL, (nslabs >> 1));
+                       nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
                        bytes = nslabs << IO_TLB_SHIFT;
                        pr_info("Lowering to %luMB\n", bytes >> 20);
                        goto retry;
@@ -254,7 +252,7 @@ retry:
                panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
        }
 
-       if (swiotlb_init_with_tbl(start, nslabs, false))
+       if (swiotlb_init_with_tbl(start, nslabs, true))
                panic("Cannot allocate SWIOTLB buffer");
        swiotlb_set_max_segment(PAGE_SIZE);
 }
index 7d9b23d..1b4d580 100644 (file)
 #include "internal.h"
 
 /*
+ * Handle invalidation of an mmap'd file.  We invalidate all the PTEs referring
+ * to the pages in this file's pagecache, forcing the kernel to go through
+ * ->fault() or ->page_mkwrite() - at which point we can handle invalidation
+ * more fully.
+ */
+void afs_invalidate_mmap_work(struct work_struct *work)
+{
+       struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work);
+
+       unmap_mapping_pages(vnode->vfs_inode.i_mapping, 0, 0, false);
+}
+
+void afs_server_init_callback_work(struct work_struct *work)
+{
+       struct afs_server *server = container_of(work, struct afs_server, initcb_work);
+       struct afs_vnode *vnode;
+       struct afs_cell *cell = server->cell;
+
+       down_read(&cell->fs_open_mmaps_lock);
+
+       list_for_each_entry(vnode, &cell->fs_open_mmaps, cb_mmap_link) {
+               if (vnode->cb_server == server) {
+                       clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+                       queue_work(system_unbound_wq, &vnode->cb_work);
+               }
+       }
+
+       up_read(&cell->fs_open_mmaps_lock);
+}
+
+/*
  * Allow the fileserver to request callback state (re-)initialisation.
  * Unfortunately, UUIDs are not guaranteed unique.
  */
@@ -29,8 +60,11 @@ void afs_init_callback_state(struct afs_server *server)
        rcu_read_lock();
        do {
                server->cb_s_break++;
-               server = rcu_dereference(server->uuid_next);
-       } while (0);
+               atomic_inc(&server->cell->fs_s_break);
+               if (!list_empty(&server->cell->fs_open_mmaps))
+                       queue_work(system_unbound_wq, &server->initcb_work);
+
+       } while ((server = rcu_dereference(server->uuid_next)));
        rcu_read_unlock();
 }
 
@@ -44,11 +78,17 @@ void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reas
        clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
        if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
                vnode->cb_break++;
+               vnode->cb_v_break = vnode->volume->cb_v_break;
                afs_clear_permits(vnode);
 
                if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
                        afs_lock_may_be_available(vnode);
 
+               if (reason != afs_cb_break_for_deleted &&
+                   vnode->status.type == AFS_FTYPE_FILE &&
+                   atomic_read(&vnode->cb_nr_mmap))
+                       queue_work(system_unbound_wq, &vnode->cb_work);
+
                trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true);
        } else {
                trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, false);
index 887b673..d88407f 100644 (file)
@@ -166,6 +166,8 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
        seqlock_init(&cell->volume_lock);
        cell->fs_servers = RB_ROOT;
        seqlock_init(&cell->fs_lock);
+       INIT_LIST_HEAD(&cell->fs_open_mmaps);
+       init_rwsem(&cell->fs_open_mmaps_lock);
        rwlock_init(&cell->vl_servers_lock);
        cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);
 
index ac829e6..4579bbd 100644 (file)
@@ -1077,9 +1077,9 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
  */
 static int afs_d_revalidate_rcu(struct dentry *dentry)
 {
-       struct afs_vnode *dvnode, *vnode;
+       struct afs_vnode *dvnode;
        struct dentry *parent;
-       struct inode *dir, *inode;
+       struct inode *dir;
        long dir_version, de_version;
 
        _enter("%p", dentry);
@@ -1109,18 +1109,6 @@ static int afs_d_revalidate_rcu(struct dentry *dentry)
                        return -ECHILD;
        }
 
-       /* Check to see if the vnode referred to by the dentry still
-        * has a callback.
-        */
-       if (d_really_is_positive(dentry)) {
-               inode = d_inode_rcu(dentry);
-               if (inode) {
-                       vnode = AFS_FS_I(inode);
-                       if (!afs_check_validity(vnode))
-                               return -ECHILD;
-               }
-       }
-
        return 1; /* Still valid */
 }
 
@@ -1156,17 +1144,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
        if (IS_ERR(key))
                key = NULL;
 
-       if (d_really_is_positive(dentry)) {
-               inode = d_inode(dentry);
-               if (inode) {
-                       vnode = AFS_FS_I(inode);
-                       afs_validate(vnode, key);
-                       if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
-                               goto out_bad;
-               }
-       }
-
-       /* lock down the parent dentry so we can peer at it */
+       /* Hold the parent dentry so we can peer at it */
        parent = dget_parent(dentry);
        dir = AFS_FS_I(d_inode(parent));
 
@@ -1175,7 +1153,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
 
        if (test_bit(AFS_VNODE_DELETED, &dir->flags)) {
                _debug("%pd: parent dir deleted", dentry);
-               goto out_bad_parent;
+               goto not_found;
        }
 
        /* We only need to invalidate a dentry if the server's copy changed
@@ -1201,12 +1179,12 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
        case 0:
                /* the filename maps to something */
                if (d_really_is_negative(dentry))
-                       goto out_bad_parent;
+                       goto not_found;
                inode = d_inode(dentry);
                if (is_bad_inode(inode)) {
                        printk("kAFS: afs_d_revalidate: %pd2 has bad inode\n",
                               dentry);
-                       goto out_bad_parent;
+                       goto not_found;
                }
 
                vnode = AFS_FS_I(inode);
@@ -1228,9 +1206,6 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
                               dentry, fid.unique,
                               vnode->fid.unique,
                               vnode->vfs_inode.i_generation);
-                       write_seqlock(&vnode->cb_lock);
-                       set_bit(AFS_VNODE_DELETED, &vnode->flags);
-                       write_sequnlock(&vnode->cb_lock);
                        goto not_found;
                }
                goto out_valid;
@@ -1245,7 +1220,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
        default:
                _debug("failed to iterate dir %pd: %d",
                       parent, ret);
-               goto out_bad_parent;
+               goto not_found;
        }
 
 out_valid:
@@ -1256,16 +1231,9 @@ out_valid_noupdate:
        _leave(" = 1 [valid]");
        return 1;
 
-       /* the dirent, if it exists, now points to a different vnode */
 not_found:
-       spin_lock(&dentry->d_lock);
-       dentry->d_flags |= DCACHE_NFSFS_RENAMED;
-       spin_unlock(&dentry->d_lock);
-
-out_bad_parent:
        _debug("dropping dentry %pd2", dentry);
        dput(parent);
-out_bad:
        key_put(key);
 
        _leave(" = 0 [bad]");
@@ -1792,6 +1760,10 @@ static int afs_link(struct dentry *from, struct inode *dir,
                goto error;
        }
 
+       ret = afs_validate(vnode, op->key);
+       if (ret < 0)
+               goto error_op;
+
        afs_op_set_vnode(op, 0, dvnode);
        afs_op_set_vnode(op, 1, vnode);
        op->file[0].dv_delta = 1;
@@ -1805,6 +1777,8 @@ static int afs_link(struct dentry *from, struct inode *dir,
        op->create.reason       = afs_edit_dir_for_link;
        return afs_do_sync_operation(op);
 
+error_op:
+       afs_put_operation(op);
 error:
        d_drop(dentry);
        _leave(" = %d", ret);
@@ -1989,6 +1963,11 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
        if (IS_ERR(op))
                return PTR_ERR(op);
 
+       ret = afs_validate(vnode, op->key);
+       op->error = ret;
+       if (ret < 0)
+               goto error;
+
        afs_op_set_vnode(op, 0, orig_dvnode);
        afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
        op->file[0].dv_delta = 1;
index f4600c1..540b9fc 100644 (file)
@@ -263,7 +263,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
                if (b == nr_blocks) {
                        _debug("init %u", b);
                        afs_edit_init_block(meta, block, b);
-                       i_size_write(&vnode->vfs_inode, (b + 1) * AFS_DIR_BLOCK_SIZE);
+                       afs_set_i_size(vnode, (b + 1) * AFS_DIR_BLOCK_SIZE);
                }
 
                /* Only lower dir pages have a counter in the header. */
@@ -296,7 +296,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
 new_directory:
        afs_edit_init_block(meta, meta, 0);
        i_size = AFS_DIR_BLOCK_SIZE;
-       i_size_write(&vnode->vfs_inode, i_size);
+       afs_set_i_size(vnode, i_size);
        slot = AFS_DIR_RESV_BLOCKS0;
        page = page0;
        block = meta;
index db035ae..e6c447a 100644 (file)
@@ -24,12 +24,16 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
 static int afs_releasepage(struct page *page, gfp_t gfp_flags);
 
 static void afs_readahead(struct readahead_control *ractl);
+static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+static void afs_vm_open(struct vm_area_struct *area);
+static void afs_vm_close(struct vm_area_struct *area);
+static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff);
 
 const struct file_operations afs_file_operations = {
        .open           = afs_open,
        .release        = afs_release,
        .llseek         = generic_file_llseek,
-       .read_iter      = generic_file_read_iter,
+       .read_iter      = afs_file_read_iter,
        .write_iter     = afs_file_write,
        .mmap           = afs_file_mmap,
        .splice_read    = generic_file_splice_read,
@@ -59,8 +63,10 @@ const struct address_space_operations afs_fs_aops = {
 };
 
 static const struct vm_operations_struct afs_vm_ops = {
+       .open           = afs_vm_open,
+       .close          = afs_vm_close,
        .fault          = filemap_fault,
-       .map_pages      = filemap_map_pages,
+       .map_pages      = afs_vm_map_pages,
        .page_mkwrite   = afs_page_mkwrite,
 };
 
@@ -295,7 +301,7 @@ static void afs_req_issue_op(struct netfs_read_subrequest *subreq)
        fsreq->subreq   = subreq;
        fsreq->pos      = subreq->start + subreq->transferred;
        fsreq->len      = subreq->len   - subreq->transferred;
-       fsreq->key      = subreq->rreq->netfs_priv;
+       fsreq->key      = key_get(subreq->rreq->netfs_priv);
        fsreq->vnode    = vnode;
        fsreq->iter     = &fsreq->def_iter;
 
@@ -304,6 +310,7 @@ static void afs_req_issue_op(struct netfs_read_subrequest *subreq)
                        fsreq->pos, fsreq->len);
 
        afs_fetch_data(fsreq->vnode, fsreq);
+       afs_put_read(fsreq);
 }
 
 static int afs_symlink_readpage(struct page *page)
@@ -490,15 +497,88 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
        return 1;
 }
 
+static void afs_add_open_mmap(struct afs_vnode *vnode)
+{
+       if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
+               down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+
+               list_add_tail(&vnode->cb_mmap_link,
+                             &vnode->volume->cell->fs_open_mmaps);
+
+               up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+       }
+}
+
+static void afs_drop_open_mmap(struct afs_vnode *vnode)
+{
+       if (!atomic_dec_and_test(&vnode->cb_nr_mmap))
+               return;
+
+       down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+
+       if (atomic_read(&vnode->cb_nr_mmap) == 0)
+               list_del_init(&vnode->cb_mmap_link);
+
+       up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+       flush_work(&vnode->cb_work);
+}
+
 /*
  * Handle setting up a memory mapping on an AFS file.
  */
 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
+       struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        int ret;
 
+       afs_add_open_mmap(vnode);
+
        ret = generic_file_mmap(file, vma);
        if (ret == 0)
                vma->vm_ops = &afs_vm_ops;
+       else
+               afs_drop_open_mmap(vnode);
        return ret;
 }
+
+static void afs_vm_open(struct vm_area_struct *vma)
+{
+       afs_add_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
+}
+
+static void afs_vm_close(struct vm_area_struct *vma)
+{
+       afs_drop_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
+}
+
+static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff)
+{
+       struct afs_vnode *vnode = AFS_FS_I(file_inode(vmf->vma->vm_file));
+       struct afs_file *af = vmf->vma->vm_file->private_data;
+
+       switch (afs_validate(vnode, af->key)) {
+       case 0:
+               return filemap_map_pages(vmf, start_pgoff, end_pgoff);
+       case -ENOMEM:
+               return VM_FAULT_OOM;
+       case -EINTR:
+       case -ERESTARTSYS:
+               return VM_FAULT_RETRY;
+       case -ESTALE:
+       default:
+               return VM_FAULT_SIGBUS;
+       }
+}
+
+static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
+       struct afs_file *af = iocb->ki_filp->private_data;
+       int ret;
+
+       ret = afs_validate(vnode, af->key);
+       if (ret < 0)
+               return ret;
+
+       return generic_file_read_iter(iocb, iter);
+}
index e7e98ad..c0031a3 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/slab.h>
 #include "afs_fs.h"
 #include "internal.h"
+#include "protocol_afs.h"
 #include "protocol_yfs.h"
 
 static unsigned int afs_fs_probe_fast_poll_interval = 30 * HZ;
@@ -102,7 +103,7 @@ void afs_fileserver_probe_result(struct afs_call *call)
        struct afs_addr_list *alist = call->alist;
        struct afs_server *server = call->server;
        unsigned int index = call->addr_ix;
-       unsigned int rtt_us = 0;
+       unsigned int rtt_us = 0, cap0;
        int ret = call->error;
 
        _enter("%pU,%u", &server->uuid, index);
@@ -159,6 +160,11 @@ responded:
                        clear_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
                        alist->addrs[index].srx_service = call->service_id;
                }
+               cap0 = ntohl(call->tmp);
+               if (cap0 & AFS3_VICED_CAPABILITY_64BITFILES)
+                       set_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
+               else
+                       clear_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
        }
 
        if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
index dd3f45d..4943413 100644 (file)
@@ -456,9 +456,7 @@ void afs_fs_fetch_data(struct afs_operation *op)
        struct afs_read *req = op->fetch.req;
        __be32 *bp;
 
-       if (upper_32_bits(req->pos) ||
-           upper_32_bits(req->len) ||
-           upper_32_bits(req->pos + req->len))
+       if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
                return afs_fs_fetch_data64(op);
 
        _enter("");
@@ -1113,9 +1111,7 @@ void afs_fs_store_data(struct afs_operation *op)
               (unsigned long long)op->store.pos,
               (unsigned long long)op->store.i_size);
 
-       if (upper_32_bits(op->store.pos) ||
-           upper_32_bits(op->store.size) ||
-           upper_32_bits(op->store.i_size))
+       if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
                return afs_fs_store_data64(op);
 
        call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData,
@@ -1229,7 +1225,7 @@ static void afs_fs_setattr_size(struct afs_operation *op)
               key_serial(op->key), vp->fid.vid, vp->fid.vnode);
 
        ASSERT(attr->ia_valid & ATTR_SIZE);
-       if (upper_32_bits(attr->ia_size))
+       if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
                return afs_fs_setattr_size64(op);
 
        call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData_as_Status,
@@ -1657,20 +1653,33 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
                        return ret;
 
                count = ntohl(call->tmp);
-
                call->count = count;
                call->count2 = count;
-               afs_extract_discard(call, count * sizeof(__be32));
+               if (count == 0) {
+                       call->unmarshall = 4;
+                       call->tmp = 0;
+                       break;
+               }
+
+               /* Extract the first word of the capabilities to call->tmp */
+               afs_extract_to_tmp(call);
                call->unmarshall++;
                fallthrough;
 
-               /* Extract capabilities words */
        case 2:
                ret = afs_extract_data(call, false);
                if (ret < 0)
                        return ret;
 
-               /* TODO: Examine capabilities */
+               afs_extract_discard(call, (count - 1) * sizeof(__be32));
+               call->unmarshall++;
+               fallthrough;
+
+               /* Extract remaining capabilities words */
+       case 3:
+               ret = afs_extract_data(call, false);
+               if (ret < 0)
+                       return ret;
 
                call->unmarshall++;
                break;
index 80b6c8d..8fcffea 100644 (file)
@@ -54,16 +54,6 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
 }
 
 /*
- * Set the file size and block count.  Estimate the number of 512 bytes blocks
- * used, rounded up to nearest 1K for consistency with other AFS clients.
- */
-static void afs_set_i_size(struct afs_vnode *vnode, u64 size)
-{
-       i_size_write(&vnode->vfs_inode, size);
-       vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
-}
-
-/*
  * Initialise an inode from the vnode status.
  */
 static int afs_inode_init_from_status(struct afs_operation *op,
@@ -587,22 +577,32 @@ static void afs_zap_data(struct afs_vnode *vnode)
 }
 
 /*
- * Get the server reinit counter for a vnode's current server.
+ * Check to see if we have a server currently serving this volume and that it
+ * hasn't been reinitialised or dropped from the list.
  */
-static bool afs_get_s_break_rcu(struct afs_vnode *vnode, unsigned int *_s_break)
+static bool afs_check_server_good(struct afs_vnode *vnode)
 {
-       struct afs_server_list *slist = rcu_dereference(vnode->volume->servers);
+       struct afs_server_list *slist;
        struct afs_server *server;
+       bool good;
        int i;
 
+       if (vnode->cb_fs_s_break == atomic_read(&vnode->volume->cell->fs_s_break))
+               return true;
+
+       rcu_read_lock();
+
+       slist = rcu_dereference(vnode->volume->servers);
        for (i = 0; i < slist->nr_servers; i++) {
                server = slist->servers[i].server;
                if (server == vnode->cb_server) {
-                       *_s_break = READ_ONCE(server->cb_s_break);
-                       return true;
+                       good = (vnode->cb_s_break == server->cb_s_break);
+                       rcu_read_unlock();
+                       return good;
                }
        }
 
+       rcu_read_unlock();
        return false;
 }
 
@@ -611,57 +611,46 @@ static bool afs_get_s_break_rcu(struct afs_vnode *vnode, unsigned int *_s_break)
  */
 bool afs_check_validity(struct afs_vnode *vnode)
 {
-       struct afs_volume *volume = vnode->volume;
        enum afs_cb_break_reason need_clear = afs_cb_break_no_break;
        time64_t now = ktime_get_real_seconds();
-       bool valid;
-       unsigned int cb_break, cb_s_break, cb_v_break;
+       unsigned int cb_break;
        int seq = 0;
 
        do {
                read_seqbegin_or_lock(&vnode->cb_lock, &seq);
-               cb_v_break = READ_ONCE(volume->cb_v_break);
                cb_break = vnode->cb_break;
 
-               if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) &&
-                   afs_get_s_break_rcu(vnode, &cb_s_break)) {
-                       if (vnode->cb_s_break != cb_s_break ||
-                           vnode->cb_v_break != cb_v_break) {
-                               vnode->cb_s_break = cb_s_break;
-                               vnode->cb_v_break = cb_v_break;
-                               need_clear = afs_cb_break_for_vsbreak;
-                               valid = false;
-                       } else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
+               if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
+                       if (vnode->cb_v_break != vnode->volume->cb_v_break)
+                               need_clear = afs_cb_break_for_v_break;
+                       else if (!afs_check_server_good(vnode))
+                               need_clear = afs_cb_break_for_s_reinit;
+                       else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
                                need_clear = afs_cb_break_for_zap;
-                               valid = false;
-                       } else if (vnode->cb_expires_at - 10 <= now) {
+                       else if (vnode->cb_expires_at - 10 <= now)
                                need_clear = afs_cb_break_for_lapsed;
-                               valid = false;
-                       } else {
-                               valid = true;
-                       }
                } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
-                       valid = true;
+                       ;
                } else {
-                       vnode->cb_v_break = cb_v_break;
-                       valid = false;
+                       need_clear = afs_cb_break_no_promise;
                }
 
        } while (need_seqretry(&vnode->cb_lock, seq));
 
        done_seqretry(&vnode->cb_lock, seq);
 
-       if (need_clear != afs_cb_break_no_break) {
-               write_seqlock(&vnode->cb_lock);
-               if (cb_break == vnode->cb_break)
-                       __afs_break_callback(vnode, need_clear);
-               else
-                       trace_afs_cb_miss(&vnode->fid, need_clear);
-               write_sequnlock(&vnode->cb_lock);
-               valid = false;
-       }
+       if (need_clear == afs_cb_break_no_break)
+               return true;
 
-       return valid;
+       write_seqlock(&vnode->cb_lock);
+       if (need_clear == afs_cb_break_no_promise)
+               vnode->cb_v_break = vnode->volume->cb_v_break;
+       else if (cb_break == vnode->cb_break)
+               __afs_break_callback(vnode, need_clear);
+       else
+               trace_afs_cb_miss(&vnode->fid, need_clear);
+       write_sequnlock(&vnode->cb_lock);
+       return false;
 }
 
 /*
@@ -675,21 +664,20 @@ bool afs_check_validity(struct afs_vnode *vnode)
  */
 int afs_validate(struct afs_vnode *vnode, struct key *key)
 {
-       bool valid;
        int ret;
 
        _enter("{v={%llx:%llu} fl=%lx},%x",
               vnode->fid.vid, vnode->fid.vnode, vnode->flags,
               key_serial(key));
 
-       rcu_read_lock();
-       valid = afs_check_validity(vnode);
-       rcu_read_unlock();
-
-       if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
-               clear_nlink(&vnode->vfs_inode);
+       if (unlikely(test_bit(AFS_VNODE_DELETED, &vnode->flags))) {
+               if (vnode->vfs_inode.i_nlink)
+                       clear_nlink(&vnode->vfs_inode);
+               goto valid;
+       }
 
-       if (valid)
+       if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) &&
+           afs_check_validity(vnode))
                goto valid;
 
        down_write(&vnode->validate_lock);
index 5ed416f..0ad97a8 100644 (file)
@@ -390,6 +390,9 @@ struct afs_cell {
        /* Active fileserver interaction state. */
        struct rb_root          fs_servers;     /* afs_server (by server UUID) */
        seqlock_t               fs_lock;        /* For fs_servers  */
+       struct rw_semaphore     fs_open_mmaps_lock;
+       struct list_head        fs_open_mmaps;  /* List of vnodes that are mmapped */
+       atomic_t                fs_s_break;     /* Counter of CB.InitCallBackState messages */
 
        /* VL server list. */
        rwlock_t                vl_servers_lock; /* Lock on vl_servers */
@@ -503,6 +506,7 @@ struct afs_server {
        struct hlist_node       addr4_link;     /* Link in net->fs_addresses4 */
        struct hlist_node       addr6_link;     /* Link in net->fs_addresses6 */
        struct hlist_node       proc_link;      /* Link in net->fs_proc */
+       struct work_struct      initcb_work;    /* Work for CB.InitCallBackState* */
        struct afs_server       *gc_next;       /* Next server in manager's list */
        time64_t                unuse_time;     /* Time at which last unused */
        unsigned long           flags;
@@ -516,6 +520,7 @@ struct afs_server {
 #define AFS_SERVER_FL_IS_YFS   16              /* Server is YFS not AFS */
 #define AFS_SERVER_FL_NO_IBULK 17              /* Fileserver doesn't support FS.InlineBulkStatus */
 #define AFS_SERVER_FL_NO_RM2   18              /* Fileserver doesn't support YFS.RemoveFile2 */
+#define AFS_SERVER_FL_HAS_FS64 19              /* Fileserver supports FS.{Fetch,Store}Data64 */
        atomic_t                ref;            /* Object refcount */
        atomic_t                active;         /* Active user count */
        u32                     addr_version;   /* Address list version */
@@ -657,7 +662,11 @@ struct afs_vnode {
        afs_lock_type_t         lock_type : 8;
 
        /* outstanding callback notification on this file */
+       struct work_struct      cb_work;        /* Work for mmap'd files */
+       struct list_head        cb_mmap_link;   /* Link in cell->fs_open_mmaps */
        void                    *cb_server;     /* Server with callback/filelock */
+       atomic_t                cb_nr_mmap;     /* Number of mmaps */
+       unsigned int            cb_fs_s_break;  /* Mass server break counter (cell->fs_s_break) */
        unsigned int            cb_s_break;     /* Mass break counter on ->server */
        unsigned int            cb_v_break;     /* Mass break counter on ->volume */
        unsigned int            cb_break;       /* Break counter on vnode */
@@ -965,6 +974,8 @@ extern struct fscache_cookie_def afs_vnode_cache_index_def;
 /*
  * callback.c
  */
+extern void afs_invalidate_mmap_work(struct work_struct *);
+extern void afs_server_init_callback_work(struct work_struct *work);
 extern void afs_init_callback_state(struct afs_server *);
 extern void __afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
 extern void afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
@@ -1586,6 +1597,16 @@ static inline void afs_update_dentry_version(struct afs_operation *op,
 }
 
 /*
+ * Set the file size and block count.  Estimate the number of 512 bytes blocks
+ * used, rounded up to nearest 1K for consistency with other AFS clients.
+ */
+static inline void afs_set_i_size(struct afs_vnode *vnode, u64 size)
+{
+       i_size_write(&vnode->vfs_inode, size);
+       vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
+}
+
+/*
  * Check for a conflicting operation on a directory that we just unlinked from.
  * If someone managed to sneak a link or an unlink in on the file we just
  * unlinked, we won't be able to trust nlink on an AFS file (but not YFS).
diff --git a/fs/afs/protocol_afs.h b/fs/afs/protocol_afs.h
new file mode 100644 (file)
index 0000000..0c39358
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* AFS protocol bits
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+
+#define AFSCAPABILITIESMAX 196 /* Maximum number of words in a capability set */
+
+/* AFS3 Fileserver capabilities word 0 */
+#define AFS3_VICED_CAPABILITY_ERRORTRANS       0x0001 /* Uses UAE errors */
+#define AFS3_VICED_CAPABILITY_64BITFILES       0x0002 /* FetchData64 & StoreData64 supported */
+#define AFS3_VICED_CAPABILITY_WRITELOCKACL     0x0004 /* Can lock a file even without lock perm */
+#define AFS3_VICED_CAPABILITY_SANEACLS         0x0008 /* ACLs reviewed for sanity - don't use */
index b5bd03b..e4cd89c 100644 (file)
@@ -168,3 +168,9 @@ enum yfs_lock_type {
        yfs_LockMandatoryWrite  = 0x101,
        yfs_LockMandatoryExtend = 0x102,
 };
+
+/* RXYFS Viced Capability Flags */
+#define YFS_VICED_CAPABILITY_ERRORTRANS                0x0001 /* Deprecated v0.195 */
+#define YFS_VICED_CAPABILITY_64BITFILES                0x0002 /* Deprecated v0.195 */
+#define YFS_VICED_CAPABILITY_WRITELOCKACL      0x0004 /* Can lock a file even without lock perm */
+#define YFS_VICED_CAPABILITY_SANEACLS          0x0008 /* Deprecated v0.195 */
index d83f13c..79e1a5f 100644 (file)
@@ -374,6 +374,7 @@ selected_server:
        if (vnode->cb_server != server) {
                vnode->cb_server = server;
                vnode->cb_s_break = server->cb_s_break;
+               vnode->cb_fs_s_break = atomic_read(&server->cell->fs_s_break);
                vnode->cb_v_break = vnode->volume->cb_v_break;
                clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
        }
index 684a2b0..6e5b9a1 100644 (file)
@@ -235,6 +235,7 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell,
        server->addr_version = alist->version;
        server->uuid = *uuid;
        rwlock_init(&server->fs_lock);
+       INIT_WORK(&server->initcb_work, afs_server_init_callback_work);
        init_waitqueue_head(&server->probe_wq);
        INIT_LIST_HEAD(&server->probe_link);
        spin_lock_init(&server->probe_lock);
@@ -467,6 +468,7 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
        if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
                afs_give_up_callbacks(net, server);
 
+       flush_work(&server->initcb_work);
        afs_put_server(net, server, afs_server_trace_destroy);
 }
 
index e38bb1e..d110def 100644 (file)
@@ -698,6 +698,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
        vnode->lock_state       = AFS_VNODE_LOCK_NONE;
 
        init_rwsem(&vnode->rmdir_lock);
+       INIT_WORK(&vnode->cb_work, afs_invalidate_mmap_work);
 
        _leave(" = %p", &vnode->vfs_inode);
        return &vnode->vfs_inode;
index c053469..2dfe3b3 100644 (file)
@@ -137,7 +137,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
                write_seqlock(&vnode->cb_lock);
                i_size = i_size_read(&vnode->vfs_inode);
                if (maybe_i_size > i_size)
-                       i_size_write(&vnode->vfs_inode, maybe_i_size);
+                       afs_set_i_size(vnode, maybe_i_size);
                write_sequnlock(&vnode->cb_lock);
        }
 
@@ -471,13 +471,18 @@ static void afs_extend_writeback(struct address_space *mapping,
                        }
 
                        /* Has the page moved or been split? */
-                       if (unlikely(page != xas_reload(&xas)))
+                       if (unlikely(page != xas_reload(&xas))) {
+                               put_page(page);
                                break;
+                       }
 
-                       if (!trylock_page(page))
+                       if (!trylock_page(page)) {
+                               put_page(page);
                                break;
+                       }
                        if (!PageDirty(page) || PageWriteback(page)) {
                                unlock_page(page);
+                               put_page(page);
                                break;
                        }
 
@@ -487,6 +492,7 @@ static void afs_extend_writeback(struct address_space *mapping,
                        t = afs_page_dirty_to(page, priv);
                        if (f != 0 && !new_content) {
                                unlock_page(page);
+                               put_page(page);
                                break;
                        }
 
@@ -801,6 +807,7 @@ int afs_writepages(struct address_space *mapping,
 ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
 {
        struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
+       struct afs_file *af = iocb->ki_filp->private_data;
        ssize_t result;
        size_t count = iov_iter_count(from);
 
@@ -816,6 +823,10 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
        if (!count)
                return 0;
 
+       result = afs_validate(vnode, af->key);
+       if (result < 0)
+               return result;
+
        result = generic_file_write_iter(iocb, from);
 
        _leave(" = %zd", result);
@@ -829,13 +840,18 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
  */
 int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
-       struct inode *inode = file_inode(file);
-       struct afs_vnode *vnode = AFS_FS_I(inode);
+       struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+       struct afs_file *af = file->private_data;
+       int ret;
 
        _enter("{%llx:%llu},{n=%pD},%d",
               vnode->fid.vid, vnode->fid.vnode, file,
               datasync);
 
+       ret = afs_validate(vnode, af->key);
+       if (ret < 0)
+               return ret;
+
        return file_write_and_wait_range(file, start, end);
 }
 
@@ -849,11 +865,14 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
        struct file *file = vmf->vma->vm_file;
        struct inode *inode = file_inode(file);
        struct afs_vnode *vnode = AFS_FS_I(inode);
+       struct afs_file *af = file->private_data;
        unsigned long priv;
        vm_fault_t ret = VM_FAULT_RETRY;
 
        _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);
 
+       afs_validate(vnode, af->key);
+
        sb_start_pagefault(inode->i_sb);
 
        /* Wait for the page to be written to the cache before we allow it to
index 2673c6b..0b9401a 100644 (file)
@@ -665,7 +665,18 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
 
                if (!ordered) {
                        ordered = btrfs_lookup_ordered_extent(inode, offset);
-                       BUG_ON(!ordered); /* Logic error */
+                       /*
+                        * The bio range is not covered by any ordered extent,
+                        * must be a code logic error.
+                        */
+                       if (unlikely(!ordered)) {
+                               WARN(1, KERN_WARNING
+                       "no ordered extent for root %llu ino %llu offset %llu\n",
+                                    inode->root->root_key.objectid,
+                                    btrfs_ino(inode), offset);
+                               kvfree(sums);
+                               return BLK_STS_IOERR;
+                       }
                }
 
                nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
index 5ada02e..aa5be0b 100644 (file)
@@ -414,9 +414,10 @@ static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
 {
        lockdep_assert_held(&info->lock);
 
-       btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
+       /* The free space could be negative in case of overcommit */
+       btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull",
                   info->flags,
-                  info->total_bytes - btrfs_space_info_used(info, true),
+                  (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
                   info->full ? "" : "not ");
        btrfs_info(fs_info,
                "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
index 28d443d..4968535 100644 (file)
@@ -451,7 +451,7 @@ static int del_orphan(struct btrfs_trans_handle *trans, struct btrfs_inode *inod
  */
 static int rollback_verity(struct btrfs_inode *inode)
 {
-       struct btrfs_trans_handle *trans;
+       struct btrfs_trans_handle *trans = NULL;
        struct btrfs_root *root = inode->root;
        int ret;
 
@@ -473,6 +473,7 @@ static int rollback_verity(struct btrfs_inode *inode)
        trans = btrfs_start_transaction(root, 2);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
+               trans = NULL;
                btrfs_handle_fs_error(root->fs_info, ret,
                        "failed to start transaction in verity rollback %llu",
                        (u64)inode->vfs_inode.i_ino);
@@ -490,8 +491,9 @@ static int rollback_verity(struct btrfs_inode *inode)
                btrfs_abort_transaction(trans, ret);
                goto out;
        }
-       btrfs_end_transaction(trans);
 out:
+       if (trans)
+               btrfs_end_transaction(trans);
        return ret;
 }
 
index 464485a..2ec3b8a 100644 (file)
@@ -1137,6 +1137,19 @@ static void btrfs_close_one_device(struct btrfs_device *device)
        atomic_set(&device->dev_stats_ccnt, 0);
        extent_io_tree_release(&device->alloc_state);
 
+       /*
+        * Reset the flush error record. We might have a transient flush error
+        * in this mount, and if so we aborted the current transaction and set
+        * the fs to an error state, guaranteeing no super blocks can be further
+        * committed. However that error might be transient and if we unmount the
+        * filesystem and mount it again, we should allow the mount to succeed
+        * (btrfs_check_rw_degradable() should not fail) - if after mounting the
+        * filesystem again we still get flush errors, then we will again abort
+        * any transaction and set the error state, guaranteeing no commits of
+        * unsafe super blocks.
+        */
+       device->last_flush_error = 0;
+
        /* Verify the device is back in a pristine state  */
        ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
        ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
index ab7573d..c615387 100644 (file)
@@ -1425,12 +1425,16 @@ void invalidate_bh_lrus(void)
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
-void invalidate_bh_lrus_cpu(int cpu)
+/*
+ * It's called from workqueue context so we need a bh_lru_lock to close
+ * the race with preemption/irq.
+ */
+void invalidate_bh_lrus_cpu(void)
 {
        struct bh_lru *b;
 
        bh_lru_lock();
-       b = per_cpu_ptr(&bh_lrus, cpu);
+       b = this_cpu_ptr(&bh_lrus);
        __invalidate_bh_lrus(b);
        bh_lru_unlock();
 }
index 6c0e52f..3e42d04 100644 (file)
@@ -2263,7 +2263,7 @@ retry:
                        list_for_each_entry(req, &ci->i_unsafe_dirops,
                                            r_unsafe_dir_item) {
                                s = req->r_session;
-                               if (unlikely(s->s_mds > max)) {
+                               if (unlikely(s->s_mds >= max)) {
                                        spin_unlock(&ci->i_unsafe_lock);
                                        goto retry;
                                }
@@ -2277,7 +2277,7 @@ retry:
                        list_for_each_entry(req, &ci->i_unsafe_iops,
                                            r_unsafe_target_item) {
                                s = req->r_session;
-                               if (unlikely(s->s_mds > max)) {
+                               if (unlikely(s->s_mds >= max)) {
                                        spin_unlock(&ci->i_unsafe_lock);
                                        goto retry;
                                }
index 8a3b30e..8be57aa 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/cache.c - CIFS filesystem cache index structure definitions
+ *   CIFS filesystem cache index structure definitions
  *
  *   Copyright (c) 2010 Novell, Inc.
  *   Authors(s): Suresh Jayaraman (sjayaraman@suse.de>
index 51a824f..de2c12b 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- *   fs/cifs_debug.c
  *
  *   Copyright (C) International Business Machines  Corp., 2000,2005
  *
index 4fd7885..f974075 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifs_fs_sb.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2004
  *   Author(s): Steve French (sfrench@us.ibm.com)
index ef723be..b87cbbe 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifs_ioctl.h
  *
  *   Structure definitions for io control for cifs/smb3
  *
index 8fa26a8..353bd0d 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/cifs_spnego.c -- SPNEGO upcall management for CIFS
+ *   SPNEGO upcall management for CIFS
  *
  *   Copyright (c) 2007 Red Hat, Inc.
  *   Author(s): Jeff Layton (jlayton@redhat.com)
index 31387d0..e6a0451 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifs_spnego.h -- SPNEGO upcall management for CIFS
+ *   SPNEGO upcall management for CIFS
  *
  *   Copyright (c) 2007 Red Hat, Inc.
  *   Author(s): Jeff Layton (jlayton@redhat.com)
index 171ad8b..e7582dd 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- *   fs/cifs/cifs_unicode.c
  *
  *   Copyright (c) International Business Machines  Corp., 2000,2009
  *   Modified by Steve French (sfrench@us.ibm.com)
index 388eb53..ee3aab3 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/cifsacl.c
  *
  *   Copyright (C) International Business Machines  Corp., 2007,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
index f8292bc..ccbfc75 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifsacl.h
  *
  *   Copyright (c) International Business Machines  Corp., 2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 2e6f403..d118282 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/cifsencrypt.c
  *
  *   Encryption and hashing operations relating to NTLM, NTLMv2.  See MS-NLMP
  *   for more detailed information
index 8c20bfa..9fa930d 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/cifsfs.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
index d25a409..b50da19 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifsfs.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002, 2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
index c068f7d..e916470 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifsglob.h
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
@@ -1400,6 +1399,7 @@ struct cifsInodeInfo {
 #define CIFS_INO_INVALID_MAPPING         (4) /* pagecache is invalid */
 #define CIFS_INO_LOCK                    (5) /* lock bit for synchronization */
 #define CIFS_INO_MODIFIED_ATTR            (6) /* Indicate change in mtime/ctime */
+#define CIFS_INO_CLOSE_ON_LOCK            (7) /* Not to defer the close when lock is set */
        unsigned long flags;
        spinlock_t writers_lock;
        unsigned int writers;           /* Number of writers on this inode */
index 98e8e5a..d2ff438 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifspdu.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2009
  *   Author(s): Steve French (sfrench@us.ibm.com)
index f9740c2..d0f85b6 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/cifsproto.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
@@ -268,6 +267,9 @@ extern void cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode);
 
 extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon);
 
+extern void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon,
+                               const char *path);
+
 extern struct TCP_Server_Info *cifs_get_tcp_session(struct smb3_fs_context *ctx);
 extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
                                 int from_reconnect);
index a8e41c1..243d176 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/cifssmb.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2010
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 0db3448..c3b94c1 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/connect.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2011
  *   Author(s): Steve French (sfrench@us.ibm.com)
@@ -1090,7 +1089,7 @@ next_pdu:
        module_put_and_exit(0);
 }
 
-/**
+/*
  * Returns true if srcaddr isn't specified and rhs isn't specified, or
  * if srcaddr is specified and matches the IP address of the rhs argument
  */
@@ -1550,6 +1549,9 @@ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
 
 /**
  * cifs_setup_ipc - helper to setup the IPC tcon for the session
+ * @ses: smb session to issue the request on
+ * @ctx: the superblock configuration context to use for building the
+ *       new tree connection for the IPC (interprocess communication RPC)
  *
  * A new IPC connection is made and stored in the session
  * tcon_ipc. The IPC tcon has the same lifetime as the session.
@@ -1605,6 +1607,7 @@ out:
 
 /**
  * cifs_free_ipc - helper to release the session IPC tcon
+ * @ses: smb session to unmount the IPC from
  *
  * Needs to be called everytime a session is destroyed.
  *
@@ -1855,6 +1858,8 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)),
 
 /**
  * cifs_get_smb_ses - get a session matching @ctx data from @server
+ * @server: server to setup the session to
+ * @ctx: superblock configuration context to use to setup the session
  *
  * This function assumes it is being called from cifs_mount() where we
  * already got a server reference (server refcount +1). See
@@ -2065,6 +2070,8 @@ cifs_put_tcon(struct cifs_tcon *tcon)
 
 /**
  * cifs_get_tcon - get a tcon matching @ctx data from @ses
+ * @ses: smb session to issue the request on
+ * @ctx: the superblock configuration context to use for building the
  *
  * - tcon refcount is the number of mount points using the tcon.
  * - ses refcount is the number of tcon using the session.
@@ -2382,9 +2389,10 @@ cifs_match_super(struct super_block *sb, void *data)
        spin_lock(&cifs_tcp_ses_lock);
        cifs_sb = CIFS_SB(sb);
        tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
-       if (IS_ERR(tlink)) {
+       if (tlink == NULL) {
+               /* can not match superblock if tlink were ever null */
                spin_unlock(&cifs_tcp_ses_lock);
-               return rc;
+               return 0;
        }
        tcon = tlink_tcon(tlink);
        ses = tcon->ses;
@@ -3030,7 +3038,7 @@ build_unc_path_to_root(const struct smb3_fs_context *ctx,
        return full_path;
 }
 
-/**
+/*
  * expand_dfs_referral - Perform a dfs referral query and update the cifs_sb
  *
  * If a referral is found, cifs_sb->ctx->mount_options will be (re-)allocated
index 5f8a302..6e8e7cc 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/dir.c
  *
  *   vfs operations that deal with dentries
  *
index 8c616aa..0458d28 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *  fs/cifs/dns_resolve.c
  *
  *   Copyright (c) 2007 Igor Mammedov
  *   Author(s): Igor Mammedov (niallain@gmail.com)
index 9fa2807..afc0df3 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/dns_resolve.h -- DNS Resolver upcall management for CIFS DFS
- *                            Handles host name to IP address resolution
+ *   DNS Resolver upcall management for CIFS DFS
+ *   Handles host name to IP address resolution
  *
  *   Copyright (c) International Business Machines  Corp., 2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 747a540..37c2841 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/export.c
  *
  *   Copyright (C) International Business Machines  Corp., 2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
index d021647..13f3182 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/file.c
  *
  *   vfs operations that deal with files
  *
@@ -883,8 +882,9 @@ int cifs_close(struct inode *inode, struct file *file)
                dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
                if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
                    cinode->lease_granted &&
+                   !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
                    dclose) {
-                       if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
+                       if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
                                inode->i_ctime = inode->i_mtime = current_time(inode);
                                cifs_fscache_update_inode_cookie(inode);
                        }
@@ -1865,6 +1865,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
        cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
                        tcon->ses->server);
        cifs_sb = CIFS_FILE_SB(file);
+       set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
 
        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
@@ -3112,7 +3113,7 @@ static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
        struct cifs_tcon *tcon;
        struct cifs_sb_info *cifs_sb;
        struct dentry *dentry = ctx->cfile->dentry;
-       int rc;
+       ssize_t rc;
 
        tcon = tlink_tcon(ctx->cfile->tlink);
        cifs_sb = CIFS_SB(dentry->d_sb);
index fab47fa..8eedd20 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/fscache.c - CIFS filesystem cache interface
+ *   CIFS filesystem cache interface
  *
  *   Copyright (c) 2010 Novell, Inc.
  *   Author(s): Suresh Jayaraman <sjayaraman@suse.de>
index 82e856b..9baa1d0 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/fscache.h - CIFS filesystem cache interface definitions
+ *   CIFS filesystem cache interface definitions
  *
  *   Copyright (c) 2010 Novell, Inc.
  *   Authors(s): Suresh Jayaraman (sjayaraman@suse.de>
index 50c01cf..8284841 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/inode.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2010
  *   Author(s): Steve French (sfrench@us.ibm.com)
@@ -1625,7 +1624,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
                goto unlink_out;
        }
 
-       cifs_close_deferred_file(CIFS_I(inode));
+       cifs_close_deferred_file_under_dentry(tcon, full_path);
        if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                rc = CIFSPOSIXDelFile(xid, tcon, full_path,
@@ -2114,9 +2113,9 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
                goto cifs_rename_exit;
        }
 
-       cifs_close_deferred_file(CIFS_I(d_inode(source_dentry)));
+       cifs_close_deferred_file_under_dentry(tcon, from_name);
        if (d_inode(target_dentry) != NULL)
-               cifs_close_deferred_file(CIFS_I(d_inode(target_dentry)));
+               cifs_close_deferred_file_under_dentry(tcon, to_name);
 
        rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
                            to_name);
index 42c6a0b..0359b60 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/ioctl.c
  *
  *   vfs operations that deal with io control
  *
@@ -359,7 +358,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                        if (pSMBFile == NULL)
                                break;
                        tcon = tlink_tcon(pSMBFile->tlink);
-                       caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
+                       /* caps = le64_to_cpu(tcon->fsUnixInfo.Capability); */
 
                        if (get_user(ExtAttrBits, (int __user *)arg)) {
                                rc = -EFAULT;
index f0a6d63..852e54e 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/link.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 9469f1c..bb1185f 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/misc.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
@@ -265,7 +264,8 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
 
                        /* Uid is not converted */
                        buffer->Uid = treeCon->ses->Suid;
-                       buffer->Mid = get_next_mid(treeCon->ses->server);
+                       if (treeCon->ses->server)
+                               buffer->Mid = get_next_mid(treeCon->ses->server);
                }
                if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
                        buffer->Flags2 |= SMBFLG2_DFS;
@@ -591,6 +591,7 @@ void cifs_put_writer(struct cifsInodeInfo *cinode)
 
 /**
  * cifs_queue_oplock_break - queue the oplock break handler for cfile
+ * @cfile: The file to break the oplock on
  *
  * This function is called from the demultiplex thread when it
  * receives an oplock break for @cfile.
@@ -736,7 +737,7 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
                        if (cancel_delayed_work(&cfile->deferred)) {
                                tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
                                if (tmp_list == NULL)
-                                       continue;
+                                       break;
                                tmp_list->cfile = cfile;
                                list_add_tail(&tmp_list->list, &file_head);
                        }
@@ -767,7 +768,7 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
                        if (cancel_delayed_work(&cfile->deferred)) {
                                tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
                                if (tmp_list == NULL)
-                                       continue;
+                                       break;
                                tmp_list->cfile = cfile;
                                list_add_tail(&tmp_list->list, &file_head);
                        }
@@ -781,6 +782,43 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
                kfree(tmp_list);
        }
 }
+void
+cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
+{
+       struct cifsFileInfo *cfile;
+       struct list_head *tmp;
+       struct file_list *tmp_list, *tmp_next_list;
+       struct list_head file_head;
+       void *page;
+       const char *full_path;
+
+       INIT_LIST_HEAD(&file_head);
+       page = alloc_dentry_path();
+       spin_lock(&tcon->open_file_lock);
+       list_for_each(tmp, &tcon->openFileList) {
+               cfile = list_entry(tmp, struct cifsFileInfo, tlist);
+               full_path = build_path_from_dentry(cfile->dentry, page);
+               if (strstr(full_path, path)) {
+                       if (delayed_work_pending(&cfile->deferred)) {
+                               if (cancel_delayed_work(&cfile->deferred)) {
+                                       tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+                                       if (tmp_list == NULL)
+                                               break;
+                                       tmp_list->cfile = cfile;
+                                       list_add_tail(&tmp_list->list, &file_head);
+                               }
+                       }
+               }
+       }
+       spin_unlock(&tcon->open_file_lock);
+
+       list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+               _cifsFileInfo_put(tmp_list->cfile, true, false);
+               list_del(&tmp_list->list);
+               kfree(tmp_list);
+       }
+       free_dentry_path(page);
+}
 
 /* parses DFS refferal V3 structure
  * caller is responsible for freeing target_nodes
@@ -1029,6 +1067,9 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
 
 /**
  * cifs_alloc_hash - allocate hash and hash context together
+ * @name: The name of the crypto hash algo
+ * @shash: Where to put the pointer to the hash algo
+ * @sdesc: Where to put the pointer to the hash descriptor
  *
  * The caller has to make sure @sdesc is initialized to either NULL or
  * a valid context. Both can be freed via cifs_free_hash().
@@ -1067,6 +1108,8 @@ cifs_alloc_hash(const char *name,
 
 /**
  * cifs_free_hash - free hash and hash context together
+ * @shash: Where to find the pointer to the hash algo
+ * @sdesc: Where to find the pointer to the hash descriptor
  *
  * Freeing a NULL hash or context is safe.
  */
@@ -1082,8 +1125,10 @@ cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
 
 /**
  * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
- * Input: rqst - a smb_rqst, page - a page index for rqst
- * Output: *len - the length for this page, *offset - the offset for this page
+ * @rqst: The request descriptor
+ * @page: The index of the page to query
+ * @len: Where to store the length for this page:
+ * @offset: Where to store the offset for this page
  */
 void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
                                unsigned int *len, unsigned int *offset)
@@ -1116,6 +1161,8 @@ void extract_unc_hostname(const char *unc, const char **h, size_t *len)
 
 /**
  * copy_path_name - copy src path to dst, possibly truncating
+ * @dst: The destination buffer
+ * @src: The source name
  *
  * returns number of bytes written (including trailing nul)
  */
index 0e728aa..fa9fbd6 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- *   fs/cifs/netmisc.c
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 378133c..25a2b8e 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/ntlmssp.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 54d77c9..1929e80 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/readdir.c
  *
  *   Directory search handling
  *
index 137f7c9..ae1d025 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/rfc1002pdu.h
  *
  *   Protocol Data Unit definitions for RFC 1001/1002 support
  *
index 118403f..23e02db 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/sess.c
  *
  *   SMB/CIFS session setup handling routines
  *
index c9d8a50..f5dcc49 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/smb2file.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002, 2011
  *   Author(s): Steve French (sfrench@us.ibm.com),
index d0e9f37..ca692b2 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/smb2glob.h
  *
  *   Definitions for various global variables and structures
  *
index 957b259..8297703 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/smb2inode.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002, 2011
  *                 Etersoft, 2012
index 668f771..29b5554 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/smb2misc.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2011
  *                 Etersoft, 2012
index b6d2e35..7829c59 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/smb2pdu.c
  *
  *   Copyright (C) International Business Machines  Corp., 2009, 2013
  *                 Etersoft, 2012
@@ -2398,7 +2397,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
        buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd);
        /* Ship the ACL for now. we will copy it into buf later. */
        aclptr = ptr;
-       ptr += sizeof(struct cifs_acl);
+       ptr += sizeof(struct smb3_acl);
 
        /* create one ACE to hold the mode embedded in reserved special SID */
        acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode);
@@ -2423,7 +2422,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
        acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
        acl.AclSize = cpu_to_le16(acl_size);
        acl.AceCount = cpu_to_le16(ace_count);
-       memcpy(aclptr, &acl, sizeof(struct cifs_acl));
+       memcpy(aclptr, &acl, sizeof(struct smb3_acl));
 
        buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
        *len = roundup(ptr - (__u8 *)buf, 8);
index e9cac79..f32c99c 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/smb2pdu.h
  *
  *   Copyright (c) International Business Machines  Corp., 2009, 2013
  *                 Etersoft, 2012
index 263767f..5479454 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/smb2proto.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002, 2011
  *                 Etersoft, 2012
index 0215ef3..a9e9581 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/smb2status.h
  *
  *   SMB2 Status code (network error) definitions
  *   Definitions are from MS-ERREF
index 6f7952e..f59b956 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/smb2transport.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002, 2011
  *                 Etersoft, 2012
index 60189ef..aeffdad 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1 */
 /*
- *   fs/cifs/smberr.h
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2004
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 75a95de..b737932 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/transport.c
  *
  *   Copyright (C) International Business Machines  Corp., 2002,2008
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 59b6c57..2f075b5 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * fs/cifs/winucase.c
  *
  * Copyright (c) Jeffrey Layton <jlayton@redhat.com>, 2013
  *
index 9ed481e..7d8b72d 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: LGPL-2.1
 /*
- *   fs/cifs/xattr.c
  *
  *   Copyright (c) International Business Machines  Corp., 2003, 2007
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 31ac3a7..a552399 100644 (file)
@@ -176,7 +176,7 @@ static struct page *erofs_read_inode(struct inode *inode,
        }
 
        if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
-               if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_ALL)) {
+               if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
                        erofs_err(inode->i_sb,
                                  "unsupported chunk format %x of nid %llu",
                                  vi->chunkformat, vi->nid);
index 9fb98d8..7a6df35 100644 (file)
@@ -369,7 +369,8 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
        if (compacted_4b_initial == 32 / 4)
                compacted_4b_initial = 0;
 
-       if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B)
+       if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
+           compacted_4b_initial < totalidx)
                compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
        else
                compacted_2b = 0;
index 1f3f432..c17ccc1 100644 (file)
@@ -48,10 +48,9 @@ struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
        struct ext2_sb_info *sbi = EXT2_SB(sb);
 
        if (block_group >= sbi->s_groups_count) {
-               ext2_error (sb, "ext2_get_group_desc",
-                           "block_group >= groups_count - "
-                           "block_group = %d, groups_count = %lu",
-                           block_group, sbi->s_groups_count);
+               WARN(1, "block_group >= groups_count - "
+                    "block_group = %d, groups_count = %lu",
+                    block_group, sbi->s_groups_count);
 
                return NULL;
        }
@@ -59,10 +58,9 @@ struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
        group_desc = block_group >> EXT2_DESC_PER_BLOCK_BITS(sb);
        offset = block_group & (EXT2_DESC_PER_BLOCK(sb) - 1);
        if (!sbi->s_group_desc[group_desc]) {
-               ext2_error (sb, "ext2_get_group_desc",
-                           "Group descriptor not loaded - "
-                           "block_group = %d, group_desc = %lu, desc = %lu",
-                            block_group, group_desc, offset);
+               WARN(1, "Group descriptor not loaded - "
+                    "block_group = %d, group_desc = %lu, desc = %lu",
+                     block_group, group_desc, offset);
                return NULL;
        }
 
index 37710ca..ed0cab8 100644 (file)
@@ -190,8 +190,10 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
        mapping->private_data = NULL;
        mapping->writeback_index = 0;
-       __init_rwsem(&mapping->invalidate_lock, "mapping.invalidate_lock",
-                    &sb->s_type->invalidate_lock_key);
+       init_rwsem(&mapping->invalidate_lock);
+       lockdep_set_class_and_name(&mapping->invalidate_lock,
+                                  &sb->s_type->invalidate_lock_key,
+                                  "mapping.invalidate_lock");
        inode->i_private = NULL;
        inode->i_mapping = mapping;
        INIT_HLIST_HEAD(&inode->i_dentry);      /* buggered by rcu freeing */
index 6c55362..c2360cd 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/rculist_nulls.h>
 #include <linux/cpu.h>
 #include <linux/tracehook.h>
+#include <uapi/linux/io_uring.h>
 
 #include "io-wq.h"
 
@@ -176,7 +177,6 @@ static void io_worker_ref_put(struct io_wq *wq)
 static void io_worker_exit(struct io_worker *worker)
 {
        struct io_wqe *wqe = worker->wqe;
-       struct io_wqe_acct *acct = io_wqe_get_acct(worker);
 
        if (refcount_dec_and_test(&worker->ref))
                complete(&worker->ref_done);
@@ -186,7 +186,6 @@ static void io_worker_exit(struct io_worker *worker)
        if (worker->flags & IO_WORKER_F_FREE)
                hlist_nulls_del_rcu(&worker->nulls_node);
        list_del_rcu(&worker->all_list);
-       acct->nr_workers--;
        preempt_disable();
        io_wqe_dec_running(worker);
        worker->flags = 0;
@@ -246,8 +245,6 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
  */
 static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
 {
-       bool do_create = false;
-
        /*
         * Most likely an attempt to queue unbounded work on an io_wq that
         * wasn't setup with any unbounded workers.
@@ -256,18 +253,15 @@ static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
                pr_warn_once("io-wq is not configured for unbound workers");
 
        raw_spin_lock(&wqe->lock);
-       if (acct->nr_workers < acct->max_workers) {
-               acct->nr_workers++;
-               do_create = true;
+       if (acct->nr_workers == acct->max_workers) {
+               raw_spin_unlock(&wqe->lock);
+               return true;
        }
+       acct->nr_workers++;
        raw_spin_unlock(&wqe->lock);
-       if (do_create) {
-               atomic_inc(&acct->nr_running);
-               atomic_inc(&wqe->wq->worker_refs);
-               return create_io_worker(wqe->wq, wqe, acct->index);
-       }
-
-       return true;
+       atomic_inc(&acct->nr_running);
+       atomic_inc(&wqe->wq->worker_refs);
+       return create_io_worker(wqe->wq, wqe, acct->index);
 }
 
 static void io_wqe_inc_running(struct io_worker *worker)
@@ -574,6 +568,7 @@ loop:
                }
                /* timed out, exit unless we're the last worker */
                if (last_timeout && acct->nr_workers > 1) {
+                       acct->nr_workers--;
                        raw_spin_unlock(&wqe->lock);
                        __set_current_state(TASK_RUNNING);
                        break;
@@ -589,7 +584,8 @@ loop:
 
                        if (!get_signal(&ksig))
                                continue;
-                       if (fatal_signal_pending(current))
+                       if (fatal_signal_pending(current) ||
+                           signal_group_exit(current->signal))
                                break;
                        continue;
                }
@@ -1287,6 +1283,10 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)
 {
        int i, node, prev = 0;
 
+       BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
+       BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
+       BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);
+
        for (i = 0; i < 2; i++) {
                if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
                        new_count[i] = task_rlimit(current, RLIMIT_NPROC);
index 16fb743..82f8679 100644 (file)
@@ -502,6 +502,7 @@ struct io_poll_update {
 struct io_close {
        struct file                     *file;
        int                             fd;
+       u32                             file_slot;
 };
 
 struct io_timeout_data {
@@ -712,6 +713,7 @@ struct io_async_rw {
        struct iovec                    fast_iov[UIO_FASTIOV];
        const struct iovec              *free_iovec;
        struct iov_iter                 iter;
+       struct iov_iter_state           iter_state;
        size_t                          bytes_done;
        struct wait_page_queue          wpq;
 };
@@ -735,7 +737,6 @@ enum {
        REQ_F_BUFFER_SELECTED_BIT,
        REQ_F_COMPLETE_INLINE_BIT,
        REQ_F_REISSUE_BIT,
-       REQ_F_DONT_REISSUE_BIT,
        REQ_F_CREDS_BIT,
        REQ_F_REFCOUNT_BIT,
        REQ_F_ARM_LTIMEOUT_BIT,
@@ -782,8 +783,6 @@ enum {
        REQ_F_COMPLETE_INLINE   = BIT(REQ_F_COMPLETE_INLINE_BIT),
        /* caller should reissue async */
        REQ_F_REISSUE           = BIT(REQ_F_REISSUE_BIT),
-       /* don't attempt request reissue, see io_rw_reissue() */
-       REQ_F_DONT_REISSUE      = BIT(REQ_F_DONT_REISSUE_BIT),
        /* supports async reads */
        REQ_F_NOWAIT_READ       = BIT(REQ_F_NOWAIT_READ_BIT),
        /* supports async writes */
@@ -1100,6 +1099,8 @@ static int io_req_prep_async(struct io_kiocb *req);
 
 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
                                 unsigned int issue_flags, u32 slot_index);
+static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
+
 static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
 
 static struct kmem_cache *req_cachep;
@@ -2444,13 +2445,6 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
                req = list_first_entry(done, struct io_kiocb, inflight_entry);
                list_del(&req->inflight_entry);
 
-               if (READ_ONCE(req->result) == -EAGAIN &&
-                   !(req->flags & REQ_F_DONT_REISSUE)) {
-                       req->iopoll_completed = 0;
-                       io_req_task_queue_reissue(req);
-                       continue;
-               }
-
                __io_cqring_fill_event(ctx, req->user_data, req->result,
                                        io_put_rw_kbuf(req));
                (*nr_events)++;
@@ -2613,8 +2607,7 @@ static bool io_resubmit_prep(struct io_kiocb *req)
 
        if (!rw)
                return !io_req_prep_async(req);
-       /* may have left rw->iter inconsistent on -EIOCBQUEUED */
-       iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter));
+       iov_iter_restore(&rw->iter, &rw->iter_state);
        return true;
 }
 
@@ -2714,10 +2707,9 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
        if (kiocb->ki_flags & IOCB_WRITE)
                kiocb_end_write(req);
        if (unlikely(res != req->result)) {
-               if (!(res == -EAGAIN && io_rw_should_reissue(req) &&
-                   io_resubmit_prep(req))) {
-                       req_set_fail(req);
-                       req->flags |= REQ_F_DONT_REISSUE;
+               if (res == -EAGAIN && io_rw_should_reissue(req)) {
+                       req->flags |= REQ_F_REISSUE;
+                       return;
                }
        }
 
@@ -2843,7 +2835,8 @@ static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
        return __io_file_supports_nowait(req->file, rw);
 }
 
-static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+                     int rw)
 {
        struct io_ring_ctx *ctx = req->ctx;
        struct kiocb *kiocb = &req->rw.kiocb;
@@ -2865,8 +2858,13 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (unlikely(ret))
                return ret;
 
-       /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
-       if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
+       /*
+        * If the file is marked O_NONBLOCK, still allow retry for it if it
+        * supports async. Otherwise it's impossible to use O_NONBLOCK files
+        * reliably. If not, or it IOCB_NOWAIT is set, don't retry.
+        */
+       if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+           ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw)))
                req->flags |= REQ_F_NOWAIT;
 
        ioprio = READ_ONCE(sqe->ioprio);
@@ -2931,7 +2929,6 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
        struct io_async_rw *io = req->async_data;
-       bool check_reissue = kiocb->ki_complete == io_complete_rw;
 
        /* add previously done IO, if any */
        if (io && io->bytes_done > 0) {
@@ -2943,19 +2940,27 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 
        if (req->flags & REQ_F_CUR_POS)
                req->file->f_pos = kiocb->ki_pos;
-       if (ret >= 0 && check_reissue)
+       if (ret >= 0 && (kiocb->ki_complete == io_complete_rw))
                __io_complete_rw(req, ret, 0, issue_flags);
        else
                io_rw_done(kiocb, ret);
 
-       if (check_reissue && (req->flags & REQ_F_REISSUE)) {
+       if (req->flags & REQ_F_REISSUE) {
                req->flags &= ~REQ_F_REISSUE;
                if (io_resubmit_prep(req)) {
                        io_req_task_queue_reissue(req);
                } else {
+                       unsigned int cflags = io_put_rw_kbuf(req);
+                       struct io_ring_ctx *ctx = req->ctx;
+
                        req_set_fail(req);
-                       __io_req_complete(req, issue_flags, ret,
-                                         io_put_rw_kbuf(req));
+                       if (issue_flags & IO_URING_F_NONBLOCK) {
+                               mutex_lock(&ctx->uring_lock);
+                               __io_req_complete(req, issue_flags, ret, cflags);
+                               mutex_unlock(&ctx->uring_lock);
+                       } else {
+                               __io_req_complete(req, issue_flags, ret, cflags);
+                       }
                }
        }
 }
@@ -3263,12 +3268,15 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
                                ret = nr;
                        break;
                }
+               if (!iov_iter_is_bvec(iter)) {
+                       iov_iter_advance(iter, nr);
+               } else {
+                       req->rw.len -= nr;
+                       req->rw.addr += nr;
+               }
                ret += nr;
                if (nr != iovec.iov_len)
                        break;
-               req->rw.len -= nr;
-               req->rw.addr += nr;
-               iov_iter_advance(iter, nr);
        }
 
        return ret;
@@ -3315,12 +3323,17 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
        if (!force && !io_op_defs[req->opcode].needs_async_setup)
                return 0;
        if (!req->async_data) {
+               struct io_async_rw *iorw;
+
                if (io_alloc_async_data(req)) {
                        kfree(iovec);
                        return -ENOMEM;
                }
 
                io_req_map_rw(req, iovec, fast_iov, iter);
+               iorw = req->async_data;
+               /* we've copied and mapped the iter, ensure state is saved */
+               iov_iter_save_state(&iorw->iter, &iorw->iter_state);
        }
        return 0;
 }
@@ -3339,6 +3352,7 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
        iorw->free_iovec = iov;
        if (iov)
                req->flags |= REQ_F_NEED_CLEANUP;
+       iov_iter_save_state(&iorw->iter, &iorw->iter_state);
        return 0;
 }
 
@@ -3346,7 +3360,7 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        if (unlikely(!(req->file->f_mode & FMODE_READ)))
                return -EBADF;
-       return io_prep_rw(req, sqe);
+       return io_prep_rw(req, sqe, READ);
 }
 
 /*
@@ -3442,19 +3456,28 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
        struct kiocb *kiocb = &req->rw.kiocb;
        struct iov_iter __iter, *iter = &__iter;
        struct io_async_rw *rw = req->async_data;
-       ssize_t io_size, ret, ret2;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+       struct iov_iter_state __state, *state;
+       ssize_t ret, ret2;
 
        if (rw) {
                iter = &rw->iter;
+               state = &rw->iter_state;
+               /*
+                * We come here from an earlier attempt, restore our state to
+                * match in case it doesn't. It's cheap enough that we don't
+                * need to make this conditional.
+                */
+               iov_iter_restore(iter, state);
                iovec = NULL;
        } else {
                ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
                if (ret < 0)
                        return ret;
+               state = &__state;
+               iov_iter_save_state(iter, state);
        }
-       io_size = iov_iter_count(iter);
-       req->result = io_size;
+       req->result = iov_iter_count(iter);
 
        /* Ensure we clear previously set non-block flag */
        if (!force_nonblock)
@@ -3468,7 +3491,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                return ret ?: -EAGAIN;
        }
 
-       ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
+       ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result);
        if (unlikely(ret)) {
                kfree(iovec);
                return ret;
@@ -3484,30 +3507,49 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                /* no retry on NONBLOCK nor RWF_NOWAIT */
                if (req->flags & REQ_F_NOWAIT)
                        goto done;
-               /* some cases will consume bytes even on error returns */
-               iov_iter_reexpand(iter, iter->count + iter->truncated);
-               iov_iter_revert(iter, io_size - iov_iter_count(iter));
                ret = 0;
        } else if (ret == -EIOCBQUEUED) {
                goto out_free;
-       } else if (ret <= 0 || ret == io_size || !force_nonblock ||
+       } else if (ret <= 0 || ret == req->result || !force_nonblock ||
                   (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
                /* read all, failed, already did sync or don't want to retry */
                goto done;
        }
 
+       /*
+        * Don't depend on the iter state matching what was consumed, or being
+        * untouched in case of error. Restore it and we'll advance it
+        * manually if we need to.
+        */
+       iov_iter_restore(iter, state);
+
        ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
        if (ret2)
                return ret2;
 
        iovec = NULL;
        rw = req->async_data;
-       /* now use our persistent iterator, if we aren't already */
-       iter = &rw->iter;
+       /*
+        * Now use our persistent iterator and state, if we aren't already.
+        * We've restored and mapped the iter to match.
+        */
+       if (iter != &rw->iter) {
+               iter = &rw->iter;
+               state = &rw->iter_state;
+       }
 
        do {
-               io_size -= ret;
+               /*
+                * We end up here because of a partial read, either from
+                * above or inside this loop. Advance the iter by the bytes
+                * that were consumed.
+                */
+               iov_iter_advance(iter, ret);
+               if (!iov_iter_count(iter))
+                       break;
                rw->bytes_done += ret;
+               iov_iter_save_state(iter, state);
+
                /* if we can retry, do so with the callbacks armed */
                if (!io_rw_should_retry(req)) {
                        kiocb->ki_flags &= ~IOCB_WAITQ;
@@ -3525,7 +3567,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                        return 0;
                /* we got some bytes, but not all. retry. */
                kiocb->ki_flags &= ~IOCB_WAITQ;
-       } while (ret > 0 && ret < io_size);
+               iov_iter_restore(iter, state);
+       } while (ret > 0);
 done:
        kiocb_done(kiocb, ret, issue_flags);
 out_free:
@@ -3539,7 +3582,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
                return -EBADF;
-       return io_prep_rw(req, sqe);
+       return io_prep_rw(req, sqe, WRITE);
 }
 
 static int io_write(struct io_kiocb *req, unsigned int issue_flags)
@@ -3548,19 +3591,23 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
        struct kiocb *kiocb = &req->rw.kiocb;
        struct iov_iter __iter, *iter = &__iter;
        struct io_async_rw *rw = req->async_data;
-       ssize_t ret, ret2, io_size;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+       struct iov_iter_state __state, *state;
+       ssize_t ret, ret2;
 
        if (rw) {
                iter = &rw->iter;
+               state = &rw->iter_state;
+               iov_iter_restore(iter, state);
                iovec = NULL;
        } else {
                ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
                if (ret < 0)
                        return ret;
+               state = &__state;
+               iov_iter_save_state(iter, state);
        }
-       io_size = iov_iter_count(iter);
-       req->result = io_size;
+       req->result = iov_iter_count(iter);
 
        /* Ensure we clear previously set non-block flag */
        if (!force_nonblock)
@@ -3577,7 +3624,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
            (req->flags & REQ_F_ISREG))
                goto copy_iov;
 
-       ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
+       ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result);
        if (unlikely(ret))
                goto out_free;
 
@@ -3624,9 +3671,7 @@ done:
                kiocb_done(kiocb, ret2, issue_flags);
        } else {
 copy_iov:
-               /* some cases will consume bytes even on error returns */
-               iov_iter_reexpand(iter, iter->count + iter->truncated);
-               iov_iter_revert(iter, io_size - iov_iter_count(iter));
+               iov_iter_restore(iter, state);
                ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
                return ret ?: -EAGAIN;
        }
@@ -4342,7 +4387,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
        int i, bid = pbuf->bid;
 
        for (i = 0; i < pbuf->nbufs; i++) {
-               buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+               buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
                if (!buf)
                        break;
 
@@ -4549,12 +4594,16 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
-           sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
+           sqe->rw_flags || sqe->buf_index)
                return -EINVAL;
        if (req->flags & REQ_F_FIXED_FILE)
                return -EBADF;
 
        req->close.fd = READ_ONCE(sqe->fd);
+       req->close.file_slot = READ_ONCE(sqe->file_index);
+       if (req->close.file_slot && req->close.fd)
+               return -EINVAL;
+
        return 0;
 }
 
@@ -4566,6 +4615,11 @@ static int io_close(struct io_kiocb *req, unsigned int issue_flags)
        struct file *file = NULL;
        int ret = -EBADF;
 
+       if (req->close.file_slot) {
+               ret = io_close_fixed(req, issue_flags);
+               goto err;
+       }
+
        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        if (close->fd >= fdt->max_fds) {
@@ -5293,7 +5347,7 @@ static bool __io_poll_complete(struct io_kiocb *req, __poll_t mask)
        if (req->poll.events & EPOLLONESHOT)
                flags = 0;
        if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
-               req->poll.done = true;
+               req->poll.events |= EPOLLONESHOT;
                flags = 0;
        }
        if (flags & IORING_CQE_F_MORE)
@@ -5322,10 +5376,15 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
        } else {
                bool done;
 
+               if (req->poll.done) {
+                       spin_unlock(&ctx->completion_lock);
+                       return;
+               }
                done = __io_poll_complete(req, req->result);
                if (done) {
                        io_poll_remove_double(req);
                        hash_del(&req->hash_node);
+                       req->poll.done = true;
                } else {
                        req->result = 0;
                        add_wait_queue(req->poll.head, &req->poll.wait);
@@ -5463,6 +5522,7 @@ static void io_async_task_func(struct io_kiocb *req, bool *locked)
 
        hash_del(&req->hash_node);
        io_poll_remove_double(req);
+       apoll->poll.done = true;
        spin_unlock(&ctx->completion_lock);
 
        if (!READ_ONCE(apoll->poll.canceled))
@@ -5783,6 +5843,7 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
        struct io_ring_ctx *ctx = req->ctx;
        struct io_poll_table ipt;
        __poll_t mask;
+       bool done;
 
        ipt.pt._qproc = io_poll_queue_proc;
 
@@ -5791,13 +5852,13 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
 
        if (mask) { /* no async, we'd stolen it */
                ipt.error = 0;
-               io_poll_complete(req, mask);
+               done = io_poll_complete(req, mask);
        }
        spin_unlock(&ctx->completion_lock);
 
        if (mask) {
                io_cqring_ev_posted(ctx);
-               if (poll->events & EPOLLONESHOT)
+               if (done)
                        io_put_req(req);
        }
        return ipt.error;
@@ -6288,19 +6349,16 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
        struct io_uring_rsrc_update2 up;
        int ret;
 
-       if (issue_flags & IO_URING_F_NONBLOCK)
-               return -EAGAIN;
-
        up.offset = req->rsrc_update.offset;
        up.data = req->rsrc_update.arg;
        up.nr = 0;
        up.tags = 0;
        up.resv = 0;
 
-       mutex_lock(&ctx->uring_lock);
+       io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
        ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
                                        &up, req->rsrc_update.nr_args);
-       mutex_unlock(&ctx->uring_lock);
+       io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
 
        if (ret < 0)
                req_set_fail(req);
@@ -7515,6 +7573,14 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                        break;
        } while (1);
 
+       if (uts) {
+               struct timespec64 ts;
+
+               if (get_timespec64(&ts, uts))
+                       return -EFAULT;
+               timeout = timespec64_to_jiffies(&ts);
+       }
+
        if (sig) {
 #ifdef CONFIG_COMPAT
                if (in_compat_syscall())
@@ -7528,14 +7594,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                        return ret;
        }
 
-       if (uts) {
-               struct timespec64 ts;
-
-               if (get_timespec64(&ts, uts))
-                       return -EFAULT;
-               timeout = timespec64_to_jiffies(&ts);
-       }
-
        init_waitqueue_func_entry(&iowq.wq, io_wake_function);
        iowq.wq.private = current;
        INIT_LIST_HEAD(&iowq.wq.entry);
@@ -8284,11 +8342,27 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
 #endif
 }
 
+static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
+                                struct io_rsrc_node *node, void *rsrc)
+{
+       struct io_rsrc_put *prsrc;
+
+       prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
+       if (!prsrc)
+               return -ENOMEM;
+
+       prsrc->tag = *io_get_tag_slot(data, idx);
+       prsrc->rsrc = rsrc;
+       list_add(&prsrc->list, &node->rsrc_list);
+       return 0;
+}
+
 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
                                 unsigned int issue_flags, u32 slot_index)
 {
        struct io_ring_ctx *ctx = req->ctx;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+       bool needs_switch = false;
        struct io_fixed_file *file_slot;
        int ret = -EBADF;
 
@@ -8304,9 +8378,22 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
 
        slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
        file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
-       ret = -EBADF;
-       if (file_slot->file_ptr)
-               goto err;
+
+       if (file_slot->file_ptr) {
+               struct file *old_file;
+
+               ret = io_rsrc_node_switch_start(ctx);
+               if (ret)
+                       goto err;
+
+               old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
+               ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
+                                           ctx->rsrc_node, old_file);
+               if (ret)
+                       goto err;
+               file_slot->file_ptr = 0;
+               needs_switch = true;
+       }
 
        *io_get_tag_slot(ctx->file_data, slot_index) = 0;
        io_fixed_file_set(file_slot, file);
@@ -8318,25 +8405,50 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
 
        ret = 0;
 err:
+       if (needs_switch)
+               io_rsrc_node_switch(ctx, ctx->file_data);
        io_ring_submit_unlock(ctx, !force_nonblock);
        if (ret)
                fput(file);
        return ret;
 }
 
-static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
-                                struct io_rsrc_node *node, void *rsrc)
+static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
 {
-       struct io_rsrc_put *prsrc;
+       unsigned int offset = req->close.file_slot - 1;
+       struct io_ring_ctx *ctx = req->ctx;
+       struct io_fixed_file *file_slot;
+       struct file *file;
+       int ret, i;
 
-       prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
-       if (!prsrc)
-               return -ENOMEM;
+       io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+       ret = -ENXIO;
+       if (unlikely(!ctx->file_data))
+               goto out;
+       ret = -EINVAL;
+       if (offset >= ctx->nr_user_files)
+               goto out;
+       ret = io_rsrc_node_switch_start(ctx);
+       if (ret)
+               goto out;
 
-       prsrc->tag = *io_get_tag_slot(data, idx);
-       prsrc->rsrc = rsrc;
-       list_add(&prsrc->list, &node->rsrc_list);
-       return 0;
+       i = array_index_nospec(offset, ctx->nr_user_files);
+       file_slot = io_fixed_file_slot(&ctx->file_table, i);
+       ret = -EBADF;
+       if (!file_slot->file_ptr)
+               goto out;
+
+       file = (struct file *)(file_slot->file_ptr & FFS_MASK);
+       ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
+       if (ret)
+               goto out;
+
+       file_slot->file_ptr = 0;
+       io_rsrc_node_switch(ctx, ctx->file_data);
+       ret = 0;
+out:
+       io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+       return ret;
 }
 
 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
@@ -9105,8 +9217,10 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
        struct io_buffer *buf;
        unsigned long index;
 
-       xa_for_each(&ctx->io_buffers, index, buf)
+       xa_for_each(&ctx->io_buffers, index, buf) {
                __io_remove_buffers(ctx, buf, index, -1U);
+               cond_resched();
+       }
 }
 
 static void io_req_cache_free(struct list_head *list)
@@ -9604,8 +9718,10 @@ static void io_uring_clean_tctx(struct io_uring_task *tctx)
        struct io_tctx_node *node;
        unsigned long index;
 
-       xa_for_each(&tctx->xa, index, node)
+       xa_for_each(&tctx->xa, index, node) {
                io_uring_del_tctx_node(index);
+               cond_resched();
+       }
        if (wq) {
                /*
                 * Must be after io_uring_del_task_file() (removes nodes under
@@ -10560,10 +10676,12 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
                         * ordering. Fine to drop uring_lock here, we hold
                         * a ref to the ctx.
                         */
+                       refcount_inc(&sqd->refs);
                        mutex_unlock(&ctx->uring_lock);
                        mutex_lock(&sqd->lock);
                        mutex_lock(&ctx->uring_lock);
-                       tctx = sqd->thread->io_uring;
+                       if (sqd->thread)
+                               tctx = sqd->thread->io_uring;
                }
        } else {
                tctx = current->io_uring;
@@ -10577,16 +10695,20 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
        if (ret)
                goto err;
 
-       if (sqd)
+       if (sqd) {
                mutex_unlock(&sqd->lock);
+               io_put_sq_data(sqd);
+       }
 
        if (copy_to_user(arg, new_count, sizeof(new_count)))
                return -EFAULT;
 
        return 0;
 err:
-       if (sqd)
+       if (sqd) {
                mutex_unlock(&sqd->lock);
+               io_put_sq_data(sqd);
+       }
        return ret;
 }
 
index 0b307ca..6a19f4b 100644 (file)
@@ -158,25 +158,21 @@ out:
  * Return : windows path string or error
  */
 
-char *convert_to_nt_pathname(char *filename, char *sharepath)
+char *convert_to_nt_pathname(char *filename)
 {
        char *ab_pathname;
-       int len, name_len;
 
-       name_len = strlen(filename);
-       ab_pathname = kmalloc(name_len, GFP_KERNEL);
-       if (!ab_pathname)
-               return NULL;
-
-       ab_pathname[0] = '\\';
-       ab_pathname[1] = '\0';
+       if (strlen(filename) == 0) {
+               ab_pathname = kmalloc(2, GFP_KERNEL);
+               ab_pathname[0] = '\\';
+               ab_pathname[1] = '\0';
+       } else {
+               ab_pathname = kstrdup(filename, GFP_KERNEL);
+               if (!ab_pathname)
+                       return NULL;
 
-       len = strlen(sharepath);
-       if (!strncmp(filename, sharepath, len) && name_len != len) {
-               strscpy(ab_pathname, &filename[len], name_len);
                ksmbd_conv_path_to_windows(ab_pathname);
        }
-
        return ab_pathname;
 }
 
@@ -240,7 +236,7 @@ char *ksmbd_extract_sharename(char *treename)
  *
  * Return:     converted name on success, otherwise NULL
  */
-char *convert_to_unix_name(struct ksmbd_share_config *share, char *name)
+char *convert_to_unix_name(struct ksmbd_share_config *share, const char *name)
 {
        int no_slash = 0, name_len, path_len;
        char *new_name;
index af8717d..253366b 100644 (file)
@@ -14,13 +14,13 @@ struct ksmbd_file;
 int match_pattern(const char *str, size_t len, const char *pattern);
 int ksmbd_validate_filename(char *filename);
 int parse_stream_name(char *filename, char **stream_name, int *s_type);
-char *convert_to_nt_pathname(char *filename, char *sharepath);
+char *convert_to_nt_pathname(char *filename);
 int get_nlink(struct kstat *st);
 void ksmbd_conv_path_to_unix(char *path);
 void ksmbd_strip_last_slash(char *path);
 void ksmbd_conv_path_to_windows(char *path);
 char *ksmbd_extract_sharename(char *treename);
-char *convert_to_unix_name(struct ksmbd_share_config *share, char *name);
+char *convert_to_unix_name(struct ksmbd_share_config *share, const char *name);
 
 #define KSMBD_DIR_INFO_ALIGNMENT       8
 struct ksmbd_dir_info;
index e6a9f6a..2a2b213 100644 (file)
@@ -584,6 +584,9 @@ static int __init ksmbd_server_init(void)
        ret = ksmbd_workqueue_init();
        if (ret)
                goto err_crypto_destroy;
+
+       pr_warn_once("The ksmbd server is experimental, use at your own risk.\n");
+
        return 0;
 
 err_crypto_destroy:
index c86164d..761e121 100644 (file)
@@ -433,7 +433,7 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work)
                work->compound_pfid = KSMBD_NO_FID;
        }
        memset((char *)rsp_hdr + 4, 0, sizeof(struct smb2_hdr) + 2);
-       rsp_hdr->ProtocolId = rcv_hdr->ProtocolId;
+       rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
        rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
        rsp_hdr->Command = rcv_hdr->Command;
 
@@ -634,7 +634,7 @@ static char *
 smb2_get_name(struct ksmbd_share_config *share, const char *src,
              const int maxlen, struct nls_table *local_nls)
 {
-       char *name, *unixname;
+       char *name;
 
        name = smb_strndup_from_utf16(src, maxlen, 1, local_nls);
        if (IS_ERR(name)) {
@@ -642,19 +642,9 @@ smb2_get_name(struct ksmbd_share_config *share, const char *src,
                return name;
        }
 
-       /* change it to absolute unix name */
        ksmbd_conv_path_to_unix(name);
        ksmbd_strip_last_slash(name);
-
-       unixname = convert_to_unix_name(share, name);
-       kfree(name);
-       if (!unixname) {
-               pr_err("can not convert absolute name\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
-       ksmbd_debug(SMB, "absolute name = %s\n", unixname);
-       return unixname;
+       return name;
 }
 
 int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
@@ -2348,7 +2338,7 @@ static int smb2_creat(struct ksmbd_work *work, struct path *path, char *name,
                        return rc;
        }
 
-       rc = ksmbd_vfs_kern_path(name, 0, path, 0);
+       rc = ksmbd_vfs_kern_path(work, name, 0, path, 0);
        if (rc) {
                pr_err("cannot get linux path (%s), err = %d\n",
                       name, rc);
@@ -2423,7 +2413,7 @@ int smb2_open(struct ksmbd_work *work)
        struct oplock_info *opinfo;
        __le32 *next_ptr = NULL;
        int req_op_level = 0, open_flags = 0, may_flags = 0, file_info = 0;
-       int rc = 0, len = 0;
+       int rc = 0;
        int contxt_cnt = 0, query_disk_id = 0;
        int maximal_access_ctxt = 0, posix_ctxt = 0;
        int s_type = 0;
@@ -2495,17 +2485,11 @@ int smb2_open(struct ksmbd_work *work)
                        goto err_out1;
                }
        } else {
-               len = strlen(share->path);
-               ksmbd_debug(SMB, "share path len %d\n", len);
-               name = kmalloc(len + 1, GFP_KERNEL);
+               name = kstrdup("", GFP_KERNEL);
                if (!name) {
-                       rsp->hdr.Status = STATUS_NO_MEMORY;
                        rc = -ENOMEM;
                        goto err_out1;
                }
-
-               memcpy(name, share->path, len);
-               *(name + len) = '\0';
        }
 
        req_op_level = req->RequestedOplockLevel;
@@ -2628,13 +2612,9 @@ int smb2_open(struct ksmbd_work *work)
                goto err_out1;
        }
 
-       if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) {
-               /*
-                * On delete request, instead of following up, need to
-                * look the current entity
-                */
-               rc = ksmbd_vfs_kern_path(name, 0, &path, 1);
-               if (!rc) {
+       rc = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, 1);
+       if (!rc) {
+               if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) {
                        /*
                         * If file exists with under flags, return access
                         * denied error.
@@ -2653,34 +2633,16 @@ int smb2_open(struct ksmbd_work *work)
                                path_put(&path);
                                goto err_out;
                        }
-               }
-       } else {
-               if (test_share_config_flag(work->tcon->share_conf,
-                                          KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS)) {
-                       /*
-                        * Use LOOKUP_FOLLOW to follow the path of
-                        * symlink in path buildup
-                        */
-                       rc = ksmbd_vfs_kern_path(name, LOOKUP_FOLLOW, &path, 1);
-                       if (rc) { /* Case for broken link ?*/
-                               rc = ksmbd_vfs_kern_path(name, 0, &path, 1);
-                       }
-               } else {
-                       rc = ksmbd_vfs_kern_path(name, 0, &path, 1);
-                       if (!rc && d_is_symlink(path.dentry)) {
-                               rc = -EACCES;
-                               path_put(&path);
-                               goto err_out;
-                       }
+               } else if (d_is_symlink(path.dentry)) {
+                       rc = -EACCES;
+                       path_put(&path);
+                       goto err_out;
                }
        }
 
        if (rc) {
-               if (rc == -EACCES) {
-                       ksmbd_debug(SMB,
-                                   "User does not have right permission\n");
+               if (rc != -ENOENT)
                        goto err_out;
-               }
                ksmbd_debug(SMB, "can not get linux path for %s, rc = %d\n",
                            name, rc);
                rc = 0;
@@ -3176,7 +3138,7 @@ err_out1:
                        rsp->hdr.Status = STATUS_INVALID_PARAMETER;
                else if (rc == -EOPNOTSUPP)
                        rsp->hdr.Status = STATUS_NOT_SUPPORTED;
-               else if (rc == -EACCES || rc == -ESTALE)
+               else if (rc == -EACCES || rc == -ESTALE || rc == -EXDEV)
                        rsp->hdr.Status = STATUS_ACCESS_DENIED;
                else if (rc == -ENOENT)
                        rsp->hdr.Status = STATUS_OBJECT_NAME_INVALID;
@@ -4041,6 +4003,10 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
        path = &fp->filp->f_path;
        /* single EA entry is requested with given user.* name */
        if (req->InputBufferLength) {
+               if (le32_to_cpu(req->InputBufferLength) <
+                   sizeof(struct smb2_ea_info_req))
+                       return -EINVAL;
+
                ea_req = (struct smb2_ea_info_req *)req->Buffer;
        } else {
                /* need to send all EAs, if no specific EA is requested*/
@@ -4288,8 +4254,7 @@ static int get_file_all_info(struct ksmbd_work *work,
                return -EACCES;
        }
 
-       filename = convert_to_nt_pathname(fp->filename,
-                                         work->tcon->share_conf->path);
+       filename = convert_to_nt_pathname(fp->filename);
        if (!filename)
                return -ENOMEM;
 
@@ -4420,17 +4385,15 @@ static void get_file_stream_info(struct ksmbd_work *work,
                file_info->NextEntryOffset = cpu_to_le32(next);
        }
 
-       if (nbytes) {
+       if (!S_ISDIR(stat.mode)) {
                file_info = (struct smb2_file_stream_info *)
                        &rsp->Buffer[nbytes];
                streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName,
                                              "::$DATA", 7, conn->local_nls, 0);
                streamlen *= 2;
                file_info->StreamNameLength = cpu_to_le32(streamlen);
-               file_info->StreamSize = S_ISDIR(stat.mode) ? 0 :
-                       cpu_to_le64(stat.size);
-               file_info->StreamAllocationSize = S_ISDIR(stat.mode) ? 0 :
-                       cpu_to_le64(stat.size);
+               file_info->StreamSize = 0;
+               file_info->StreamAllocationSize = 0;
                nbytes += sizeof(struct smb2_file_stream_info) + streamlen;
        }
 
@@ -4745,12 +4708,8 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
        struct path path;
        int rc = 0, len;
        int fs_infoclass_size = 0;
-       int lookup_flags = 0;
-
-       if (test_share_config_flag(share, KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
-               lookup_flags = LOOKUP_FOLLOW;
 
-       rc = ksmbd_vfs_kern_path(share->path, lookup_flags, &path, 0);
+       rc = kern_path(share->path, LOOKUP_NO_SYMLINKS, &path);
        if (rc) {
                pr_err("cannot create vfs path\n");
                return -EIO;
@@ -5299,7 +5258,7 @@ static int smb2_rename(struct ksmbd_work *work,
                        goto out;
 
                len = strlen(new_name);
-               if (new_name[len - 1] != '/') {
+               if (len > 0 && new_name[len - 1] != '/') {
                        pr_err("not allow base filename in rename\n");
                        rc = -ESHARE;
                        goto out;
@@ -5327,11 +5286,14 @@ static int smb2_rename(struct ksmbd_work *work,
        }
 
        ksmbd_debug(SMB, "new name %s\n", new_name);
-       rc = ksmbd_vfs_kern_path(new_name, 0, &path, 1);
-       if (rc)
+       rc = ksmbd_vfs_kern_path(work, new_name, LOOKUP_NO_SYMLINKS, &path, 1);
+       if (rc) {
+               if (rc != -ENOENT)
+                       goto out;
                file_present = false;
-       else
+       } else {
                path_put(&path);
+       }
 
        if (ksmbd_share_veto_filename(share, new_name)) {
                rc = -ENOENT;
@@ -5401,11 +5363,14 @@ static int smb2_create_link(struct ksmbd_work *work,
        }
 
        ksmbd_debug(SMB, "target name is %s\n", target_name);
-       rc = ksmbd_vfs_kern_path(link_name, 0, &path, 0);
-       if (rc)
+       rc = ksmbd_vfs_kern_path(work, link_name, LOOKUP_NO_SYMLINKS, &path, 0);
+       if (rc) {
+               if (rc != -ENOENT)
+                       goto out;
                file_present = false;
-       else
+       } else {
                path_put(&path);
+       }
 
        if (file_info->ReplaceIfExists) {
                if (file_present) {
@@ -5565,7 +5530,7 @@ static int set_file_allocation_info(struct ksmbd_work *work,
                 * inode size is retained by backup inode size.
                 */
                size = i_size_read(inode);
-               rc = ksmbd_vfs_truncate(work, NULL, fp, alloc_blks * 512);
+               rc = ksmbd_vfs_truncate(work, fp, alloc_blks * 512);
                if (rc) {
                        pr_err("truncate failed! filename : %s, err %d\n",
                               fp->filename, rc);
@@ -5602,7 +5567,7 @@ static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
        if (inode->i_sb->s_magic != MSDOS_SUPER_MAGIC) {
                ksmbd_debug(SMB, "filename : %s truncated to newsize %lld\n",
                            fp->filename, newsize);
-               rc = ksmbd_vfs_truncate(work, NULL, fp, newsize);
+               rc = ksmbd_vfs_truncate(work, fp, newsize);
                if (rc) {
                        ksmbd_debug(SMB, "truncate failed! filename : %s err %d\n",
                                    fp->filename, rc);
@@ -5879,7 +5844,7 @@ int smb2_set_info(struct ksmbd_work *work)
        return 0;
 
 err_out:
-       if (rc == -EACCES || rc == -EPERM)
+       if (rc == -EACCES || rc == -EPERM || rc == -EXDEV)
                rsp->hdr.Status = STATUS_ACCESS_DENIED;
        else if (rc == -EINVAL)
                rsp->hdr.Status = STATUS_INVALID_PARAMETER;
index 43d3123..40f4faf 100644 (file)
@@ -129,16 +129,22 @@ int ksmbd_lookup_protocol_idx(char *str)
  *
  * check for valid smb signature and packet direction(request/response)
  *
- * Return:      0 on success, otherwise 1
+ * Return:      0 on success, otherwise -EINVAL
  */
 int ksmbd_verify_smb_message(struct ksmbd_work *work)
 {
-       struct smb2_hdr *smb2_hdr = work->request_buf;
+       struct smb2_hdr *smb2_hdr = work->request_buf + work->next_smb2_rcv_hdr_off;
+       struct smb_hdr *hdr;
 
        if (smb2_hdr->ProtocolId == SMB2_PROTO_NUMBER)
                return ksmbd_smb2_check_message(work);
 
-       return 0;
+       hdr = work->request_buf;
+       if (*(__le32 *)hdr->Protocol == SMB1_PROTO_NUMBER &&
+           hdr->Command == SMB_COM_NEGOTIATE)
+               return 0;
+
+       return -EINVAL;
 }
 
 /**
@@ -265,7 +271,6 @@ static int ksmbd_negotiate_smb_dialect(void *buf)
        return BAD_PROT_ID;
 }
 
-#define SMB_COM_NEGOTIATE      0x72
 int ksmbd_init_smb_server(struct ksmbd_work *work)
 {
        struct ksmbd_conn *conn = work->conn;
index 57c667c..0a6af44 100644 (file)
                FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES)
 
 #define SMB1_PROTO_NUMBER              cpu_to_le32(0x424d53ff)
+#define SMB_COM_NEGOTIATE              0x72
 
 #define SMB1_CLIENT_GUID_SIZE          (16)
 struct smb_hdr {
index 52b2556..3a7fa23 100644 (file)
@@ -20,7 +20,6 @@
 #define SUBMOD_NAME    "smb_direct"
 
 #include <linux/kthread.h>
-#include <linux/rwlock.h>
 #include <linux/list.h>
 #include <linux/mempool.h>
 #include <linux/highmem.h>
index b047f29..b419542 100644 (file)
@@ -19,6 +19,8 @@
 #include <linux/sched/xacct.h>
 #include <linux/crc32c.h>
 
+#include "../internal.h"       /* for vfs_path_lookup */
+
 #include "glob.h"
 #include "oplock.h"
 #include "connection.h"
@@ -44,7 +46,6 @@ static char *extract_last_component(char *path)
                p++;
        } else {
                p = NULL;
-               pr_err("Invalid path %s\n", path);
        }
        return p;
 }
@@ -155,7 +156,7 @@ int ksmbd_vfs_query_maximal_access(struct user_namespace *user_ns,
 /**
  * ksmbd_vfs_create() - vfs helper for smb create file
  * @work:      work
- * @name:      file name
+ * @name:      file name that is relative to share
  * @mode:      file create mode
  *
  * Return:     0 on success, otherwise error
@@ -166,7 +167,8 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
        struct dentry *dentry;
        int err;
 
-       dentry = kern_path_create(AT_FDCWD, name, &path, 0);
+       dentry = ksmbd_vfs_kern_path_create(work, name,
+                                           LOOKUP_NO_SYMLINKS, &path);
        if (IS_ERR(dentry)) {
                err = PTR_ERR(dentry);
                if (err != -ENOENT)
@@ -191,7 +193,7 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
 /**
  * ksmbd_vfs_mkdir() - vfs helper for smb create directory
  * @work:      work
- * @name:      directory name
+ * @name:      directory name that is relative to share
  * @mode:      directory create mode
  *
  * Return:     0 on success, otherwise error
@@ -203,7 +205,9 @@ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
        struct dentry *dentry;
        int err;
 
-       dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
+       dentry = ksmbd_vfs_kern_path_create(work, name,
+                                           LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY,
+                                           &path);
        if (IS_ERR(dentry)) {
                err = PTR_ERR(dentry);
                if (err != -EEXIST)
@@ -578,7 +582,7 @@ int ksmbd_vfs_fsync(struct ksmbd_work *work, u64 fid, u64 p_id)
 
 /**
  * ksmbd_vfs_remove_file() - vfs helper for smb rmdir or unlink
- * @name:      absolute directory or file name
+ * @name:      directory or file name that is relative to share
  *
  * Return:     0 on success, otherwise error
  */
@@ -588,16 +592,11 @@ int ksmbd_vfs_remove_file(struct ksmbd_work *work, char *name)
        struct path path;
        struct dentry *parent;
        int err;
-       int flags = 0;
 
        if (ksmbd_override_fsids(work))
                return -ENOMEM;
 
-       if (test_share_config_flag(work->tcon->share_conf,
-                                  KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
-               flags = LOOKUP_FOLLOW;
-
-       err = kern_path(name, flags, &path);
+       err = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, false);
        if (err) {
                ksmbd_debug(VFS, "can't get %s, err %d\n", name, err);
                ksmbd_revert_fsids(work);
@@ -642,7 +641,7 @@ out_err:
 /**
  * ksmbd_vfs_link() - vfs helper for creating smb hardlink
  * @oldname:   source file name
- * @newname:   hardlink name
+ * @newname:   hardlink name that is relative to share
  *
  * Return:     0 on success, otherwise error
  */
@@ -652,24 +651,20 @@ int ksmbd_vfs_link(struct ksmbd_work *work, const char *oldname,
        struct path oldpath, newpath;
        struct dentry *dentry;
        int err;
-       int flags = 0;
 
        if (ksmbd_override_fsids(work))
                return -ENOMEM;
 
-       if (test_share_config_flag(work->tcon->share_conf,
-                                  KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
-               flags = LOOKUP_FOLLOW;
-
-       err = kern_path(oldname, flags, &oldpath);
+       err = kern_path(oldname, LOOKUP_NO_SYMLINKS, &oldpath);
        if (err) {
                pr_err("cannot get linux path for %s, err = %d\n",
                       oldname, err);
                goto out1;
        }
 
-       dentry = kern_path_create(AT_FDCWD, newname, &newpath,
-                                 flags | LOOKUP_REVAL);
+       dentry = ksmbd_vfs_kern_path_create(work, newname,
+                                           LOOKUP_NO_SYMLINKS | LOOKUP_REVAL,
+                                           &newpath);
        if (IS_ERR(dentry)) {
                err = PTR_ERR(dentry);
                pr_err("path create err for %s, err %d\n", newname, err);
@@ -788,21 +783,19 @@ int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
        struct dentry *src_dent, *trap_dent, *src_child;
        char *dst_name;
        int err;
-       int flags;
 
        dst_name = extract_last_component(newname);
-       if (!dst_name)
-               return -EINVAL;
+       if (!dst_name) {
+               dst_name = newname;
+               newname = "";
+       }
 
        src_dent_parent = dget_parent(fp->filp->f_path.dentry);
        src_dent = fp->filp->f_path.dentry;
 
-       flags = LOOKUP_DIRECTORY;
-       if (test_share_config_flag(work->tcon->share_conf,
-                                  KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
-               flags |= LOOKUP_FOLLOW;
-
-       err = kern_path(newname, flags, &dst_path);
+       err = ksmbd_vfs_kern_path(work, newname,
+                                 LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY,
+                                 &dst_path, false);
        if (err) {
                ksmbd_debug(VFS, "Cannot get path for %s [%d]\n", newname, err);
                goto out;
@@ -848,61 +841,43 @@ out:
 /**
  * ksmbd_vfs_truncate() - vfs helper for smb file truncate
  * @work:      work
- * @name:      old filename
  * @fid:       file id of old file
  * @size:      truncate to given size
  *
  * Return:     0 on success, otherwise error
  */
-int ksmbd_vfs_truncate(struct ksmbd_work *work, const char *name,
+int ksmbd_vfs_truncate(struct ksmbd_work *work,
                       struct ksmbd_file *fp, loff_t size)
 {
-       struct path path;
        int err = 0;
+       struct file *filp;
 
-       if (name) {
-               err = kern_path(name, 0, &path);
-               if (err) {
-                       pr_err("cannot get linux path for %s, err %d\n",
-                              name, err);
-                       return err;
-               }
-               err = vfs_truncate(&path, size);
-               if (err)
-                       pr_err("truncate failed for %s err %d\n",
-                              name, err);
-               path_put(&path);
-       } else {
-               struct file *filp;
-
-               filp = fp->filp;
-
-               /* Do we need to break any of a levelII oplock? */
-               smb_break_all_levII_oplock(work, fp, 1);
+       filp = fp->filp;
 
-               if (!work->tcon->posix_extensions) {
-                       struct inode *inode = file_inode(filp);
+       /* Do we need to break any of a levelII oplock? */
+       smb_break_all_levII_oplock(work, fp, 1);
 
-                       if (size < inode->i_size) {
-                               err = check_lock_range(filp, size,
-                                                      inode->i_size - 1, WRITE);
-                       } else {
-                               err = check_lock_range(filp, inode->i_size,
-                                                      size - 1, WRITE);
-                       }
+       if (!work->tcon->posix_extensions) {
+               struct inode *inode = file_inode(filp);
 
-                       if (err) {
-                               pr_err("failed due to lock\n");
-                               return -EAGAIN;
-                       }
+               if (size < inode->i_size) {
+                       err = check_lock_range(filp, size,
+                                              inode->i_size - 1, WRITE);
+               } else {
+                       err = check_lock_range(filp, inode->i_size,
+                                              size - 1, WRITE);
                }
 
-               err = vfs_truncate(&filp->f_path, size);
-               if (err)
-                       pr_err("truncate failed for filename : %s err %d\n",
-                              fp->filename, err);
+               if (err) {
+                       pr_err("failed due to lock\n");
+                       return -EAGAIN;
+               }
        }
 
+       err = vfs_truncate(&filp->f_path, size);
+       if (err)
+               pr_err("truncate failed for filename : %s err %d\n",
+                      fp->filename, err);
        return err;
 }
 
@@ -1220,22 +1195,25 @@ static int ksmbd_vfs_lookup_in_dir(struct path *dir, char *name, size_t namelen)
 
 /**
  * ksmbd_vfs_kern_path() - lookup a file and get path info
- * @name:      name of file for lookup
+ * @name:      file path that is relative to share
  * @flags:     lookup flags
  * @path:      if lookup succeed, return path info
  * @caseless:  caseless filename lookup
  *
  * Return:     0 on success, otherwise error
  */
-int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
-                       bool caseless)
+int ksmbd_vfs_kern_path(struct ksmbd_work *work, char *name,
+                       unsigned int flags, struct path *path, bool caseless)
 {
+       struct ksmbd_share_config *share_conf = work->tcon->share_conf;
        int err;
 
-       if (name[0] != '/')
-               return -EINVAL;
-
-       err = kern_path(name, flags, path);
+       flags |= LOOKUP_BENEATH;
+       err = vfs_path_lookup(share_conf->vfs_path.dentry,
+                             share_conf->vfs_path.mnt,
+                             name,
+                             flags,
+                             path);
        if (!err)
                return 0;
 
@@ -1249,11 +1227,10 @@ int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
                        return -ENOMEM;
 
                path_len = strlen(filepath);
-               remain_len = path_len - 1;
+               remain_len = path_len;
 
-               err = kern_path("/", flags, &parent);
-               if (err)
-                       goto out;
+               parent = share_conf->vfs_path;
+               path_get(&parent);
 
                while (d_can_lookup(parent.dentry)) {
                        char *filename = filepath + path_len - remain_len;
@@ -1266,21 +1243,21 @@ int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
 
                        err = ksmbd_vfs_lookup_in_dir(&parent, filename,
                                                      filename_len);
-                       if (err) {
-                               path_put(&parent);
+                       path_put(&parent);
+                       if (err)
                                goto out;
-                       }
 
-                       path_put(&parent);
                        next[0] = '\0';
 
-                       err = kern_path(filepath, flags, &parent);
+                       err = vfs_path_lookup(share_conf->vfs_path.dentry,
+                                             share_conf->vfs_path.mnt,
+                                             filepath,
+                                             flags,
+                                             &parent);
                        if (err)
                                goto out;
-
-                       if (is_last) {
-                               path->mnt = parent.mnt;
-                               path->dentry = parent.dentry;
+                       else if (is_last) {
+                               *path = parent;
                                goto out;
                        }
 
@@ -1296,6 +1273,23 @@ out:
        return err;
 }
 
+struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
+                                         const char *name,
+                                         unsigned int flags,
+                                         struct path *path)
+{
+       char *abs_name;
+       struct dentry *dent;
+
+       abs_name = convert_to_unix_name(work->tcon->share_conf, name);
+       if (!abs_name)
+               return ERR_PTR(-ENOMEM);
+
+       dent = kern_path_create(AT_FDCWD, abs_name, path, flags);
+       kfree(abs_name);
+       return dent;
+}
+
 int ksmbd_vfs_remove_acl_xattrs(struct user_namespace *user_ns,
                                struct dentry *dentry)
 {
index 85db50a..7b1dcaa 100644 (file)
@@ -126,7 +126,7 @@ int ksmbd_vfs_link(struct ksmbd_work *work,
 int ksmbd_vfs_getattr(struct path *path, struct kstat *stat);
 int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
                        char *newname);
-int ksmbd_vfs_truncate(struct ksmbd_work *work, const char *name,
+int ksmbd_vfs_truncate(struct ksmbd_work *work,
                       struct ksmbd_file *fp, loff_t size);
 struct srv_copychunk;
 int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work,
@@ -152,8 +152,13 @@ int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
                                size_t *xattr_stream_name_size, int s_type);
 int ksmbd_vfs_remove_xattr(struct user_namespace *user_ns,
                           struct dentry *dentry, char *attr_name);
-int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
+int ksmbd_vfs_kern_path(struct ksmbd_work *work,
+                       char *name, unsigned int flags, struct path *path,
                        bool caseless);
+struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
+                                         const char *name,
+                                         unsigned int flags,
+                                         struct path *path);
 int ksmbd_vfs_empty_dir(struct ksmbd_file *fp);
 void ksmbd_vfs_set_fadvise(struct file *filp, __le32 option);
 int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
index c69a0bb..4f1a451 100644 (file)
@@ -134,18 +134,9 @@ svcxdr_decode_owner(struct xdr_stream *xdr, struct xdr_netobj *obj)
 static inline bool
 svcxdr_encode_owner(struct xdr_stream *xdr, const struct xdr_netobj *obj)
 {
-       unsigned int quadlen = XDR_QUADLEN(obj->len);
-       __be32 *p;
-
-       if (xdr_stream_encode_u32(xdr, obj->len) < 0)
-               return false;
-       p = xdr_reserve_space(xdr, obj->len);
-       if (!p)
+       if (obj->len > XDR_MAX_NETOBJ)
                return false;
-       p[quadlen - 1] = 0;     /* XDR pad */
-       memcpy(p, obj->data, obj->len);
-
-       return true;
+       return xdr_stream_encode_opaque(xdr, obj->data, obj->len) > 0;
 }
 
 #endif /* _LOCKD_SVCXDR_H_ */
index 4235641..3f4027a 100644 (file)
@@ -3570,7 +3570,7 @@ static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_s
 }
 
 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
-                               struct nfsd4_session *session, u32 req)
+               struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
 {
        struct nfs4_client *clp = session->se_client;
        struct svc_xprt *xpt = rqst->rq_xprt;
@@ -3593,6 +3593,8 @@ static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
        else
                status = nfserr_inval;
        spin_unlock(&clp->cl_lock);
+       if (status == nfs_ok && conn)
+               *conn = c;
        return status;
 }
 
@@ -3617,8 +3619,16 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
        status = nfserr_wrong_cred;
        if (!nfsd4_mach_creds_match(session->se_client, rqstp))
                goto out;
-       status = nfsd4_match_existing_connection(rqstp, session, bcts->dir);
-       if (status == nfs_ok || status == nfserr_inval)
+       status = nfsd4_match_existing_connection(rqstp, session,
+                       bcts->dir, &conn);
+       if (status == nfs_ok) {
+               if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
+                               bcts->dir == NFS4_CDFC4_BACK)
+                       conn->cn_flags |= NFS4_CDFC4_BACK;
+               nfsd4_probe_callback(session->se_client);
+               goto out;
+       }
+       if (status == nfserr_inval)
                goto out;
        status = nfsd4_map_bcts_dir(&bcts->dir);
        if (status)
index 359524b..801e60b 100644 (file)
@@ -3951,7 +3951,7 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
                oi = OCFS2_I(inode);
                oi->ip_dir_lock_gen++;
                mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
-               goto out;
+               goto out_forget;
        }
 
        if (!S_ISREG(inode->i_mode))
@@ -3982,6 +3982,7 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
                filemap_fdatawait(mapping);
        }
 
+out_forget:
        forget_all_cached_acls(inode);
 
 out:
index a6ee23a..66645a5 100644 (file)
 #include <linux/buffer_head.h>
 #include "qnx4.h"
 
+/*
+ * A qnx4 directory entry is an inode entry or link info
+ * depending on the status field in the last byte. The
+ * first byte is where the name start either way, and a
+ * zero means it's empty.
+ *
+ * Also, due to a bug in gcc, we don't want to use the
+ * real (differently sized) name arrays in the inode and
+ * link entries, but always the 'de_name[]' one in the
+ * fake struct entry.
+ *
+ * See
+ *
+ *   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99578#c6
+ *
+ * for details, but basically gcc will take the size of the
+ * 'name' array from one of the used union entries randomly.
+ *
+ * This use of 'de_name[]' (48 bytes) avoids the false positive
+ * warnings that would happen if gcc decides to use 'inode.di_name'
+ * (16 bytes) even when the pointer and size were to come from
+ * 'link.dl_name' (48 bytes).
+ *
+ * In all cases the actual name pointer itself is the same, it's
+ * only the gcc internal 'what is the size of this field' logic
+ * that can get confused.
+ */
+union qnx4_directory_entry {
+       struct {
+               const char de_name[48];
+               u8 de_pad[15];
+               u8 de_status;
+       };
+       struct qnx4_inode_entry inode;
+       struct qnx4_link_info link;
+};
+
 static int qnx4_readdir(struct file *file, struct dir_context *ctx)
 {
        struct inode *inode = file_inode(file);
        unsigned int offset;
        struct buffer_head *bh;
-       struct qnx4_inode_entry *de;
-       struct qnx4_link_info *le;
        unsigned long blknum;
        int ix, ino;
        int size;
@@ -38,27 +73,27 @@ static int qnx4_readdir(struct file *file, struct dir_context *ctx)
                }
                ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK;
                for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) {
+                       union qnx4_directory_entry *de;
+
                        offset = ix * QNX4_DIR_ENTRY_SIZE;
-                       de = (struct qnx4_inode_entry *) (bh->b_data + offset);
-                       if (!de->di_fname[0])
+                       de = (union qnx4_directory_entry *) (bh->b_data + offset);
+
+                       if (!de->de_name[0])
                                continue;
-                       if (!(de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
+                       if (!(de->de_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
                                continue;
-                       if (!(de->di_status & QNX4_FILE_LINK))
-                               size = QNX4_SHORT_NAME_MAX;
-                       else
-                               size = QNX4_NAME_MAX;
-                       size = strnlen(de->di_fname, size);
-                       QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname));
-                       if (!(de->di_status & QNX4_FILE_LINK))
+                       if (!(de->de_status & QNX4_FILE_LINK)) {
+                               size = sizeof(de->inode.di_fname);
                                ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
-                       else {
-                               le  = (struct qnx4_link_info*)de;
-                               ino = ( le32_to_cpu(le->dl_inode_blk) - 1 ) *
+                       else {
+                               size = sizeof(de->link.dl_fname);
+                               ino = ( le32_to_cpu(de->link.dl_inode_blk) - 1 ) *
                                        QNX4_INODES_PER_BLOCK +
-                                       le->dl_inode_ndx;
+                                       de->link.dl_inode_ndx;
                        }
-                       if (!dir_emit(ctx, de->di_fname, size, ino, DT_UNKNOWN)) {
+                       size = strnlen(de->de_name, size);
+                       QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, name));
+                       if (!dir_emit(ctx, de->de_name, size, ino, DT_UNKNOWN)) {
                                brelse(bh);
                                return 0;
                        }
index d01e8c9..926f87c 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: LGPL-2.1+ */
 /*
- *   fs/cifs/smbfsctl.h: SMB, CIFS, SMB2 FSCTL definitions
+ *   SMB, CIFS, SMB2 FSCTL definitions
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2013
  *   Author(s): Steve French (sfrench@us.ibm.com)
index 4f5e59f..37dd3fe 100644 (file)
 
 #define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */
 
-#define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000')
-#define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377')
-#define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376')
-#define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375')
+static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375";
 
 static int follow_symlinks;
 module_param(follow_symlinks, int, 0444);
@@ -386,12 +383,7 @@ fail_nomem:
 
 static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)
 {
-       unsigned char *options = data;
-
-       if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 &&
-                      options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 &&
-                      options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 &&
-                      options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) {
+       if (data && !memcmp(data, VBSF_MOUNT_SIGNATURE, 4)) {
                vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n");
                return -EINVAL;
        }
index 77e159a..60a4372 100644 (file)
@@ -177,7 +177,7 @@ static int build_merkle_tree(struct file *filp,
         * (level 0) and ascending to the root node (level 'num_levels - 1').
         * Then at the end (level 'num_levels'), calculate the root hash.
         */
-       blocks = (inode->i_size + params->block_size - 1) >>
+       blocks = ((u64)inode->i_size + params->block_size - 1) >>
                 params->log_blocksize;
        for (level = 0; level <= params->num_levels; level++) {
                err = build_merkle_tree_level(filp, level, blocks, params,
index 60ff8af..92df87f 100644 (file)
@@ -89,7 +89,7 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params,
         */
 
        /* Compute number of levels and the number of blocks in each level */
-       blocks = (inode->i_size + params->block_size - 1) >> log_blocksize;
+       blocks = ((u64)inode->i_size + params->block_size - 1) >> log_blocksize;
        pr_debug("Data is %lld bytes (%llu blocks)\n", inode->i_size, blocks);
        while (blocks > 1) {
                if (params->num_levels >= FS_VERITY_MAX_LEVELS) {
index a0212e6..027faa8 100644 (file)
@@ -14,14 +14,6 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
 }
 #endif
 
-#ifndef acpi_os_memmap
-static inline void __iomem *acpi_os_memmap(acpi_physical_address phys,
-                                           acpi_size size)
-{
-       return ioremap_cache(phys, size);
-}
-#endif
-
 extern bool acpi_permanent_mmap;
 
 void __iomem __ref
index e93375c..cc7338f 100644 (file)
@@ -1023,16 +1023,7 @@ static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
        port &= IO_SPACE_LIMIT;
        return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
 }
-#define __pci_ioport_unmap __pci_ioport_unmap
-static inline void __pci_ioport_unmap(void __iomem *p)
-{
-       uintptr_t start = (uintptr_t) PCI_IOBASE;
-       uintptr_t addr = (uintptr_t) p;
-
-       if (addr >= start && addr < start + IO_SPACE_LIMIT)
-               return;
-       iounmap(p);
-}
+#define ARCH_HAS_GENERIC_IOPORT_MAP
 #endif
 
 #ifndef ioport_unmap
@@ -1048,21 +1039,10 @@ extern void ioport_unmap(void __iomem *p);
 #endif /* CONFIG_HAS_IOPORT_MAP */
 
 #ifndef CONFIG_GENERIC_IOMAP
-struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
-
-#ifndef __pci_ioport_unmap
-static inline void __pci_ioport_unmap(void __iomem *p) {}
-#endif
-
 #ifndef pci_iounmap
-#define pci_iounmap pci_iounmap
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
-{
-       __pci_ioport_unmap(p);
-}
+#define ARCH_WANTS_GENERIC_PCI_IOUNMAP
+#endif
 #endif
-#endif /* CONFIG_GENERIC_IOMAP */
 
 #ifndef xlate_dev_mem_ptr
 #define xlate_dev_mem_ptr xlate_dev_mem_ptr
index 9b3eb6d..08237ae 100644 (file)
@@ -110,16 +110,6 @@ static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
 }
 #endif
 
-#ifdef CONFIG_PCI
-/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
-struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
-#elif defined(CONFIG_GENERIC_IOMAP)
-struct pci_dev;
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{ }
-#endif
-
 #include <asm-generic/pci_iomap.h>
 
 #endif
index c1ab6a6..d3eae6c 100644 (file)
@@ -197,10 +197,12 @@ static inline int hv_cpu_number_to_vp_number(int cpu_number)
        return hv_vp_index[cpu_number];
 }
 
-static inline int cpumask_to_vpset(struct hv_vpset *vpset,
-                                   const struct cpumask *cpus)
+static inline int __cpumask_to_vpset(struct hv_vpset *vpset,
+                                   const struct cpumask *cpus,
+                                   bool exclude_self)
 {
        int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
+       int this_cpu = smp_processor_id();
 
        /* valid_bank_mask can represent up to 64 banks */
        if (hv_max_vp_index / 64 >= 64)
@@ -218,6 +220,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
         * Some banks may end up being empty but this is acceptable.
         */
        for_each_cpu(cpu, cpus) {
+               if (exclude_self && cpu == this_cpu)
+                       continue;
                vcpu = hv_cpu_number_to_vp_number(cpu);
                if (vcpu == VP_INVAL)
                        return -1;
@@ -232,6 +236,19 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
        return nr_bank;
 }
 
+static inline int cpumask_to_vpset(struct hv_vpset *vpset,
+                                   const struct cpumask *cpus)
+{
+       return __cpumask_to_vpset(vpset, cpus, false);
+}
+
+static inline int cpumask_to_vpset_noself(struct hv_vpset *vpset,
+                                   const struct cpumask *cpus)
+{
+       WARN_ON_ONCE(preemptible());
+       return __cpumask_to_vpset(vpset, cpus, true);
+}
+
 void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
 bool hv_is_hyperv_initialized(void);
 bool hv_is_hibernation_supported(void);
index df636c6..5a2f9bf 100644 (file)
@@ -18,6 +18,7 @@ extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
 extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
                                        unsigned long offset,
                                        unsigned long maxlen);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
 /* Create a virtual mapping cookie for a port on a given PCI device.
  * Do not call this directly, it exists to make it easier for architectures
  * to override */
@@ -50,6 +51,8 @@ static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
 {
        return NULL;
 }
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{ }
 #endif
 
 #endif /* __ASM_GENERIC_PCI_IOMAP_H */
index aa50bf2..f2984af 100644 (file)
  * GCC 4.5 and later have a 32 bytes section alignment for structures.
  * Except GCC 4.9, that feels the need to align on 64 bytes.
  */
-#if __GNUC__ == 4 && __GNUC_MINOR__ == 9
-#define STRUCT_ALIGNMENT 64
-#else
 #define STRUCT_ALIGNMENT 32
-#endif
 #define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
 
 /*
index 864b999..90f2189 100644 (file)
@@ -61,7 +61,6 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
 int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
-int kvm_pmu_probe_pmuver(void);
 #else
 struct kvm_pmu {
 };
@@ -118,8 +117,6 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
        return 0;
 }
 
-static inline int kvm_pmu_probe_pmuver(void) { return 0xf; }
-
 #endif
 
 #endif
index 6486d3c..36f3368 100644 (file)
@@ -194,7 +194,7 @@ void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
 struct buffer_head *__bread_gfp(struct block_device *,
                                sector_t block, unsigned size, gfp_t gfp);
 void invalidate_bh_lrus(void);
-void invalidate_bh_lrus_cpu(int cpu);
+void invalidate_bh_lrus_cpu(void);
 bool has_bh_in_lru(int cpu, void *dummy);
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
@@ -408,7 +408,7 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; }
 static inline void invalidate_inode_buffers(struct inode *inode) {}
 static inline int remove_inode_buffers(struct inode *inode) { return 1; }
 static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
-static inline void invalidate_bh_lrus_cpu(int cpu) {}
+static inline void invalidate_bh_lrus_cpu(void) {}
 static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
 #define buffer_heads_over_limit 0
 
index e1c705f..db2e147 100644 (file)
@@ -752,107 +752,54 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
  * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
  * per-socket cgroup information except for memcg association.
  *
- * On legacy hierarchies, net_prio and net_cls controllers directly set
- * attributes on each sock which can then be tested by the network layer.
- * On the default hierarchy, each sock is associated with the cgroup it was
- * created in and the networking layer can match the cgroup directly.
- *
- * To avoid carrying all three cgroup related fields separately in sock,
- * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
- * On boot, sock_cgroup_data records the cgroup that the sock was created
- * in so that cgroup2 matches can be made; however, once either net_prio or
- * net_cls starts being used, the area is overridden to carry prioidx and/or
- * classid.  The two modes are distinguished by whether the lowest bit is
- * set.  Clear bit indicates cgroup pointer while set bit prioidx and
- * classid.
- *
- * While userland may start using net_prio or net_cls at any time, once
- * either is used, cgroup2 matching no longer works.  There is no reason to
- * mix the two and this is in line with how legacy and v2 compatibility is
- * handled.  On mode switch, cgroup references which are already being
- * pointed to by socks may be leaked.  While this can be remedied by adding
- * synchronization around sock_cgroup_data, given that the number of leaked
- * cgroups is bound and highly unlikely to be high, this seems to be the
- * better trade-off.
+ * On legacy hierarchies, net_prio and net_cls controllers directly
+ * set attributes on each sock which can then be tested by the network
+ * layer. On the default hierarchy, each sock is associated with the
+ * cgroup it was created in and the networking layer can match the
+ * cgroup directly.
  */
 struct sock_cgroup_data {
-       union {
-#ifdef __LITTLE_ENDIAN
-               struct {
-                       u8      is_data : 1;
-                       u8      no_refcnt : 1;
-                       u8      unused : 6;
-                       u8      padding;
-                       u16     prioidx;
-                       u32     classid;
-               } __packed;
-#else
-               struct {
-                       u32     classid;
-                       u16     prioidx;
-                       u8      padding;
-                       u8      unused : 6;
-                       u8      no_refcnt : 1;
-                       u8      is_data : 1;
-               } __packed;
+       struct cgroup   *cgroup; /* v2 */
+#ifdef CONFIG_CGROUP_NET_CLASSID
+       u32             classid; /* v1 */
+#endif
+#ifdef CONFIG_CGROUP_NET_PRIO
+       u16             prioidx; /* v1 */
 #endif
-               u64             val;
-       };
 };
 
-/*
- * There's a theoretical window where the following accessors race with
- * updaters and return part of the previous pointer as the prioidx or
- * classid.  Such races are short-lived and the result isn't critical.
- */
 static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
 {
-       /* fallback to 1 which is always the ID of the root cgroup */
-       return (skcd->is_data & 1) ? skcd->prioidx : 1;
+#ifdef CONFIG_CGROUP_NET_PRIO
+       return READ_ONCE(skcd->prioidx);
+#else
+       return 1;
+#endif
 }
 
 static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
 {
-       /* fallback to 0 which is the unconfigured default classid */
-       return (skcd->is_data & 1) ? skcd->classid : 0;
+#ifdef CONFIG_CGROUP_NET_CLASSID
+       return READ_ONCE(skcd->classid);
+#else
+       return 0;
+#endif
 }
 
-/*
- * If invoked concurrently, the updaters may clobber each other.  The
- * caller is responsible for synchronization.
- */
 static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
                                           u16 prioidx)
 {
-       struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
-
-       if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
-               return;
-
-       if (!(skcd_buf.is_data & 1)) {
-               skcd_buf.val = 0;
-               skcd_buf.is_data = 1;
-       }
-
-       skcd_buf.prioidx = prioidx;
-       WRITE_ONCE(skcd->val, skcd_buf.val);    /* see sock_cgroup_ptr() */
+#ifdef CONFIG_CGROUP_NET_PRIO
+       WRITE_ONCE(skcd->prioidx, prioidx);
+#endif
 }
 
 static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
                                           u32 classid)
 {
-       struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
-
-       if (sock_cgroup_classid(&skcd_buf) == classid)
-               return;
-
-       if (!(skcd_buf.is_data & 1)) {
-               skcd_buf.val = 0;
-               skcd_buf.is_data = 1;
-       }
-
-       skcd_buf.classid = classid;
-       WRITE_ONCE(skcd->val, skcd_buf.val);    /* see sock_cgroup_ptr() */
+#ifdef CONFIG_CGROUP_NET_CLASSID
+       WRITE_ONCE(skcd->classid, classid);
+#endif
 }
 
 #else  /* CONFIG_SOCK_CGROUP_DATA */
index 7bf6045..75c1514 100644 (file)
@@ -829,33 +829,13 @@ static inline void cgroup_account_cputime_field(struct task_struct *task,
  */
 #ifdef CONFIG_SOCK_CGROUP_DATA
 
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
-extern spinlock_t cgroup_sk_update_lock;
-#endif
-
-void cgroup_sk_alloc_disable(void);
 void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
 void cgroup_sk_clone(struct sock_cgroup_data *skcd);
 void cgroup_sk_free(struct sock_cgroup_data *skcd);
 
 static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
 {
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
-       unsigned long v;
-
-       /*
-        * @skcd->val is 64bit but the following is safe on 32bit too as we
-        * just need the lower ulong to be written and read atomically.
-        */
-       v = READ_ONCE(skcd->val);
-
-       if (v & 3)
-               return &cgrp_dfl_root.cgrp;
-
-       return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
-#else
-       return (struct cgroup *)(unsigned long)skcd->val;
-#endif
+       return skcd->cgroup;
 }
 
 #else  /* CONFIG_CGROUP_DATA */
index 49b0ac8..3c4de9b 100644 (file)
 #define __no_sanitize_coverage
 #endif
 
-/*
- * Not all versions of clang implement the type-generic versions
- * of the builtin overflow checkers. Fortunately, clang implements
- * __has_builtin allowing us to avoid awkward version
- * checks. Unfortunately, we don't know which version of gcc clang
- * pretends to be, so the macro may or may not be defined.
- */
-#if __has_builtin(__builtin_mul_overflow) && \
-    __has_builtin(__builtin_add_overflow) && \
-    __has_builtin(__builtin_sub_overflow)
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
-#endif
-
 #if __has_feature(shadow_call_stack)
 # define __noscs       __attribute__((__no_sanitize__("shadow-call-stack")))
 #endif
index 21c36b6..bd2b881 100644 (file)
 
 #if GCC_VERSION >= 70000
 #define KASAN_ABI_VERSION 5
-#elif GCC_VERSION >= 50000
+#else
 #define KASAN_ABI_VERSION 4
-#elif GCC_VERSION >= 40902
-#define KASAN_ABI_VERSION 3
 #endif
 
 #if __has_attribute(__no_sanitize_address__)
 #define __no_sanitize_coverage
 #endif
 
-#if GCC_VERSION >= 50100
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
-#endif
-
 /*
  * Turn individual warnings and errors on and off locally, depending
  * on version.
index b67261a..3d5af56 100644 (file)
@@ -188,6 +188,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
     (typeof(ptr)) (__ptr + (off)); })
 #endif
 
+#define absolute_pointer(val)  RELOC_HIDE((void *)(val), 0)
+
 #ifndef OPTIMIZER_HIDE_VAR
 /* Make the optimizer believe the variable can be manipulated arbitrarily. */
 #define OPTIMIZER_HIDE_VAR(var)                                                \
index 8f2106e..e6ec634 100644 (file)
  */
 
 /*
- * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
- * In the meantime, to support gcc < 5, we implement __has_attribute
- * by hand.
- */
-#ifndef __has_attribute
-# define __has_attribute(x) __GCC4_has_attribute_##x
-# define __GCC4_has_attribute___assume_aligned__      1
-# define __GCC4_has_attribute___copy__                0
-# define __GCC4_has_attribute___designated_init__     0
-# define __GCC4_has_attribute___error__               1
-# define __GCC4_has_attribute___externally_visible__  1
-# define __GCC4_has_attribute___no_caller_saved_registers__ 0
-# define __GCC4_has_attribute___noclone__             1
-# define __GCC4_has_attribute___no_profile_instrument_function__ 0
-# define __GCC4_has_attribute___nonstring__           0
-# define __GCC4_has_attribute___no_sanitize_address__ 1
-# define __GCC4_has_attribute___no_sanitize_undefined__ 1
-# define __GCC4_has_attribute___no_sanitize_coverage__ 0
-# define __GCC4_has_attribute___fallthrough__         0
-# define __GCC4_has_attribute___warning__             1
-#endif
-
-/*
  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alias-function-attribute
  */
 #define __alias(symbol)                 __attribute__((__alias__(#symbol)))
@@ -77,7 +54,6 @@
  * compiler should see some alignment anyway, when the return value is
  * massaged by 'flags = ptr & 3; ptr &= ~3;').
  *
- * Optional: only supported since gcc >= 4.9
  * Optional: not supported by icc
  *
  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-assume_005faligned-function-attribute
index c6bc45a..435777a 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0
- * Copyright 2019-2021 NXP Semiconductors
+ * Copyright 2019-2021 NXP
  */
 
 #ifndef _NET_DSA_TAG_OCELOT_H
index 23e4ee5..9ee238a 100644 (file)
@@ -251,7 +251,7 @@ static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
 }
 
 void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
                                    irq_hw_number_t hwirq_max, int direct_max,
                                    const struct irq_domain_ops *ops,
                                    void *host_data);
index 041ca7f..0f18df7 100644 (file)
@@ -608,7 +608,6 @@ struct kvm {
        unsigned long mmu_notifier_range_start;
        unsigned long mmu_notifier_range_end;
 #endif
-       long tlbs_dirty;
        struct list_head devices;
        u64 manual_dirty_log_protect;
        struct dentry *debugfs_dentry;
@@ -721,11 +720,6 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
        return NULL;
 }
 
-static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
-{
-       return vcpu->vcpu_idx;
-}
-
 #define kvm_for_each_memslot(memslot, slots)                           \
        for (memslot = &slots->memslots[0];                             \
             memslot < slots->memslots + slots->used_slots; memslot++)  \
index ffb787d..5e6dc38 100644 (file)
@@ -80,6 +80,9 @@ struct mdio_driver {
 
        /* Clears up any memory if needed */
        void (*remove)(struct mdio_device *mdiodev);
+
+       /* Quiesces the device on system shutdown, turns off interrupts etc */
+       void (*shutdown)(struct mdio_device *mdiodev);
 };
 
 static inline struct mdio_driver *
index b066024..34de69b 100644 (file)
@@ -118,6 +118,7 @@ int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
 int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
 
 void memblock_free_all(void);
+void memblock_free_ptr(void *ptr, size_t size);
 void reset_node_managed_pages(pg_data_t *pgdat);
 void reset_all_zones_managed_pages(void);
 
index 3262509..c8077e9 100644 (file)
@@ -19,6 +19,11 @@ struct migration_target_control;
  */
 #define MIGRATEPAGE_SUCCESS            0
 
+/*
+ * Keep sync with:
+ * - macro MIGRATE_REASON in include/trace/events/migrate.h
+ * - migrate_reason_names[MR_TYPES] in mm/debug.c
+ */
 enum migrate_reason {
        MR_COMPACTION,
        MR_MEMORY_FAILURE,
@@ -32,7 +37,6 @@ enum migrate_reason {
        MR_TYPES
 };
 
-/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
 extern const char *migrate_reason_names[MR_TYPES];
 
 #ifdef CONFIG_MIGRATION
index b179f1e..96e113e 100644 (file)
@@ -144,15 +144,6 @@ static inline void mmap_read_unlock(struct mm_struct *mm)
        up_read(&mm->mmap_lock);
 }
 
-static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
-{
-       if (mmap_read_trylock(mm)) {
-               rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
-               return true;
-       }
-       return false;
-}
-
 static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
 {
        __mmap_lock_trace_released(mm, false);
index 923dada..c0c0cef 100644 (file)
@@ -150,6 +150,20 @@ static inline int nvmem_cell_read_u64(struct device *dev,
        return -EOPNOTSUPP;
 }
 
+static inline int nvmem_cell_read_variable_le_u32(struct device *dev,
+                                                const char *cell_id,
+                                                u32 *val)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int nvmem_cell_read_variable_le_u64(struct device *dev,
+                                                 const char *cell_id,
+                                                 u64 *val)
+{
+       return -EOPNOTSUPP;
+}
+
 static inline struct nvmem_device *nvmem_device_get(struct device *dev,
                                                    const char *name)
 {
index 0f12345..4669632 100644 (file)
@@ -6,12 +6,9 @@
 #include <linux/limits.h>
 
 /*
- * In the fallback code below, we need to compute the minimum and
- * maximum values representable in a given type. These macros may also
- * be useful elsewhere, so we provide them outside the
- * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
- *
- * It would seem more obvious to do something like
+ * We need to compute the minimum and maximum values representable in a given
+ * type. These macros may also be useful elsewhere. It would seem more obvious
+ * to do something like:
  *
  * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
  * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
@@ -54,7 +51,6 @@ static inline bool __must_check __must_check_overflow(bool overflow)
        return unlikely(overflow);
 }
 
-#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
 /*
  * For simplicity and code hygiene, the fallback code below insists on
  * a, b and *d having the same type (similar to the min() and max()
@@ -90,134 +86,6 @@ static inline bool __must_check __must_check_overflow(bool overflow)
        __builtin_mul_overflow(__a, __b, __d);  \
 }))
 
-#else
-
-
-/* Checking for unsigned overflow is relatively easy without causing UB. */
-#define __unsigned_add_overflow(a, b, d) ({    \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = __a + __b;                       \
-       *__d < __a;                             \
-})
-#define __unsigned_sub_overflow(a, b, d) ({    \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = __a - __b;                       \
-       __a < __b;                              \
-})
-/*
- * If one of a or b is a compile-time constant, this avoids a division.
- */
-#define __unsigned_mul_overflow(a, b, d) ({            \
-       typeof(a) __a = (a);                            \
-       typeof(b) __b = (b);                            \
-       typeof(d) __d = (d);                            \
-       (void) (&__a == &__b);                          \
-       (void) (&__a == __d);                           \
-       *__d = __a * __b;                               \
-       __builtin_constant_p(__b) ?                     \
-         __b > 0 && __a > type_max(typeof(__a)) / __b : \
-         __a > 0 && __b > type_max(typeof(__b)) / __a;  \
-})
-
-/*
- * For signed types, detecting overflow is much harder, especially if
- * we want to avoid UB. But the interface of these macros is such that
- * we must provide a result in *d, and in fact we must produce the
- * result promised by gcc's builtins, which is simply the possibly
- * wrapped-around value. Fortunately, we can just formally do the
- * operations in the widest relevant unsigned type (u64) and then
- * truncate the result - gcc is smart enough to generate the same code
- * with and without the (u64) casts.
- */
-
-/*
- * Adding two signed integers can overflow only if they have the same
- * sign, and overflow has happened iff the result has the opposite
- * sign.
- */
-#define __signed_add_overflow(a, b, d) ({      \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = (u64)__a + (u64)__b;             \
-       (((~(__a ^ __b)) & (*__d ^ __a))        \
-               & type_min(typeof(__a))) != 0;  \
-})
-
-/*
- * Subtraction is similar, except that overflow can now happen only
- * when the signs are opposite. In this case, overflow has happened if
- * the result has the opposite sign of a.
- */
-#define __signed_sub_overflow(a, b, d) ({      \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = (u64)__a - (u64)__b;             \
-       ((((__a ^ __b)) & (*__d ^ __a))         \
-               & type_min(typeof(__a))) != 0;  \
-})
-
-/*
- * Signed multiplication is rather hard. gcc always follows C99, so
- * division is truncated towards 0. This means that we can write the
- * overflow check like this:
- *
- * (a > 0 && (b > MAX/a || b < MIN/a)) ||
- * (a < -1 && (b > MIN/a || b < MAX/a) ||
- * (a == -1 && b == MIN)
- *
- * The redundant casts of -1 are to silence an annoying -Wtype-limits
- * (included in -Wextra) warning: When the type is u8 or u16, the
- * __b_c_e in check_mul_overflow obviously selects
- * __unsigned_mul_overflow, but unfortunately gcc still parses this
- * code and warns about the limited range of __b.
- */
-
-#define __signed_mul_overflow(a, b, d) ({                              \
-       typeof(a) __a = (a);                                            \
-       typeof(b) __b = (b);                                            \
-       typeof(d) __d = (d);                                            \
-       typeof(a) __tmax = type_max(typeof(a));                         \
-       typeof(a) __tmin = type_min(typeof(a));                         \
-       (void) (&__a == &__b);                                          \
-       (void) (&__a == __d);                                           \
-       *__d = (u64)__a * (u64)__b;                                     \
-       (__b > 0   && (__a > __tmax/__b || __a < __tmin/__b)) ||        \
-       (__b < (typeof(__b))-1  && (__a > __tmin/__b || __a < __tmax/__b)) || \
-       (__b == (typeof(__b))-1 && __a == __tmin);                      \
-})
-
-
-#define check_add_overflow(a, b, d)    __must_check_overflow(          \
-       __builtin_choose_expr(is_signed_type(typeof(a)),                \
-                       __signed_add_overflow(a, b, d),                 \
-                       __unsigned_add_overflow(a, b, d)))
-
-#define check_sub_overflow(a, b, d)    __must_check_overflow(          \
-       __builtin_choose_expr(is_signed_type(typeof(a)),                \
-                       __signed_sub_overflow(a, b, d),                 \
-                       __unsigned_sub_overflow(a, b, d)))
-
-#define check_mul_overflow(a, b, d)    __must_check_overflow(          \
-       __builtin_choose_expr(is_signed_type(typeof(a)),                \
-                       __signed_mul_overflow(a, b, d),                 \
-                       __unsigned_mul_overflow(a, b, d)))
-
-#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
-
 /** check_shl_overflow() - Calculate a left-shifted value and check overflow
  *
  * @a: Value to be shifted
index 5466773..8d6571f 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright 2016-2018 NXP
  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #ifndef _LINUX_PACKING_H
index 5054802..2512e2f 100644 (file)
@@ -163,6 +163,12 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
 static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
 #endif
 
+#ifdef CONFIG_KVM
+void kvm_host_pmu_init(struct arm_pmu *pmu);
+#else
+#define kvm_host_pmu_init(x)   do { } while(0)
+#endif
+
 /* Internal functions only for core arm_pmu code */
 struct arm_pmu *armpmu_alloc(void);
 struct arm_pmu *armpmu_alloc_atomic(void);
index 6beb26b..86be8bf 100644 (file)
@@ -4,6 +4,8 @@
 
 #include <linux/mm.h>
 
+#define ARCH_DEFAULT_PKEY      0
+
 #ifdef CONFIG_ARCH_HAS_PKEYS
 #include <asm/pkeys.h>
 #else /* ! CONFIG_ARCH_HAS_PKEYS */
index e12b524..39039ce 100644 (file)
@@ -1471,6 +1471,7 @@ struct task_struct {
                                        mce_whole_page : 1,
                                        __mce_reserved : 62;
        struct callback_head            mce_kill_me;
+       int                             mce_count;
 #endif
 
 #ifdef CONFIG_KRETPROBES
index 6bdb0db..841e2f0 100644 (file)
@@ -1940,7 +1940,7 @@ static inline void __skb_insert(struct sk_buff *newsk,
        WRITE_ONCE(newsk->prev, prev);
        WRITE_ONCE(next->prev, newsk);
        WRITE_ONCE(prev->next, newsk);
-       list->qlen++;
+       WRITE_ONCE(list->qlen, list->qlen + 1);
 }
 
 static inline void __skb_queue_splice(const struct sk_buff_head *list,
index 3e80c4b..2564b74 100644 (file)
@@ -197,6 +197,8 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
 
        mem_cgroup_handle_over_high();
        blkcg_maybe_throttle_current();
+
+       rseq_handle_notify_resume(NULL, regs);
 }
 
 /*
index 5265024..207101a 100644 (file)
@@ -27,6 +27,12 @@ enum iter_type {
        ITER_DISCARD,
 };
 
+struct iov_iter_state {
+       size_t iov_offset;
+       size_t count;
+       unsigned long nr_segs;
+};
+
 struct iov_iter {
        u8 iter_type;
        bool data_source;
@@ -47,7 +53,6 @@ struct iov_iter {
                };
                loff_t xarray_start;
        };
-       size_t truncated;
 };
 
 static inline enum iter_type iov_iter_type(const struct iov_iter *i)
@@ -55,6 +60,14 @@ static inline enum iter_type iov_iter_type(const struct iov_iter *i)
        return i->iter_type;
 }
 
+static inline void iov_iter_save_state(struct iov_iter *iter,
+                                      struct iov_iter_state *state)
+{
+       state->iov_offset = iter->iov_offset;
+       state->count = iter->count;
+       state->nr_segs = iter->nr_segs;
+}
+
 static inline bool iter_is_iovec(const struct iov_iter *i)
 {
        return iov_iter_type(i) == ITER_IOVEC;
@@ -233,6 +246,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
                        size_t maxsize, size_t *start);
 int iov_iter_npages(const struct iov_iter *i, int maxpages);
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
 
 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
 
@@ -255,10 +269,8 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
         * conversion in assignement is by definition greater than all
         * values of size_t, including old i->count.
         */
-       if (i->count > count) {
-               i->truncated += i->count - count;
+       if (i->count > count)
                i->count = count;
-       }
 }
 
 /*
@@ -267,7 +279,6 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
  */
 static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
 {
-       i->truncated -= count - i->count;
        i->count = count;
 }
 
index 548a028..2c1fc92 100644 (file)
@@ -124,6 +124,7 @@ struct usb_hcd {
 #define HCD_FLAG_RH_RUNNING            5       /* root hub is running? */
 #define HCD_FLAG_DEAD                  6       /* controller has died? */
 #define HCD_FLAG_INTF_AUTHORIZED       7       /* authorize interfaces? */
+#define HCD_FLAG_DEFER_RH_REGISTER     8       /* Defer roothub registration */
 
        /* The flags can be tested using these macros; they are likely to
         * be slightly faster than test_bit().
@@ -134,6 +135,7 @@ struct usb_hcd {
 #define HCD_WAKEUP_PENDING(hcd)        ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
 #define HCD_RH_RUNNING(hcd)    ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
 #define HCD_DEAD(hcd)          ((hcd)->flags & (1U << HCD_FLAG_DEAD))
+#define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER))
 
        /*
         * Specifies if interfaces are authorized by default
index f9a1714..d784e76 100644 (file)
@@ -447,6 +447,11 @@ static inline bool dsa_port_is_user(struct dsa_port *dp)
        return dp->type == DSA_PORT_TYPE_USER;
 }
 
+static inline bool dsa_port_is_unused(struct dsa_port *dp)
+{
+       return dp->type == DSA_PORT_TYPE_UNUSED;
+}
+
 static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
 {
        return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
@@ -580,8 +585,16 @@ struct dsa_switch_ops {
        int     (*change_tag_protocol)(struct dsa_switch *ds, int port,
                                       enum dsa_tag_protocol proto);
 
+       /* Optional switch-wide initialization and destruction methods */
        int     (*setup)(struct dsa_switch *ds);
        void    (*teardown)(struct dsa_switch *ds);
+
+       /* Per-port initialization and destruction methods. Mandatory if the
+        * driver registers devlink port regions, optional otherwise.
+        */
+       int     (*port_setup)(struct dsa_switch *ds, int port);
+       void    (*port_teardown)(struct dsa_switch *ds, int port);
+
        u32     (*get_phy_flags)(struct dsa_switch *ds, int port);
 
        /*
@@ -1041,6 +1054,7 @@ static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
 
 void dsa_unregister_switch(struct dsa_switch *ds);
 int dsa_register_switch(struct dsa_switch *ds);
+void dsa_switch_shutdown(struct dsa_switch *ds);
 struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
 #ifdef CONFIG_PM_SLEEP
 int dsa_switch_suspend(struct dsa_switch *ds);
index 66a9a90..c005c3c 100644 (file)
@@ -1640,6 +1640,7 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
                release_sock(sk);
                __release(&sk->sk_lock.slock);
        } else {
+               mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
                spin_unlock_bh(&sk->sk_lock.slock);
        }
 }
index 09a17f6..b97e142 100644 (file)
@@ -146,7 +146,6 @@ struct scsi_device {
        struct scsi_vpd __rcu *vpd_pg83;
        struct scsi_vpd __rcu *vpd_pg80;
        struct scsi_vpd __rcu *vpd_pg89;
-       unsigned char current_tag;      /* current tag */
        struct scsi_target      *sdev_target;
 
        blist_flags_t           sdev_bflags; /* black/white flags as also found in
index 9f73ed2..bca73e8 100644 (file)
@@ -306,11 +306,13 @@ enum afs_flock_operation {
 
 enum afs_cb_break_reason {
        afs_cb_break_no_break,
+       afs_cb_break_no_promise,
        afs_cb_break_for_callback,
        afs_cb_break_for_deleted,
        afs_cb_break_for_lapsed,
+       afs_cb_break_for_s_reinit,
        afs_cb_break_for_unlink,
-       afs_cb_break_for_vsbreak,
+       afs_cb_break_for_v_break,
        afs_cb_break_for_volume_callback,
        afs_cb_break_for_zap,
 };
@@ -602,11 +604,13 @@ enum afs_cb_break_reason {
 
 #define afs_cb_break_reasons                                           \
        EM(afs_cb_break_no_break,               "no-break")             \
+       EM(afs_cb_break_no_promise,             "no-promise")           \
        EM(afs_cb_break_for_callback,           "break-cb")             \
        EM(afs_cb_break_for_deleted,            "break-del")            \
        EM(afs_cb_break_for_lapsed,             "break-lapsed")         \
+       EM(afs_cb_break_for_s_reinit,           "s-reinit")             \
        EM(afs_cb_break_for_unlink,             "break-unlink")         \
-       EM(afs_cb_break_for_vsbreak,            "break-vs")             \
+       EM(afs_cb_break_for_v_break,            "break-v")              \
        EM(afs_cb_break_for_volume_callback,    "break-v-cb")           \
        E_(afs_cb_break_for_zap,                "break-zap")
 
index bf9806f..db4f2ce 100644 (file)
@@ -35,20 +35,20 @@ TRACE_EVENT(erofs_lookup,
        TP_STRUCT__entry(
                __field(dev_t,          dev     )
                __field(erofs_nid_t,    nid     )
-               __field(const char *,   name    )
+               __string(name,          dentry->d_name.name     )
                __field(unsigned int,   flags   )
        ),
 
        TP_fast_assign(
                __entry->dev    = dir->i_sb->s_dev;
                __entry->nid    = EROFS_I(dir)->nid;
-               __entry->name   = dentry->d_name.name;
+               __assign_str(name, dentry->d_name.name);
                __entry->flags  = flags;
        ),
 
        TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x",
                show_dev_nid(__entry),
-               __entry->name,
+               __get_str(name),
                __entry->flags)
 );
 
index 20e435f..3246f2c 100644 (file)
@@ -225,7 +225,14 @@ struct binder_freeze_info {
 
 struct binder_frozen_status_info {
        __u32            pid;
+
+       /* process received sync transactions since last frozen
+        * bit 0: received sync transaction after being frozen
+        * bit 1: new pending sync transaction during freezing
+        */
        __u32            sync_recv;
+
+       /* process received async transactions since last frozen */
        __u32            async_recv;
 };
 
index 6982920..8e87d27 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
 /*
- *   include/uapi/linux/cifs/cifs_mount.h
  *
  *   Author(s): Scott Lovenberg (scott.lovenberg@gmail.com)
  *
index 59ef351..b270a07 100644 (file)
@@ -317,13 +317,19 @@ enum {
        IORING_REGISTER_IOWQ_AFF                = 17,
        IORING_UNREGISTER_IOWQ_AFF              = 18,
 
-       /* set/get max number of workers */
+       /* set/get max number of io-wq workers */
        IORING_REGISTER_IOWQ_MAX_WORKERS        = 19,
 
        /* this goes last */
        IORING_REGISTER_LAST
 };
 
+/* io-wq worker categories */
+enum {
+       IO_WQ_BOUND,
+       IO_WQ_UNBOUND,
+};
+
 /* deprecated, see struct io_uring_rsrc_update */
 struct io_uring_files_update {
        __u32 offset;
index 39a5580..db28e79 100644 (file)
@@ -46,19 +46,7 @@ extern unsigned long *xen_contiguous_bitmap;
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                unsigned int address_bits,
                                dma_addr_t *dma_handle);
-
 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
-#else
-static inline int xen_create_contiguous_region(phys_addr_t pstart,
-                                              unsigned int order,
-                                              unsigned int address_bits,
-                                              dma_addr_t *dma_handle)
-{
-       return 0;
-}
-
-static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
-                                                unsigned int order) { }
 #endif
 
 #if defined(CONFIG_XEN_PV)
index 2ed30ff..762b534 100644 (file)
@@ -338,20 +338,19 @@ __setup("rootflags=", root_data_setup);
 __setup("rootfstype=", fs_names_setup);
 __setup("rootdelay=", root_delay_setup);
 
-static int __init split_fs_names(char *page, char *names)
+/* This can return zero length strings. Caller should check */
+static int __init split_fs_names(char *page, size_t size, char *names)
 {
-       int count = 0;
+       int count = 1;
        char *p = page;
 
-       strcpy(p, root_fs_names);
+       strlcpy(p, root_fs_names, size);
        while (*p++) {
-               if (p[-1] == ',')
+               if (p[-1] == ',') {
                        p[-1] = '\0';
+                       count++;
+               }
        }
-       *p = '\0';
-
-       for (p = page; *p; p += strlen(p)+1)
-               count++;
 
        return count;
 }
@@ -404,12 +403,16 @@ void __init mount_block_root(char *name, int flags)
        scnprintf(b, BDEVNAME_SIZE, "unknown-block(%u,%u)",
                  MAJOR(ROOT_DEV), MINOR(ROOT_DEV));
        if (root_fs_names)
-               num_fs = split_fs_names(fs_names, root_fs_names);
+               num_fs = split_fs_names(fs_names, PAGE_SIZE, root_fs_names);
        else
                num_fs = list_bdev_fs_names(fs_names, PAGE_SIZE);
 retry:
        for (i = 0, p = fs_names; i < num_fs; i++, p += strlen(p)+1) {
-               int err = do_mount_root(name, p, flags, root_mount_data);
+               int err;
+
+               if (!*p)
+                       continue;
+               err = do_mount_root(name, p, flags, root_mount_data);
                switch (err) {
                        case 0:
                                goto out;
@@ -543,19 +546,18 @@ static int __init mount_nodev_root(void)
        fs_names = (void *)__get_free_page(GFP_KERNEL);
        if (!fs_names)
                return -EINVAL;
-       num_fs = split_fs_names(fs_names, root_fs_names);
+       num_fs = split_fs_names(fs_names, PAGE_SIZE, root_fs_names);
 
        for (i = 0, fstype = fs_names; i < num_fs;
             i++, fstype += strlen(fstype) + 1) {
+               if (!*fstype)
+                       continue;
                if (!fs_is_nodev(fstype))
                        continue;
                err = do_mount_root(root_device_name, fstype, root_mountflags,
                                    root_mount_data);
                if (!err)
                        break;
-               if (err != -EACCES && err != -EINVAL)
-                       panic("VFS: Unable to mount root \"%s\" (%s), err=%d\n",
-                             root_device_name, fstype, err);
        }
 
        free_page((unsigned long)fs_names);
index 5c9a48d..81a79a7 100644 (file)
@@ -924,7 +924,7 @@ static void __init print_unknown_bootoptions(void)
                end += sprintf(end, " %s", *p);
 
        pr_notice("Unknown command line parameters:%s\n", unknown_options);
-       memblock_free(__pa(unknown_options), len);
+       memblock_free_ptr(unknown_options, len);
 }
 
 asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
@@ -1242,7 +1242,7 @@ trace_initcall_start_cb(void *data, initcall_t fn)
 {
        ktime_t *calltime = (ktime_t *)data;
 
-       printk(KERN_DEBUG "calling  %pS @ %i irqs_disabled() %d\n", fn, task_pid_nr(current), irqs_disabled());
+       printk(KERN_DEBUG "calling  %pS @ %i\n", fn, task_pid_nr(current));
        *calltime = ktime_get();
 }
 
@@ -1256,8 +1256,8 @@ trace_initcall_finish_cb(void *data, initcall_t fn, int ret)
        rettime = ktime_get();
        delta = ktime_sub(rettime, *calltime);
        duration = (unsigned long long) ktime_to_ns(delta) >> 10;
-       printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs, irqs_disabled() %d\n",
-                fn, ret, duration, irqs_disabled());
+       printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs\n",
+                fn, ret, duration);
 }
 
 static ktime_t initcall_calltime;
index f833238..6693daf 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -2238,7 +2238,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
                return -EINVAL;
 
        if (nsops > SEMOPM_FAST) {
-               sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL_ACCOUNT);
+               sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
                if (sops == NULL)
                        return -ENOMEM;
        }
index ca3cd9a..7b4afb7 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
  * Copyright (c) 2016 Facebook
  */
index e546b18..a4b0407 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
  * Copyright (c) 2016 Facebook
  */
index e8eefdf..09a3fd9 100644 (file)
@@ -179,7 +179,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
         * with build_id.
         */
        if (!user || !current || !current->mm || irq_work_busy ||
-           !mmap_read_trylock_non_owner(current->mm)) {
+           !mmap_read_trylock(current->mm)) {
                /* cannot access current->mm, fall back to ips */
                for (i = 0; i < trace_nr; i++) {
                        id_offs[i].status = BPF_STACK_BUILD_ID_IP;
@@ -204,9 +204,15 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
        }
 
        if (!work) {
-               mmap_read_unlock_non_owner(current->mm);
+               mmap_read_unlock(current->mm);
        } else {
                work->mm = current->mm;
+
+               /* The lock will be released once we're out of interrupt
+                * context. Tell lockdep that we've released it now so
+                * it doesn't complain that we forgot to release it.
+                */
+               rwsem_release(&current->mm->mmap_lock.dep_map, _RET_IP_);
                irq_work_queue(&work->irq_work);
        }
 }
index 047ac4b..e76b559 100644 (file)
@@ -9912,6 +9912,8 @@ static int check_btf_line(struct bpf_verifier_env *env,
        nr_linfo = attr->line_info_cnt;
        if (!nr_linfo)
                return 0;
+       if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
+               return -EINVAL;
 
        rec_size = attr->line_info_rec_size;
        if (rec_size < MIN_BPF_LINEINFO_SIZE ||
index 881ce14..8afa869 100644 (file)
@@ -6572,74 +6572,44 @@ int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v)
  */
 #ifdef CONFIG_SOCK_CGROUP_DATA
 
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
-
-DEFINE_SPINLOCK(cgroup_sk_update_lock);
-static bool cgroup_sk_alloc_disabled __read_mostly;
-
-void cgroup_sk_alloc_disable(void)
-{
-       if (cgroup_sk_alloc_disabled)
-               return;
-       pr_info("cgroup: disabling cgroup2 socket matching due to net_prio or net_cls activation\n");
-       cgroup_sk_alloc_disabled = true;
-}
-
-#else
-
-#define cgroup_sk_alloc_disabled       false
-
-#endif
-
 void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
 {
-       if (cgroup_sk_alloc_disabled) {
-               skcd->no_refcnt = 1;
-               return;
-       }
-
        /* Don't associate the sock with unrelated interrupted task's cgroup. */
        if (in_interrupt())
                return;
 
        rcu_read_lock();
-
        while (true) {
                struct css_set *cset;
 
                cset = task_css_set(current);
                if (likely(cgroup_tryget(cset->dfl_cgrp))) {
-                       skcd->val = (unsigned long)cset->dfl_cgrp;
+                       skcd->cgroup = cset->dfl_cgrp;
                        cgroup_bpf_get(cset->dfl_cgrp);
                        break;
                }
                cpu_relax();
        }
-
        rcu_read_unlock();
 }
 
 void cgroup_sk_clone(struct sock_cgroup_data *skcd)
 {
-       if (skcd->val) {
-               if (skcd->no_refcnt)
-                       return;
-               /*
-                * We might be cloning a socket which is left in an empty
-                * cgroup and the cgroup might have already been rmdir'd.
-                * Don't use cgroup_get_live().
-                */
-               cgroup_get(sock_cgroup_ptr(skcd));
-               cgroup_bpf_get(sock_cgroup_ptr(skcd));
-       }
+       struct cgroup *cgrp = sock_cgroup_ptr(skcd);
+
+       /*
+        * We might be cloning a socket which is left in an empty
+        * cgroup and the cgroup might have already been rmdir'd.
+        * Don't use cgroup_get_live().
+        */
+       cgroup_get(cgrp);
+       cgroup_bpf_get(cgrp);
 }
 
 void cgroup_sk_free(struct sock_cgroup_data *skcd)
 {
        struct cgroup *cgrp = sock_cgroup_ptr(skcd);
 
-       if (skcd->no_refcnt)
-               return;
        cgroup_bpf_put(cgrp);
        cgroup_put(cgrp);
 }
index 6c90c69..95445bd 100644 (file)
@@ -567,7 +567,8 @@ static void add_dma_entry(struct dma_debug_entry *entry)
                pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
                global_disable = true;
        } else if (rc == -EEXIST) {
-               pr_err("cacheline tracking EEXIST, overlapping mappings aren't supported\n");
+               err_printk(entry->dev, entry,
+                       "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
        }
 }
 
index 7ee5284..06fec55 100644 (file)
@@ -206,7 +206,8 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
 /**
  * dma_map_sg_attrs - Map the given buffer for DMA
  * @dev:       The device for which to perform the DMA operation
- * @sg:        The sg_table object describing the buffer
+ * @sg:                The sg_table object describing the buffer
+ * @nents:     Number of entries to map
  * @dir:       DMA direction
  * @attrs:     Optional DMA attributes for the map operation
  *
index bf16395..d5a61d5 100644 (file)
@@ -171,10 +171,8 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
                if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
                        handle_signal_work(regs, ti_work);
 
-               if (ti_work & _TIF_NOTIFY_RESUME) {
+               if (ti_work & _TIF_NOTIFY_RESUME)
                        tracehook_notify_resume(regs);
-                       rseq_handle_notify_resume(NULL, regs);
-               }
 
                /* Architecture specific TIF work */
                arch_exit_to_user_mode_work(regs, ti_work);
index 744e872..0c000cb 100644 (file)
@@ -10193,7 +10193,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
                return;
 
        if (ifh->nr_file_filters) {
-               mm = get_task_mm(event->ctx->task);
+               mm = get_task_mm(task);
                if (!mm)
                        goto restart;
 
index 19e83e9..4d8fc65 100644 (file)
@@ -136,7 +136,7 @@ EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
  * Allocates and initializes an irq_domain structure.
  * Returns pointer to IRQ domain, or NULL on failure.
  */
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
                                    irq_hw_number_t hwirq_max, int direct_max,
                                    const struct irq_domain_ops *ops,
                                    void *host_data)
index 4ba1508..88191f6 100644 (file)
  * The risk of writer starvation is there, but the pathological use cases
  * which trigger it are not necessarily the typical RT workloads.
  *
+ * Fast-path orderings:
+ * The lock/unlock of readers can run in fast paths: lock and unlock are only
+ * atomic ops, and there is no inner lock to provide ACQUIRE and RELEASE
+ * semantics of rwbase_rt. Atomic ops should thus provide _acquire()
+ * and _release() (or stronger).
+ *
  * Common code shared between RT rw_semaphore and rwlock
  */
 
@@ -53,6 +59,7 @@ static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
         * set.
         */
        for (r = atomic_read(&rwb->readers); r < 0;) {
+               /* Fully-ordered if cmpxchg() succeeds, provides ACQUIRE */
                if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1)))
                        return 1;
        }
@@ -162,6 +169,8 @@ static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
        /*
         * rwb->readers can only hit 0 when a writer is waiting for the
         * active readers to leave the critical section.
+        *
+        * dec_and_test() is fully ordered, provides RELEASE.
         */
        if (unlikely(atomic_dec_and_test(&rwb->readers)))
                __rwbase_read_unlock(rwb, state);
@@ -172,7 +181,11 @@ static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
 {
        struct rt_mutex_base *rtm = &rwb->rtmutex;
 
-       atomic_add(READER_BIAS - bias, &rwb->readers);
+       /*
+        * _release() is needed in case that reader is in fast path, pairing
+        * with atomic_try_cmpxchg() in rwbase_read_trylock(), provides RELEASE
+        */
+       (void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
        raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
        rwbase_rtmutex_unlock(rtm);
 }
@@ -196,6 +209,23 @@ static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
        __rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
 }
 
+static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
+{
+       /* Can do without CAS because we're serialized by wait_lock. */
+       lockdep_assert_held(&rwb->rtmutex.wait_lock);
+
+       /*
+        * _acquire is needed in case the reader is in the fast path, pairing
+        * with rwbase_read_unlock(), provides ACQUIRE.
+        */
+       if (!atomic_read_acquire(&rwb->readers)) {
+               atomic_set(&rwb->readers, WRITER_BIAS);
+               return 1;
+       }
+
+       return 0;
+}
+
 static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
                                     unsigned int state)
 {
@@ -210,34 +240,30 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
        atomic_sub(READER_BIAS, &rwb->readers);
 
        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
-       /*
-        * set_current_state() for rw_semaphore
-        * current_save_and_set_rtlock_wait_state() for rwlock
-        */
-       rwbase_set_and_save_current_state(state);
+       if (__rwbase_write_trylock(rwb))
+               goto out_unlock;
 
-       /* Block until all readers have left the critical section. */
-       for (; atomic_read(&rwb->readers);) {
+       rwbase_set_and_save_current_state(state);
+       for (;;) {
                /* Optimized out for rwlocks */
                if (rwbase_signal_pending_state(state, current)) {
-                       __set_current_state(TASK_RUNNING);
+                       rwbase_restore_current_state();
                        __rwbase_write_unlock(rwb, 0, flags);
                        return -EINTR;
                }
+
+               if (__rwbase_write_trylock(rwb))
+                       break;
+
                raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
+               rwbase_schedule();
+               raw_spin_lock_irqsave(&rtm->wait_lock, flags);
 
-               /*
-                * Schedule and wait for the readers to leave the critical
-                * section. The last reader leaving it wakes the waiter.
-                */
-               if (atomic_read(&rwb->readers) != 0)
-                       rwbase_schedule();
                set_current_state(state);
-               raw_spin_lock_irqsave(&rtm->wait_lock, flags);
        }
-
-       atomic_set(&rwb->readers, WRITER_BIAS);
        rwbase_restore_current_state();
+
+out_unlock:
        raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
        return 0;
 }
@@ -253,8 +279,7 @@ static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
        atomic_sub(READER_BIAS, &rwb->readers);
 
        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
-       if (!atomic_read(&rwb->readers)) {
-               atomic_set(&rwb->readers, WRITER_BIAS);
+       if (__rwbase_write_trylock(rwb)) {
                raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
                return 1;
        }
index 825277e..a8d0a58 100644 (file)
@@ -1166,9 +1166,9 @@ void __init setup_log_buf(int early)
        return;
 
 err_free_descs:
-       memblock_free(__pa(new_descs), new_descs_size);
+       memblock_free_ptr(new_descs, new_descs_size);
 err_free_log_buf:
-       memblock_free(__pa(new_log_buf), new_log_buf_len);
+       memblock_free_ptr(new_log_buf, new_log_buf_len);
 }
 
 static bool __read_mostly ignore_loglevel;
index 35f7bd0..6d45ac3 100644 (file)
@@ -282,9 +282,17 @@ void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
 
        if (unlikely(t->flags & PF_EXITING))
                return;
-       ret = rseq_ip_fixup(regs);
-       if (unlikely(ret < 0))
-               goto error;
+
+       /*
+        * regs is NULL if and only if the caller is in a syscall path.  Skip
+        * fixup and leave rseq_cs as is so that rseq_syscall() will detect and
+        * kill a misbehaving userspace on debug kernels.
+        */
+       if (regs) {
+               ret = rseq_ip_fixup(regs);
+               if (unlikely(ret < 0))
+                       goto error;
+       }
        if (unlikely(rseq_update_cpu_id(t)))
                goto error;
        return;
index ee73686..643d412 100644 (file)
@@ -1404,7 +1404,8 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
                        }
                }
 
-               *newval += now;
+               if (*newval)
+                       *newval += now;
        }
 
        /*
index c221e4c..fa91f39 100644 (file)
@@ -1605,6 +1605,14 @@ static int blk_trace_remove_queue(struct request_queue *q)
        if (bt == NULL)
                return -EINVAL;
 
+       if (bt->trace_state == Blktrace_running) {
+               bt->trace_state = Blktrace_stopped;
+               spin_lock_irq(&running_trace_lock);
+               list_del_init(&bt->running_list);
+               spin_unlock_irq(&running_trace_lock);
+               relay_flush(bt->rchan);
+       }
+
        put_probe_ref();
        synchronize_rcu();
        blk_trace_free(bt);
index ed4a31e..2a9b6dc 100644 (file)
@@ -295,7 +295,7 @@ config DEBUG_INFO_DWARF4
 
 config DEBUG_INFO_DWARF5
        bool "Generate DWARF Version 5 debuginfo"
-       depends on GCC_VERSION >= 50000 || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
+       depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
        depends on !DEBUG_INFO_BTF
        help
          Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
@@ -346,7 +346,7 @@ config FRAME_WARN
        int "Warn for stack frames larger than"
        range 0 8192
        default 2048 if GCC_PLUGIN_LATENT_ENTROPY
-       default 1536 if (!64BIT && PARISC)
+       default 1536 if (!64BIT && (PARISC || XTENSA))
        default 1024 if (!64BIT && !PARISC)
        default 2048 if 64BIT
        help
index 1e2d10f..cdc842d 100644 (file)
@@ -66,6 +66,7 @@ choice
 config KASAN_GENERIC
        bool "Generic mode"
        depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
+       depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
        select SLUB_DEBUG if SLUB
        select CONSTRUCTORS
        help
@@ -86,6 +87,7 @@ config KASAN_GENERIC
 config KASAN_SW_TAGS
        bool "Software tag-based mode"
        depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
+       depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
        select SLUB_DEBUG if SLUB
        select CONSTRUCTORS
        help
index f8419cf..5ae248b 100644 (file)
@@ -792,7 +792,7 @@ void __init xbc_destroy_all(void)
        xbc_data = NULL;
        xbc_data_size = 0;
        xbc_node_num = 0;
-       memblock_free(__pa(xbc_nodes), sizeof(struct xbc_node) * XBC_NODE_MAX);
+       memblock_free_ptr(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX);
        xbc_nodes = NULL;
        brace_index = 0;
 }
index f2d50d6..755c10c 100644 (file)
@@ -1972,3 +1972,39 @@ int import_single_range(int rw, void __user *buf, size_t len,
        return 0;
 }
 EXPORT_SYMBOL(import_single_range);
+
+/**
+ * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
+ *     iov_iter_save_state() was called.
+ *
+ * @i: &struct iov_iter to restore
+ * @state: state to restore from
+ *
+ * Used after iov_iter_save_state() to restore @i, if operations may
+ * have advanced it.
+ *
+ * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
+ */
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
+{
+       if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) &&
+                        !iov_iter_is_kvec(i))
+               return;
+       i->iov_offset = state->iov_offset;
+       i->count = state->count;
+       /*
+        * For the *vec iters, nr_segs + iov is constant - if we increment
+        * the vec, then we also decrement the nr_segs count. Hence we don't
+        * need to track both of these, just one is enough and we can deduct
+        * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
+        * size, so we can just increment the iov pointer as they are unionized.
+        * ITER_BVEC _may_ be the same size on some archs, but on others it is
+        * not. Be safe and handle it separately.
+        */
+       BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
+       if (iov_iter_is_bvec(i))
+               i->bvec -= state->nr_segs - i->nr_segs;
+       else
+               i->iov -= state->nr_segs - i->nr_segs;
+       i->nr_segs = state->nr_segs;
+}
index 6ed72dc..9a72f4b 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #include <linux/packing.h>
index 2d3eb1c..ce39ce9 100644 (file)
@@ -134,4 +134,47 @@ void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
        return pci_iomap_wc_range(dev, bar, 0, maxlen);
 }
 EXPORT_SYMBOL_GPL(pci_iomap_wc);
+
+/*
+ * pci_iounmap() somewhat illogically comes from lib/iomap.c for the
+ * CONFIG_GENERIC_IOMAP case, because that's the code that knows about
+ * the different IOMAP ranges.
+ *
+ * But if the architecture does not use the generic iomap code, and if
+ * it has _not_ defined its own private pci_iounmap function, we define
+ * it here.
+ *
+ * NOTE! This default implementation assumes that if the architecture
+ * supports ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
+ * be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [,
+ * and does not need unmapping with 'ioport_unmap()'.
+ *
+ * If you have different rules for your architecture, you need to
+ * implement your own pci_iounmap() that knows the rules for where
+ * and how IO vs MEM get mapped.
+ *
+ * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
+ * from legacy <asm-generic/io.h> header file behavior. In particular,
+ * it would seem to make sense to do the iounmap(p) for the non-IO-space
+ * case here regardless, but that's not what the old header file code
+ * did. Probably incorrectly, but this is meant to be bug-for-bug
+ * compatible.
+ */
+#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
+       uintptr_t start = (uintptr_t) PCI_IOBASE;
+       uintptr_t addr = (uintptr_t) p;
+
+       if (addr >= start && addr < start + IO_SPACE_LIMIT)
+               return;
+       iounmap(p);
+#endif
+}
+EXPORT_SYMBOL(pci_iounmap);
+
+#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
+
 #endif /* CONFIG_PCI */
index f19c4fb..2843f9b 100644 (file)
@@ -253,13 +253,12 @@ void inflate_fast(z_streamp strm, unsigned start)
 
                        sfrom = (unsigned short *)(from);
                        loops = len >> 1;
-                       do
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-                           *sout++ = *sfrom++;
-#else
-                           *sout++ = get_unaligned16(sfrom++);
-#endif
-                       while (--loops);
+                       do {
+                           if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+                               *sout++ = *sfrom++;
+                           else
+                               *sout++ = get_unaligned16(sfrom++);
+                       } while (--loops);
                        out = (unsigned char *)sout;
                        from = (unsigned char *)sfrom;
                    } else { /* dist == 1 or dist == 2 */
index 930e83b..4eddcfa 100644 (file)
@@ -20,27 +20,27 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
        ssize_t nr_integers = 0, i;
 
        question = "123";
-       answers = str_to_target_ids(question, strnlen(question, 128),
+       answers = str_to_target_ids(question, strlen(question),
                        &nr_integers);
        KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
        KUNIT_EXPECT_EQ(test, 123ul, answers[0]);
        kfree(answers);
 
        question = "123abc";
-       answers = str_to_target_ids(question, strnlen(question, 128),
+       answers = str_to_target_ids(question, strlen(question),
                        &nr_integers);
        KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
        KUNIT_EXPECT_EQ(test, 123ul, answers[0]);
        kfree(answers);
 
        question = "a123";
-       answers = str_to_target_ids(question, strnlen(question, 128),
+       answers = str_to_target_ids(question, strlen(question),
                        &nr_integers);
        KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
        kfree(answers);
 
        question = "12 35";
-       answers = str_to_target_ids(question, strnlen(question, 128),
+       answers = str_to_target_ids(question, strlen(question),
                        &nr_integers);
        KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
        for (i = 0; i < nr_integers; i++)
@@ -48,7 +48,7 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
        kfree(answers);
 
        question = "12 35 46";
-       answers = str_to_target_ids(question, strnlen(question, 128),
+       answers = str_to_target_ids(question, strlen(question),
                        &nr_integers);
        KUNIT_EXPECT_EQ(test, (ssize_t)3, nr_integers);
        for (i = 0; i < nr_integers; i++)
@@ -56,7 +56,7 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
        kfree(answers);
 
        question = "12 35 abc 46";
-       answers = str_to_target_ids(question, strnlen(question, 128),
+       answers = str_to_target_ids(question, strlen(question),
                        &nr_integers);
        KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
        for (i = 0; i < 2; i++)
@@ -64,13 +64,13 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
        kfree(answers);
 
        question = "";
-       answers = str_to_target_ids(question, strnlen(question, 128),
+       answers = str_to_target_ids(question, strlen(question),
                        &nr_integers);
        KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
        kfree(answers);
 
        question = "\n";
-       answers = str_to_target_ids(question, strnlen(question, 128),
+       answers = str_to_target_ids(question, strlen(question),
                        &nr_integers);
        KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
        kfree(answers);
index e73fe0a..fae0f81 100644 (file)
@@ -24,7 +24,9 @@ const char *migrate_reason_names[MR_TYPES] = {
        "syscall_or_cpuset",
        "mempolicy_mbind",
        "numa_misplaced",
-       "cma",
+       "contig_range",
+       "longterm_pin",
+       "demotion",
 };
 
 const struct trace_print_flags pageflag_names[] = {
index 0253381..a5716fd 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -651,10 +651,8 @@ static void remove_node_from_stable_tree(struct stable_node *stable_node)
         * from &migrate_nodes. This will verify that future list.h changes
         * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it.
         */
-#if defined(GCC_VERSION) && GCC_VERSION >= 40903
        BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
        BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
-#endif
 
        if (stable_node->head == &migrate_nodes)
                list_del(&stable_node->list);
index 0ab5a74..184dcd2 100644 (file)
@@ -472,7 +472,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
                kfree(old_array);
        else if (old_array != memblock_memory_init_regions &&
                 old_array != memblock_reserved_init_regions)
-               memblock_free(__pa(old_array), old_alloc_size);
+               memblock_free_ptr(old_array, old_alloc_size);
 
        /*
         * Reserve the new array if that comes from the memblock.  Otherwise, we
@@ -796,6 +796,20 @@ int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
 }
 
 /**
+ * memblock_free_ptr - free boot memory allocation
+ * @ptr: starting address of the  boot memory allocation
+ * @size: size of the boot memory block in bytes
+ *
+ * Free boot memory block previously allocated by memblock_alloc_xx() API.
+ * The freeing memory will not be released to the buddy allocator.
+ */
+void __init_memblock memblock_free_ptr(void *ptr, size_t size)
+{
+       if (ptr)
+               memblock_free(__pa(ptr), size);
+}
+
+/**
  * memblock_free - free boot memory block
  * @base: phys starting address of the  boot memory block
  * @size: size of the boot memory block in bytes
index b762215..6da5020 100644 (file)
@@ -106,9 +106,6 @@ static bool do_memsw_account(void)
 /* memcg and lruvec stats flushing */
 static void flush_memcg_stats_dwork(struct work_struct *w);
 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
-static void flush_memcg_stats_work(struct work_struct *w);
-static DECLARE_WORK(stats_flush_work, flush_memcg_stats_work);
-static DEFINE_PER_CPU(unsigned int, stats_flush_threshold);
 static DEFINE_SPINLOCK(stats_flush_lock);
 
 #define THRESHOLDS_EVENTS_TARGET 128
@@ -682,8 +679,6 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 
        /* Update lruvec */
        __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
-       if (!(__this_cpu_inc_return(stats_flush_threshold) % MEMCG_CHARGE_BATCH))
-               queue_work(system_unbound_wq, &stats_flush_work);
 }
 
 /**
@@ -5361,11 +5356,6 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
        queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
 }
 
-static void flush_memcg_stats_work(struct work_struct *w)
-{
-       mem_cgroup_flush_stats();
-}
-
 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
index 54879c3..3e6449f 100644 (file)
@@ -306,6 +306,7 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page,
                struct vm_area_struct *vma)
 {
        unsigned long address = vma_address(page, vma);
+       unsigned long ret = 0;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
@@ -329,11 +330,10 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page,
        if (pmd_devmap(*pmd))
                return PMD_SHIFT;
        pte = pte_offset_map(pmd, address);
-       if (!pte_present(*pte))
-               return 0;
-       if (pte_devmap(*pte))
-               return PAGE_SHIFT;
-       return 0;
+       if (pte_present(*pte) && pte_devmap(*pte))
+               ret = PAGE_SHIFT;
+       pte_unmap(pte);
+       return ret;
 }
 
 /*
@@ -1126,7 +1126,7 @@ static int page_action(struct page_state *ps, struct page *p,
  */
 static inline bool HWPoisonHandlable(struct page *page)
 {
-       return PageLRU(page) || __PageMovable(page);
+       return PageLRU(page) || __PageMovable(page) || is_free_buddy_page(page);
 }
 
 static int __get_hwpoison_page(struct page *page)
index 25fc46e..adf9b9e 100644 (file)
@@ -3403,6 +3403,7 @@ void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
        i_mmap_unlock_write(mapping);
 }
+EXPORT_SYMBOL_GPL(unmap_mapping_pages);
 
 /**
  * unmap_mapping_range - unmap the portion of all mmaps in the specified
index 8874295..b5860f4 100644 (file)
@@ -490,9 +490,9 @@ bool shmem_is_huge(struct vm_area_struct *vma,
        case SHMEM_HUGE_ALWAYS:
                return true;
        case SHMEM_HUGE_WITHIN_SIZE:
-               index = round_up(index, HPAGE_PMD_NR);
+               index = round_up(index + 1, HPAGE_PMD_NR);
                i_size = round_up(i_size_read(inode), PAGE_SIZE);
-               if (i_size >= HPAGE_PMD_SIZE && (i_size >> PAGE_SHIFT) >= index)
+               if (i_size >> PAGE_SHIFT >= index)
                        return true;
                fallthrough;
        case SHMEM_HUGE_ADVISE:
index 897200d..af3cad4 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -620,7 +620,6 @@ void lru_add_drain_cpu(int cpu)
                pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
 
        activate_page_drain(cpu);
-       invalidate_bh_lrus_cpu(cpu);
 }
 
 /**
@@ -703,6 +702,20 @@ void lru_add_drain(void)
        local_unlock(&lru_pvecs.lock);
 }
 
+/*
+ * It's called from per-cpu workqueue context in SMP case so
+ * lru_add_drain_cpu and invalidate_bh_lrus_cpu should run on
+ * the same cpu. It shouldn't be a problem in !SMP case since
+ * the core is only one and the locks will disable preemption.
+ */
+static void lru_add_and_bh_lrus_drain(void)
+{
+       local_lock(&lru_pvecs.lock);
+       lru_add_drain_cpu(smp_processor_id());
+       local_unlock(&lru_pvecs.lock);
+       invalidate_bh_lrus_cpu();
+}
+
 void lru_add_drain_cpu_zone(struct zone *zone)
 {
        local_lock(&lru_pvecs.lock);
@@ -717,7 +730,7 @@ static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
-       lru_add_drain();
+       lru_add_and_bh_lrus_drain();
 }
 
 /*
@@ -858,7 +871,7 @@ void lru_cache_disable(void)
         */
        __lru_add_drain_all(true);
 #else
-       lru_add_drain();
+       lru_add_and_bh_lrus_drain();
 #endif
 }
 
index 499b6b5..bacabe4 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -787,7 +787,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
                size_t *lenp, loff_t *ppos)
 {
        struct ctl_table t;
-       int new_policy;
+       int new_policy = -1;
        int ret;
 
        /*
@@ -805,7 +805,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
                t = *table;
                t.data = &new_policy;
                ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
-               if (ret)
+               if (ret || new_policy == -1)
                        return ret;
 
                mm_compute_batch(new_policy);
index d4268d8..d5b81e4 100644 (file)
@@ -352,6 +352,7 @@ void workingset_refault(struct page *page, void *shadow)
 
        inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
 
+       mem_cgroup_flush_stats();
        /*
         * Compare the distance to the existing workingset size. We
         * don't activate pages that couldn't stay resident even if
index 37b6719..414dc56 100644 (file)
@@ -53,20 +53,6 @@ struct chnl_net {
        enum caif_states state;
 };
 
-static void robust_list_del(struct list_head *delete_node)
-{
-       struct list_head *list_node;
-       struct list_head *n;
-       ASSERT_RTNL();
-       list_for_each_safe(list_node, n, &chnl_net_list) {
-               if (list_node == delete_node) {
-                       list_del(list_node);
-                       return;
-               }
-       }
-       WARN_ON(1);
-}
-
 static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
 {
        struct sk_buff *skb;
@@ -364,6 +350,7 @@ static int chnl_net_init(struct net_device *dev)
        ASSERT_RTNL();
        priv = netdev_priv(dev);
        strncpy(priv->name, dev->name, sizeof(priv->name));
+       INIT_LIST_HEAD(&priv->list_field);
        return 0;
 }
 
@@ -372,7 +359,7 @@ static void chnl_net_uninit(struct net_device *dev)
        struct chnl_net *priv;
        ASSERT_RTNL();
        priv = netdev_priv(dev);
-       robust_list_del(&priv->list_field);
+       list_del_init(&priv->list_field);
 }
 
 static const struct net_device_ops netdev_ops = {
@@ -537,7 +524,7 @@ static void __exit chnl_exit_module(void)
        rtnl_lock();
        list_for_each_safe(list_node, _tmp, &chnl_net_list) {
                dev = list_entry(list_node, struct chnl_net, list_field);
-               list_del(list_node);
+               list_del_init(list_node);
                delete_device(dev);
        }
        rtnl_unlock();
index 74fd402..7ee9fec 100644 (file)
@@ -6923,12 +6923,16 @@ EXPORT_SYMBOL(napi_disable);
  */
 void napi_enable(struct napi_struct *n)
 {
-       BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
-       smp_mb__before_atomic();
-       clear_bit(NAPI_STATE_SCHED, &n->state);
-       clear_bit(NAPI_STATE_NPSVC, &n->state);
-       if (n->dev->threaded && n->thread)
-               set_bit(NAPI_STATE_THREADED, &n->state);
+       unsigned long val, new;
+
+       do {
+               val = READ_ONCE(n->state);
+               BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
+
+               new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
+               if (n->dev->threaded && n->thread)
+                       new |= NAPIF_STATE_THREADED;
+       } while (cmpxchg(&n->state, val, new) != val);
 }
 EXPORT_SYMBOL(napi_enable);
 
index b49c57d..1a6a866 100644 (file)
@@ -71,11 +71,8 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n)
        struct update_classid_context *ctx = (void *)v;
        struct socket *sock = sock_from_file(file);
 
-       if (sock) {
-               spin_lock(&cgroup_sk_update_lock);
+       if (sock)
                sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid);
-               spin_unlock(&cgroup_sk_update_lock);
-       }
        if (--ctx->batch == 0) {
                ctx->batch = UPDATE_CLASSID_BATCH;
                return n + 1;
@@ -121,8 +118,6 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
        struct css_task_iter it;
        struct task_struct *p;
 
-       cgroup_sk_alloc_disable();
-
        cs->classid = (u32)value;
 
        css_task_iter_start(css, 0, &it);
index 99a431c..8456dfb 100644 (file)
@@ -207,8 +207,6 @@ static ssize_t write_priomap(struct kernfs_open_file *of,
        if (!dev)
                return -ENODEV;
 
-       cgroup_sk_alloc_disable();
-
        rtnl_lock();
 
        ret = netprio_set_prio(of_css(of), dev, prio);
@@ -221,12 +219,10 @@ static ssize_t write_priomap(struct kernfs_open_file *of,
 static int update_netprio(const void *v, struct file *file, unsigned n)
 {
        struct socket *sock = sock_from_file(file);
-       if (sock) {
-               spin_lock(&cgroup_sk_update_lock);
+
+       if (sock)
                sock_cgroup_set_prioidx(&sock->sk->sk_cgrp_data,
                                        (unsigned long)v);
-               spin_unlock(&cgroup_sk_update_lock);
-       }
        return 0;
 }
 
@@ -235,8 +231,6 @@ static void net_prio_attach(struct cgroup_taskset *tset)
        struct task_struct *p;
        struct cgroup_subsys_state *css;
 
-       cgroup_sk_alloc_disable();
-
        cgroup_taskset_for_each(p, css, tset) {
                void *v = (void *)(unsigned long)css->id;
 
index 62627e8..512e629 100644 (file)
@@ -3179,17 +3179,15 @@ EXPORT_SYMBOL(sock_init_data);
 
 void lock_sock_nested(struct sock *sk, int subclass)
 {
+       /* The sk_lock has mutex_lock() semantics here. */
+       mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
+
        might_sleep();
        spin_lock_bh(&sk->sk_lock.slock);
        if (sk->sk_lock.owned)
                __lock_sock(sk);
        sk->sk_lock.owned = 1;
-       spin_unlock(&sk->sk_lock.slock);
-       /*
-        * The sk_lock has mutex_lock() semantics here:
-        */
-       mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
-       local_bh_enable();
+       spin_unlock_bh(&sk->sk_lock.slock);
 }
 EXPORT_SYMBOL(lock_sock_nested);
 
@@ -3227,24 +3225,35 @@ EXPORT_SYMBOL(release_sock);
  */
 bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
 {
+       /* The sk_lock has mutex_lock() semantics here. */
+       mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+
        might_sleep();
        spin_lock_bh(&sk->sk_lock.slock);
 
-       if (!sk->sk_lock.owned)
+       if (!sk->sk_lock.owned) {
                /*
-                * Note : We must disable BH
+                * Fast path return with bottom halves disabled and
+                * sock::sk_lock.slock held.
+                *
+                * The 'mutex' is not contended and holding
+                * sock::sk_lock.slock prevents all other lockers to
+                * proceed so the corresponding unlock_sock_fast() can
+                * avoid the slow path of release_sock() completely and
+                * just release slock.
+                *
+                * From a semantical POV this is equivalent to 'acquiring'
+                * the 'mutex', hence the corresponding lockdep
+                * mutex_release() has to happen in the fast path of
+                * unlock_sock_fast().
                 */
                return false;
+       }
 
        __lock_sock(sk);
        sk->sk_lock.owned = 1;
-       spin_unlock(&sk->sk_lock.slock);
-       /*
-        * The sk_lock has mutex_lock() semantics here:
-        */
-       mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
        __acquire(&sk->sk_lock.slock);
-       local_bh_enable();
+       spin_unlock_bh(&sk->sk_lock.slock);
        return true;
 }
 EXPORT_SYMBOL(lock_sock_fast);
index c5c74a3..91e7a22 100644 (file)
@@ -94,6 +94,8 @@ struct sock *dccp_create_openreq_child(const struct sock *sk,
                newdp->dccps_role           = DCCP_ROLE_SERVER;
                newdp->dccps_hc_rx_ackvec   = NULL;
                newdp->dccps_service_list   = NULL;
+               newdp->dccps_hc_rx_ccid     = NULL;
+               newdp->dccps_hc_tx_ccid     = NULL;
                newdp->dccps_service        = dreq->dreq_service;
                newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;
                newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;
index 1dc45e4..41f36ad 100644 (file)
@@ -345,6 +345,11 @@ bool dsa_schedule_work(struct work_struct *work)
        return queue_work(dsa_owq, work);
 }
 
+void dsa_flush_workqueue(void)
+{
+       flush_workqueue(dsa_owq);
+}
+
 int dsa_devlink_param_get(struct devlink *dl, u32 id,
                          struct devlink_param_gset_ctx *ctx)
 {
index 1b2b25d..b29262e 100644 (file)
@@ -429,6 +429,7 @@ static int dsa_port_setup(struct dsa_port *dp)
 {
        struct devlink_port *dlp = &dp->devlink_port;
        bool dsa_port_link_registered = false;
+       struct dsa_switch *ds = dp->ds;
        bool dsa_port_enabled = false;
        int err = 0;
 
@@ -438,6 +439,12 @@ static int dsa_port_setup(struct dsa_port *dp)
        INIT_LIST_HEAD(&dp->fdbs);
        INIT_LIST_HEAD(&dp->mdbs);
 
+       if (ds->ops->port_setup) {
+               err = ds->ops->port_setup(ds, dp->index);
+               if (err)
+                       return err;
+       }
+
        switch (dp->type) {
        case DSA_PORT_TYPE_UNUSED:
                dsa_port_disable(dp);
@@ -480,8 +487,11 @@ static int dsa_port_setup(struct dsa_port *dp)
                dsa_port_disable(dp);
        if (err && dsa_port_link_registered)
                dsa_port_link_unregister_of(dp);
-       if (err)
+       if (err) {
+               if (ds->ops->port_teardown)
+                       ds->ops->port_teardown(ds, dp->index);
                return err;
+       }
 
        dp->setup = true;
 
@@ -533,11 +543,15 @@ static int dsa_port_devlink_setup(struct dsa_port *dp)
 static void dsa_port_teardown(struct dsa_port *dp)
 {
        struct devlink_port *dlp = &dp->devlink_port;
+       struct dsa_switch *ds = dp->ds;
        struct dsa_mac_addr *a, *tmp;
 
        if (!dp->setup)
                return;
 
+       if (ds->ops->port_teardown)
+               ds->ops->port_teardown(ds, dp->index);
+
        devlink_port_type_clear(dlp);
 
        switch (dp->type) {
@@ -581,6 +595,36 @@ static void dsa_port_devlink_teardown(struct dsa_port *dp)
        dp->devlink_port_setup = false;
 }
 
+/* Destroy the current devlink port, and create a new one which has the UNUSED
+ * flavour. At this point, any call to ds->ops->port_setup has been already
+ * balanced out by a call to ds->ops->port_teardown, so we know that any
+ * devlink port regions the driver had are now unregistered. We then call its
+ * ds->ops->port_setup again, in order for the driver to re-create them on the
+ * new devlink port.
+ */
+static int dsa_port_reinit_as_unused(struct dsa_port *dp)
+{
+       struct dsa_switch *ds = dp->ds;
+       int err;
+
+       dsa_port_devlink_teardown(dp);
+       dp->type = DSA_PORT_TYPE_UNUSED;
+       err = dsa_port_devlink_setup(dp);
+       if (err)
+               return err;
+
+       if (ds->ops->port_setup) {
+               /* On error, leave the devlink port registered,
+                * dsa_switch_teardown will clean it up later.
+                */
+               err = ds->ops->port_setup(ds, dp->index);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
 static int dsa_devlink_info_get(struct devlink *dl,
                                struct devlink_info_req *req,
                                struct netlink_ext_ack *extack)
@@ -836,7 +880,7 @@ static int dsa_switch_setup(struct dsa_switch *ds)
        devlink_params_publish(ds->devlink);
 
        if (!ds->slave_mii_bus && ds->ops->phy_read) {
-               ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+               ds->slave_mii_bus = mdiobus_alloc();
                if (!ds->slave_mii_bus) {
                        err = -ENOMEM;
                        goto teardown;
@@ -846,13 +890,16 @@ static int dsa_switch_setup(struct dsa_switch *ds)
 
                err = mdiobus_register(ds->slave_mii_bus);
                if (err < 0)
-                       goto teardown;
+                       goto free_slave_mii_bus;
        }
 
        ds->setup = true;
 
        return 0;
 
+free_slave_mii_bus:
+       if (ds->slave_mii_bus && ds->ops->phy_read)
+               mdiobus_free(ds->slave_mii_bus);
 teardown:
        if (ds->ops->teardown)
                ds->ops->teardown(ds);
@@ -877,8 +924,11 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
        if (!ds->setup)
                return;
 
-       if (ds->slave_mii_bus && ds->ops->phy_read)
+       if (ds->slave_mii_bus && ds->ops->phy_read) {
                mdiobus_unregister(ds->slave_mii_bus);
+               mdiobus_free(ds->slave_mii_bus);
+               ds->slave_mii_bus = NULL;
+       }
 
        dsa_switch_unregister_notifier(ds);
 
@@ -897,6 +947,33 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
        ds->setup = false;
 }
 
+/* First tear down the non-shared, then the shared ports. This ensures that
+ * all work items scheduled by our switchdev handlers for user ports have
+ * completed before we destroy the refcounting kept on the shared ports.
+ */
+static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
+{
+       struct dsa_port *dp;
+
+       list_for_each_entry(dp, &dst->ports, list)
+               if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
+                       dsa_port_teardown(dp);
+
+       dsa_flush_workqueue();
+
+       list_for_each_entry(dp, &dst->ports, list)
+               if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
+                       dsa_port_teardown(dp);
+}
+
+static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
+{
+       struct dsa_port *dp;
+
+       list_for_each_entry(dp, &dst->ports, list)
+               dsa_switch_teardown(dp->ds);
+}
+
 static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
 {
        struct dsa_port *dp;
@@ -911,38 +988,22 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
        list_for_each_entry(dp, &dst->ports, list) {
                err = dsa_port_setup(dp);
                if (err) {
-                       dsa_port_devlink_teardown(dp);
-                       dp->type = DSA_PORT_TYPE_UNUSED;
-                       err = dsa_port_devlink_setup(dp);
+                       err = dsa_port_reinit_as_unused(dp);
                        if (err)
                                goto teardown;
-                       continue;
                }
        }
 
        return 0;
 
 teardown:
-       list_for_each_entry(dp, &dst->ports, list)
-               dsa_port_teardown(dp);
+       dsa_tree_teardown_ports(dst);
 
-       list_for_each_entry(dp, &dst->ports, list)
-               dsa_switch_teardown(dp->ds);
+       dsa_tree_teardown_switches(dst);
 
        return err;
 }
 
-static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
-{
-       struct dsa_port *dp;
-
-       list_for_each_entry(dp, &dst->ports, list)
-               dsa_port_teardown(dp);
-
-       list_for_each_entry(dp, &dst->ports, list)
-               dsa_switch_teardown(dp->ds);
-}
-
 static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
 {
        struct dsa_port *dp;
@@ -1034,6 +1095,7 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
 teardown_master:
        dsa_tree_teardown_master(dst);
 teardown_switches:
+       dsa_tree_teardown_ports(dst);
        dsa_tree_teardown_switches(dst);
 teardown_cpu_ports:
        dsa_tree_teardown_cpu_ports(dst);
@@ -1052,6 +1114,8 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
 
        dsa_tree_teardown_master(dst);
 
+       dsa_tree_teardown_ports(dst);
+
        dsa_tree_teardown_switches(dst);
 
        dsa_tree_teardown_cpu_ports(dst);
@@ -1546,3 +1610,53 @@ void dsa_unregister_switch(struct dsa_switch *ds)
        mutex_unlock(&dsa2_mutex);
 }
 EXPORT_SYMBOL_GPL(dsa_unregister_switch);
+
+/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
+ * blocking that operation from completion, due to the dev_hold taken inside
+ * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
+ * the DSA master, so that the system can reboot successfully.
+ */
+void dsa_switch_shutdown(struct dsa_switch *ds)
+{
+       struct net_device *master, *slave_dev;
+       LIST_HEAD(unregister_list);
+       struct dsa_port *dp;
+
+       mutex_lock(&dsa2_mutex);
+       rtnl_lock();
+
+       list_for_each_entry(dp, &ds->dst->ports, list) {
+               if (dp->ds != ds)
+                       continue;
+
+               if (!dsa_port_is_user(dp))
+                       continue;
+
+               master = dp->cpu_dp->master;
+               slave_dev = dp->slave;
+
+               netdev_upper_dev_unlink(master, slave_dev);
+               /* Just unlinking ourselves as uppers of the master is not
+                * sufficient. When the master net device unregisters, that will
+                * also call dev_close, which we will catch as NETDEV_GOING_DOWN
+                * and trigger a dev_close on our own devices (dsa_slave_close).
+                * In turn, that will call dev_mc_unsync on the master's net
+                * device. If the master is also a DSA switch port, this will
+                * trigger dsa_slave_set_rx_mode which will call dev_mc_sync on
+                * its own master. Lockdep will complain about the fact that
+                * all cascaded masters have the same dsa_master_addr_list_lock_key,
+                * which it normally would not do if the cascaded masters would
+                * be in a proper upper/lower relationship, which we've just
+                * destroyed.
+                * To suppress the lockdep warnings, let's actually unregister
+                * the DSA slave interfaces too, to avoid the nonsensical
+                * multicast address list synchronization on shutdown.
+                */
+               unregister_netdevice_queue(slave_dev, &unregister_list);
+       }
+       unregister_netdevice_many(&unregister_list);
+
+       rtnl_unlock();
+       mutex_unlock(&dsa2_mutex);
+}
+EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
index 33ab7d7..a5c9bc7 100644 (file)
@@ -170,6 +170,7 @@ void dsa_tag_driver_put(const struct dsa_device_ops *ops);
 const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
 
 bool dsa_schedule_work(struct work_struct *work);
+void dsa_flush_workqueue(void);
 const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
 
 static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
index 662ff53..a2bf2d8 100644 (file)
@@ -1854,13 +1854,11 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
                 * use the switch internal MDIO bus instead
                 */
                ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
-               if (ret) {
-                       netdev_err(slave_dev,
-                                  "failed to connect to port %d: %d\n",
-                                  dp->index, ret);
-                       phylink_destroy(dp->pl);
-                       return ret;
-               }
+       }
+       if (ret) {
+               netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
+                          ERR_PTR(ret));
+               phylink_destroy(dp->pl);
        }
 
        return ret;
index d37ab98..8025ed7 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright 2019 NXP Semiconductors
+/* Copyright 2019 NXP
  */
 #include <linux/dsa/ocelot.h>
 #include <soc/mscc/ocelot.h>
index 3038a25..5907293 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright 2020-2021 NXP Semiconductors
+/* Copyright 2020-2021 NXP
  *
  * An implementation of the software-defined tag_8021q.c tagger format, which
  * also preserves full functionality under a vlan_filtering bridge. It does
index 75ca4b6..9e81007 100644 (file)
@@ -1982,6 +1982,8 @@ static int replace_nexthop_grp(struct net *net, struct nexthop *old,
        rcu_assign_pointer(old->nh_grp, newg);
 
        if (newg->resilient) {
+               /* Make sure concurrent readers are not using 'oldg' anymore. */
+               synchronize_net();
                rcu_assign_pointer(oldg->res_table, tmp_table);
                rcu_assign_pointer(oldg->spare->res_table, tmp_table);
        }
@@ -3565,6 +3567,7 @@ static struct notifier_block nh_netdev_notifier = {
 };
 
 static int nexthops_dump(struct net *net, struct notifier_block *nb,
+                        enum nexthop_event_type event_type,
                         struct netlink_ext_ack *extack)
 {
        struct rb_root *root = &net->nexthop.rb_root;
@@ -3575,8 +3578,7 @@ static int nexthops_dump(struct net *net, struct notifier_block *nb,
                struct nexthop *nh;
 
                nh = rb_entry(node, struct nexthop, rb_node);
-               err = call_nexthop_notifier(nb, net, NEXTHOP_EVENT_REPLACE, nh,
-                                           extack);
+               err = call_nexthop_notifier(nb, net, event_type, nh, extack);
                if (err)
                        break;
        }
@@ -3590,7 +3592,7 @@ int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
        int err;
 
        rtnl_lock();
-       err = nexthops_dump(net, nb, extack);
+       err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
        if (err)
                goto unlock;
        err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
@@ -3603,8 +3605,17 @@ EXPORT_SYMBOL(register_nexthop_notifier);
 
 int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
 {
-       return blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
-                                                 nb);
+       int err;
+
+       rtnl_lock();
+       err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
+                                                nb);
+       if (err)
+               goto unlock;
+       nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
+unlock:
+       rtnl_unlock();
+       return err;
 }
 EXPORT_SYMBOL(unregister_nexthop_notifier);
 
index 3f7bd7a..141e85e 100644 (file)
@@ -1346,7 +1346,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
        if (dup_sack && (sacked & TCPCB_RETRANS)) {
                if (tp->undo_marker && tp->undo_retrans > 0 &&
                    after(end_seq, tp->undo_marker))
-                       tp->undo_retrans--;
+                       tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount);
                if ((sacked & TCPCB_SACKED_ACKED) &&
                    before(start_seq, state->reord))
                                state->reord = start_seq;
index 0d122ed..b910035 100644 (file)
@@ -935,7 +935,7 @@ static int __init udp_tunnel_nic_init_module(void)
 {
        int err;
 
-       udp_tunnel_nic_workqueue = alloc_workqueue("udp_tunnel_nic", 0, 0);
+       udp_tunnel_nic_workqueue = alloc_ordered_workqueue("udp_tunnel_nic", 0);
        if (!udp_tunnel_nic_workqueue)
                return -ENOMEM;
 
index 1bec5b2..0371d2c 100644 (file)
@@ -1378,7 +1378,6 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
        int err = -ENOMEM;
        int allow_create = 1;
        int replace_required = 0;
-       int sernum = fib6_new_sernum(info->nl_net);
 
        if (info->nlh) {
                if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
@@ -1478,7 +1477,7 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
        if (!err) {
                if (rt->nh)
                        list_add(&rt->nh_list, &rt->nh->f6i_list);
-               __fib6_update_sernum_upto_root(rt, sernum);
+               __fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net));
                fib6_start_gc(info->nl_net, rt);
        }
 
index 53486b1..93271a2 100644 (file)
@@ -869,8 +869,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
        }
 
        if (tunnel->version == L2TP_HDR_VER_3 &&
-           l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+           l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
+               l2tp_session_dec_refcount(session);
                goto invalid;
+       }
 
        l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
        l2tp_session_dec_refcount(session);
index 5265525..5ca186d 100644 (file)
@@ -1083,8 +1083,10 @@ static void __net_exit mctp_routes_net_exit(struct net *net)
 {
        struct mctp_route *rt;
 
+       rcu_read_lock();
        list_for_each_entry_rcu(rt, &net->mctp.routes, list)
                mctp_route_release(rt);
+       rcu_read_unlock();
 }
 
 static struct pernet_operations mctp_net_ops = {
index 2602f13..dbcebf5 100644 (file)
@@ -1316,7 +1316,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
                        goto alloc_skb;
                }
 
-               must_collapse = (info->size_goal - skb->len > 0) &&
+               must_collapse = (info->size_goal > skb->len) &&
                                (skb_shinfo(skb)->nr_frags < sysctl_max_skb_frags);
                if (must_collapse) {
                        size_bias = skb->len;
@@ -1325,7 +1325,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
        }
 
 alloc_skb:
-       if (!must_collapse && !ssk->sk_tx_skb_cache &&
+       if (!must_collapse &&
            !mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held))
                return 0;
 
index 543365f..2a2bc64 100644 (file)
@@ -46,6 +46,8 @@
  *                                     Copyright (C) 2011, <lokec@ccs.neu.edu>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/ethtool.h>
 #include <linux/types.h>
 #include <linux/mm.h>
index e286daf..6ec1ebe 100644 (file)
@@ -230,7 +230,8 @@ static int smc_clc_prfx_set(struct socket *clcsock,
                goto out_rel;
        }
        /* get address to which the internal TCP socket is bound */
-       kernel_getsockname(clcsock, (struct sockaddr *)&addrs);
+       if (kernel_getsockname(clcsock, (struct sockaddr *)&addrs) < 0)
+               goto out_rel;
        /* analyze IP specific data of net_device belonging to TCP socket */
        addr6 = (struct sockaddr_in6 *)&addrs;
        rcu_read_lock();
index af227b6..8280c93 100644 (file)
@@ -1474,7 +1474,9 @@ static void smc_conn_abort_work(struct work_struct *work)
                                                   abort_work);
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
 
+       lock_sock(&smc->sk);
        smc_conn_kill(conn, true);
+       release_sock(&smc->sk);
        sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
 }
 
index a0a27d8..ad570c2 100644 (file)
@@ -2423,7 +2423,7 @@ static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
                            u32 dport, struct sk_buff_head *xmitq)
 {
-       unsigned long time_limit = jiffies + 2;
+       unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
        struct sk_buff *skb;
        unsigned int lim;
        atomic_t *dcnt;
index eb47b9d..92345c9 100644 (file)
@@ -3073,7 +3073,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
 
                other = unix_peer(sk);
                if (other && unix_peer(other) != sk &&
-                   unix_recvq_full(other) &&
+                   unix_recvq_full_lockless(other) &&
                    unix_dgram_peer_wake_me(sk, other))
                        writable = 0;
 
index 4cce8fd..51fc23e 100644 (file)
@@ -29,7 +29,12 @@ CLANG_FLAGS  += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
 else
 CLANG_FLAGS    += -fintegrated-as
 endif
+# By default, clang only warns when it encounters an unknown warning flag or
+# certain optimization flags it knows it has not implemented.
+# Make it behave more like gcc by erroring when these flags are encountered
+# so they can be implemented or wrapped in cc-option.
 CLANG_FLAGS    += -Werror=unknown-warning-option
+CLANG_FLAGS    += -Werror=ignored-optimization-argument
 KBUILD_CFLAGS  += $(CLANG_FLAGS)
 KBUILD_AFLAGS  += $(CLANG_FLAGS)
 export CLANG_FLAGS
index 801c415..b9e94c5 100644 (file)
@@ -33,10 +33,11 @@ else
        CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
         $(call cc-param,asan-globals=1) \
         $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
-        $(call cc-param,asan-stack=$(stack_enable)) \
         $(call cc-param,asan-instrument-allocas=1)
 endif
 
+CFLAGS_KASAN += $(call cc-param,asan-stack=$(stack_enable))
+
 endif # CONFIG_KASAN_GENERIC
 
 ifdef CONFIG_KASAN_SW_TAGS
index eef56d6..48585c4 100644 (file)
@@ -13,7 +13,7 @@
 # Stage 2 is handled by this file and does the following
 # 1) Find all modules listed in modules.order
 # 2) modpost is then used to
-# 3)  create one <module>.mod.c file pr. module
+# 3)  create one <module>.mod.c file per module
 # 4)  create one Module.symvers file with CRC for all exported symbols
 
 # Step 3 is used to place certain information in the module's ELF
index b9b0f15..217d21a 100755 (executable)
@@ -34,7 +34,6 @@ REGEX_SOURCE_SYMBOL = re.compile(SOURCE_SYMBOL)
 REGEX_KCONFIG_DEF = re.compile(DEF)
 REGEX_KCONFIG_EXPR = re.compile(EXPR)
 REGEX_KCONFIG_STMT = re.compile(STMT)
-REGEX_KCONFIG_HELP = re.compile(r"^\s+help\s*$")
 REGEX_FILTER_SYMBOLS = re.compile(r"[A-Za-z0-9]$")
 REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+")
 REGEX_QUOTES = re.compile("(\"(.*?)\")")
@@ -102,6 +101,9 @@ def parse_options():
                      "continue.")
 
     if args.commit:
+        if args.commit.startswith('HEAD'):
+            sys.exit("The --commit option can't use the HEAD ref")
+
         args.find = False
 
     if args.ignore:
@@ -432,7 +434,6 @@ def parse_kconfig_file(kfile):
     lines = []
     defined = []
     references = []
-    skip = False
 
     if not os.path.exists(kfile):
         return defined, references
@@ -448,12 +449,6 @@ def parse_kconfig_file(kfile):
         if REGEX_KCONFIG_DEF.match(line):
             symbol_def = REGEX_KCONFIG_DEF.findall(line)
             defined.append(symbol_def[0])
-            skip = False
-        elif REGEX_KCONFIG_HELP.match(line):
-            skip = True
-        elif skip:
-            # ignore content of help messages
-            pass
         elif REGEX_KCONFIG_STMT.match(line):
             line = REGEX_QUOTES.sub("", line)
             symbols = get_symbols_in_line(line)
index 0033eed..1d1bde1 100755 (executable)
@@ -13,6 +13,7 @@ import logging
 import os
 import re
 import subprocess
+import sys
 
 _DEFAULT_OUTPUT = 'compile_commands.json'
 _DEFAULT_LOG_LEVEL = 'WARNING'
index 319f921..4edc708 100755 (executable)
@@ -17,13 +17,7 @@ binutils)
        echo 2.23.0
        ;;
 gcc)
-       # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293
-       # https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk
-       if [ "$SRCARCH" = arm64 ]; then
-               echo 5.1.0
-       else
-               echo 4.9.0
-       fi
+       echo 5.1.0
        ;;
 icc)
        # temporary
index f355869..6ee4fa8 100644 (file)
 #define EM_ARCV2       195
 #endif
 
+#ifndef EM_RISCV
+#define EM_RISCV       243
+#endif
+
 static uint32_t (*r)(const uint32_t *);
 static uint16_t (*r2)(const uint16_t *);
 static uint64_t (*r8)(const uint64_t *);
index 6517f22..e7ebd45 100644 (file)
@@ -2157,7 +2157,7 @@ static int selinux_ptrace_access_check(struct task_struct *child,
 static int selinux_ptrace_traceme(struct task_struct *parent)
 {
        return avc_has_perm(&selinux_state,
-                           task_sid_subj(parent), task_sid_obj(current),
+                           task_sid_obj(parent), task_sid_obj(current),
                            SECCLASS_PROCESS, PROCESS__PTRACE, NULL);
 }
 
@@ -6222,7 +6222,7 @@ static int selinux_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *m
        struct ipc_security_struct *isec;
        struct msg_security_struct *msec;
        struct common_audit_data ad;
-       u32 sid = task_sid_subj(target);
+       u32 sid = task_sid_obj(target);
        int rc;
 
        isec = selinux_ipc(msq);
index cacbe75..21a0e7c 100644 (file)
@@ -2016,7 +2016,7 @@ static int smk_curacc_on_task(struct task_struct *p, int access,
                                const char *caller)
 {
        struct smk_audit_info ad;
-       struct smack_known *skp = smk_of_task_struct_subj(p);
+       struct smack_known *skp = smk_of_task_struct_obj(p);
        int rc;
 
        smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
@@ -3480,7 +3480,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
  */
 static int smack_getprocattr(struct task_struct *p, char *name, char **value)
 {
-       struct smack_known *skp = smk_of_task_struct_subj(p);
+       struct smack_known *skp = smk_of_task_struct_obj(p);
        char *cp;
        int slen;
 
similarity index 83%
rename from tools/arch/x86/include/asm/unistd_64.h
rename to tools/arch/x86/include/uapi/asm/unistd_64.h
index 4205ed4..cb52a3a 100644 (file)
@@ -1,7 +1,4 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __NR_userfaultfd
-#define __NR_userfaultfd 282
-#endif
 #ifndef __NR_perf_event_open
 # define __NR_perf_event_open 298
 #endif
index c41f958..7976994 100644 (file)
        ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
 
 #define __get_next(t, insn)    \
-       ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
+       ({ t r; memcpy(&r, insn->next_byte, sizeof(t)); insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
 
 #define __peek_nbyte_next(t, insn, n)  \
-       ({ t r = *(t*)((insn)->next_byte + n); leXX_to_cpu(t, r); })
+       ({ t r; memcpy(&r, (insn)->next_byte + n, sizeof(t)); leXX_to_cpu(t, r); })
 
 #define get_next(t, insn)      \
        ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
index 7862f21..f2e506f 100644 (file)
@@ -4,9 +4,8 @@
 
 #include <stdlib.h>
 
-#define __pa(addr)     (addr)
 #define SMP_CACHE_BYTES        0
 #define memblock_alloc(size, align)    malloc(size)
-#define memblock_free(paddr, size)     free(paddr)
+#define memblock_free_ptr(paddr, size) free(paddr)
 
 #endif
index 95c072b..8816f06 100644 (file)
@@ -16,9 +16,9 @@
 # define __fallthrough __attribute__ ((fallthrough))
 #endif
 
-#if GCC_VERSION >= 40300
+#if __has_attribute(__error__)
 # define __compiletime_error(message) __attribute__((error(message)))
-#endif /* GCC_VERSION >= 40300 */
+#endif
 
 /* &a[0] degrades to a pointer: a different type from an array */
 #define __must_be_array(a)     BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
@@ -38,7 +38,3 @@
 #endif
 #define __printf(a, b) __attribute__((format(printf, a, b)))
 #define __scanf(a, b)  __attribute__((format(scanf, a, b)))
-
-#if GCC_VERSION >= 50100
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
-#endif
index 8712ff7..dcb0c1b 100644 (file)
@@ -5,12 +5,9 @@
 #include <linux/compiler.h>
 
 /*
- * In the fallback code below, we need to compute the minimum and
- * maximum values representable in a given type. These macros may also
- * be useful elsewhere, so we provide them outside the
- * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
- *
- * It would seem more obvious to do something like
+ * We need to compute the minimum and maximum values representable in a given
+ * type. These macros may also be useful elsewhere. It would seem more obvious
+ * to do something like:
  *
  * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
  * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
@@ -36,8 +33,6 @@
 #define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
 #define type_min(T) ((T)((T)-type_max(T)-(T)1))
 
-
-#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
 /*
  * For simplicity and code hygiene, the fallback code below insists on
  * a, b and *d having the same type (similar to the min() and max()
        __builtin_mul_overflow(__a, __b, __d);  \
 })
 
-#else
-
-
-/* Checking for unsigned overflow is relatively easy without causing UB. */
-#define __unsigned_add_overflow(a, b, d) ({    \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = __a + __b;                       \
-       *__d < __a;                             \
-})
-#define __unsigned_sub_overflow(a, b, d) ({    \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = __a - __b;                       \
-       __a < __b;                              \
-})
-/*
- * If one of a or b is a compile-time constant, this avoids a division.
- */
-#define __unsigned_mul_overflow(a, b, d) ({            \
-       typeof(a) __a = (a);                            \
-       typeof(b) __b = (b);                            \
-       typeof(d) __d = (d);                            \
-       (void) (&__a == &__b);                          \
-       (void) (&__a == __d);                           \
-       *__d = __a * __b;                               \
-       __builtin_constant_p(__b) ?                     \
-         __b > 0 && __a > type_max(typeof(__a)) / __b : \
-         __a > 0 && __b > type_max(typeof(__b)) / __a;  \
-})
-
-/*
- * For signed types, detecting overflow is much harder, especially if
- * we want to avoid UB. But the interface of these macros is such that
- * we must provide a result in *d, and in fact we must produce the
- * result promised by gcc's builtins, which is simply the possibly
- * wrapped-around value. Fortunately, we can just formally do the
- * operations in the widest relevant unsigned type (u64) and then
- * truncate the result - gcc is smart enough to generate the same code
- * with and without the (u64) casts.
- */
-
-/*
- * Adding two signed integers can overflow only if they have the same
- * sign, and overflow has happened iff the result has the opposite
- * sign.
- */
-#define __signed_add_overflow(a, b, d) ({      \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = (u64)__a + (u64)__b;             \
-       (((~(__a ^ __b)) & (*__d ^ __a))        \
-               & type_min(typeof(__a))) != 0;  \
-})
-
-/*
- * Subtraction is similar, except that overflow can now happen only
- * when the signs are opposite. In this case, overflow has happened if
- * the result has the opposite sign of a.
- */
-#define __signed_sub_overflow(a, b, d) ({      \
-       typeof(a) __a = (a);                    \
-       typeof(b) __b = (b);                    \
-       typeof(d) __d = (d);                    \
-       (void) (&__a == &__b);                  \
-       (void) (&__a == __d);                   \
-       *__d = (u64)__a - (u64)__b;             \
-       ((((__a ^ __b)) & (*__d ^ __a))         \
-               & type_min(typeof(__a))) != 0;  \
-})
-
-/*
- * Signed multiplication is rather hard. gcc always follows C99, so
- * division is truncated towards 0. This means that we can write the
- * overflow check like this:
- *
- * (a > 0 && (b > MAX/a || b < MIN/a)) ||
- * (a < -1 && (b > MIN/a || b < MAX/a) ||
- * (a == -1 && b == MIN)
- *
- * The redundant casts of -1 are to silence an annoying -Wtype-limits
- * (included in -Wextra) warning: When the type is u8 or u16, the
- * __b_c_e in check_mul_overflow obviously selects
- * __unsigned_mul_overflow, but unfortunately gcc still parses this
- * code and warns about the limited range of __b.
- */
-
-#define __signed_mul_overflow(a, b, d) ({                              \
-       typeof(a) __a = (a);                                            \
-       typeof(b) __b = (b);                                            \
-       typeof(d) __d = (d);                                            \
-       typeof(a) __tmax = type_max(typeof(a));                         \
-       typeof(a) __tmin = type_min(typeof(a));                         \
-       (void) (&__a == &__b);                                          \
-       (void) (&__a == __d);                                           \
-       *__d = (u64)__a * (u64)__b;                                     \
-       (__b > 0   && (__a > __tmax/__b || __a < __tmin/__b)) ||        \
-       (__b < (typeof(__b))-1  && (__a > __tmin/__b || __a < __tmax/__b)) || \
-       (__b == (typeof(__b))-1 && __a == __tmin);                      \
-})
-
-
-#define check_add_overflow(a, b, d)                                    \
-       __builtin_choose_expr(is_signed_type(typeof(a)),                \
-                       __signed_add_overflow(a, b, d),                 \
-                       __unsigned_add_overflow(a, b, d))
-
-#define check_sub_overflow(a, b, d)                                    \
-       __builtin_choose_expr(is_signed_type(typeof(a)),                \
-                       __signed_sub_overflow(a, b, d),                 \
-                       __unsigned_sub_overflow(a, b, d))
-
-#define check_mul_overflow(a, b, d)                                    \
-       __builtin_choose_expr(is_signed_type(typeof(a)),                \
-                       __signed_mul_overflow(a, b, d),                 \
-                       __unsigned_mul_overflow(a, b, d))
-
-
-#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
-
 /**
  * array_size() - Calculate size of 2-dimensional array.
  *
index d888672..8441e3e 100644 (file)
@@ -43,7 +43,7 @@ void perf_evsel__delete(struct perf_evsel *evsel)
        free(evsel);
 }
 
-#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
+#define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y))
 #define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)
 
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
@@ -54,7 +54,10 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
                int cpu, thread;
                for (cpu = 0; cpu < ncpus; cpu++) {
                        for (thread = 0; thread < nthreads; thread++) {
-                               FD(evsel, cpu, thread) = -1;
+                               int *fd = FD(evsel, cpu, thread);
+
+                               if (fd)
+                                       *fd = -1;
                        }
                }
        }
@@ -80,7 +83,7 @@ sys_perf_event_open(struct perf_event_attr *attr,
 static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd)
 {
        struct perf_evsel *leader = evsel->leader;
-       int fd;
+       int *fd;
 
        if (evsel == leader) {
                *group_fd = -1;
@@ -95,10 +98,10 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *grou
                return -ENOTCONN;
 
        fd = FD(leader, cpu, thread);
-       if (fd == -1)
+       if (fd == NULL || *fd == -1)
                return -EBADF;
 
-       *group_fd = fd;
+       *group_fd = *fd;
 
        return 0;
 }
@@ -138,7 +141,11 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
 
        for (cpu = 0; cpu < cpus->nr; cpu++) {
                for (thread = 0; thread < threads->nr; thread++) {
-                       int fd, group_fd;
+                       int fd, group_fd, *evsel_fd;
+
+                       evsel_fd = FD(evsel, cpu, thread);
+                       if (evsel_fd == NULL)
+                               return -EINVAL;
 
                        err = get_group_fd(evsel, cpu, thread, &group_fd);
                        if (err < 0)
@@ -151,7 +158,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
                        if (fd < 0)
                                return -errno;
 
-                       FD(evsel, cpu, thread) = fd;
+                       *evsel_fd = fd;
                }
        }
 
@@ -163,9 +170,12 @@ static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
        int thread;
 
        for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
-               if (FD(evsel, cpu, thread) >= 0)
-                       close(FD(evsel, cpu, thread));
-               FD(evsel, cpu, thread) = -1;
+               int *fd = FD(evsel, cpu, thread);
+
+               if (fd && *fd >= 0) {
+                       close(*fd);
+                       *fd = -1;
+               }
        }
 }
 
@@ -209,13 +219,12 @@ void perf_evsel__munmap(struct perf_evsel *evsel)
 
        for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
                for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
-                       int fd = FD(evsel, cpu, thread);
-                       struct perf_mmap *map = MMAP(evsel, cpu, thread);
+                       int *fd = FD(evsel, cpu, thread);
 
-                       if (fd < 0)
+                       if (fd == NULL || *fd < 0)
                                continue;
 
-                       perf_mmap__munmap(map);
+                       perf_mmap__munmap(MMAP(evsel, cpu, thread));
                }
        }
 
@@ -239,15 +248,16 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
 
        for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
                for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
-                       int fd = FD(evsel, cpu, thread);
-                       struct perf_mmap *map = MMAP(evsel, cpu, thread);
+                       int *fd = FD(evsel, cpu, thread);
+                       struct perf_mmap *map;
 
-                       if (fd < 0)
+                       if (fd == NULL || *fd < 0)
                                continue;
 
+                       map = MMAP(evsel, cpu, thread);
                        perf_mmap__init(map, NULL, false, NULL);
 
-                       ret = perf_mmap__mmap(map, &mp, fd, cpu);
+                       ret = perf_mmap__mmap(map, &mp, *fd, cpu);
                        if (ret) {
                                perf_evsel__munmap(evsel);
                                return ret;
@@ -260,7 +270,9 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
 
 void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
 {
-       if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL)
+       int *fd = FD(evsel, cpu, thread);
+
+       if (fd == NULL || *fd < 0 || MMAP(evsel, cpu, thread) == NULL)
                return NULL;
 
        return MMAP(evsel, cpu, thread)->base;
@@ -295,17 +307,18 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
                     struct perf_counts_values *count)
 {
        size_t size = perf_evsel__read_size(evsel);
+       int *fd = FD(evsel, cpu, thread);
 
        memset(count, 0, sizeof(*count));
 
-       if (FD(evsel, cpu, thread) < 0)
+       if (fd == NULL || *fd < 0)
                return -EINVAL;
 
        if (MMAP(evsel, cpu, thread) &&
            !perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
                return 0;
 
-       if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
+       if (readn(*fd, count->values, size) <= 0)
                return -errno;
 
        return 0;
@@ -318,8 +331,13 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
        int thread;
 
        for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
-               int fd = FD(evsel, cpu, thread),
-                   err = ioctl(fd, ioc, arg);
+               int err;
+               int *fd = FD(evsel, cpu, thread);
+
+               if (fd == NULL || *fd < 0)
+                       return -1;
+
+               err = ioctl(*fd, ioc, arg);
 
                if (err)
                        return err;
index 52152d1..7993635 100644 (file)
@@ -164,7 +164,7 @@ const char unwinding_data[n]: an array of unwinding data, consisting of the EH F
 The EH Frame header follows the Linux Standard Base (LSB) specification as described in the document at https://refspecs.linuxfoundation.org/LSB_1.3.0/gLSB/gLSB/ehframehdr.html
 
 
-The EH Frame follows the LSB specicfication as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html
+The EH Frame follows the LSB specification as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html
 
 
 NOTE: The mapped_size is generally either the same as unwind_data_size (if the unwinding data was mapped in memory by the running process) or zero (if the unwinding data is not mapped by the process). If the unwinding data was not mapped, then only the EH Frame Header will be read, which can be used to specify FP based unwinding for a function which does not have unwinding information.
index de6beed..3b6a2c8 100644 (file)
@@ -261,7 +261,7 @@ COALESCE
 User can specify how to sort offsets for cacheline.
 
 Following fields are available and governs the final
-output fields set for caheline offsets output:
+output fields set for cacheline offsets output:
 
   tid   - coalesced by process TIDs
   pid   - coalesced by process PIDs
index 184ba62..db465fa 100644 (file)
@@ -883,7 +883,7 @@ and "r" can be combined to get calls and returns.
 
 "Transactions" events correspond to the start or end of transactions. The
 'flags' field can be used in perf script to determine whether the event is a
-tranasaction start, commit or abort.
+transaction start, commit or abort.
 
 Note that "instructions", "branches" and "transactions" events depend on code
 flow packets which can be disabled by using the config term "branch=0".  Refer
index 74d7745..1b4d452 100644 (file)
@@ -44,7 +44,7 @@ COMMON OPTIONS
 
 -f::
 --force::
-       Don't complan, do it.
+       Don't complain, do it.
 
 REPORT OPTIONS
 --------------
index 5a1f681..fa4f39d 100644 (file)
@@ -54,7 +54,7 @@ all sched_wakeup events in the system:
 Traces meant to be processed using a script should be recorded with
 the above option: -a to enable system-wide collection.
 
-The format file for the sched_wakep event defines the following fields
+The format file for the sched_wakeup event defines the following fields
 (see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
 
 ----
index 0250dc6..cf4b7f4 100644 (file)
@@ -448,7 +448,7 @@ all sched_wakeup events in the system:
 Traces meant to be processed using a script should be recorded with
 the above option: -a to enable system-wide collection.
 
-The format file for the sched_wakep event defines the following fields
+The format file for the sched_wakeup event defines the following fields
 (see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
 
 ----
index 4c9310b..7e6fb7c 100644 (file)
@@ -385,7 +385,7 @@ Aggregate counts per physical processor for system-wide mode measurements.
 Print metrics or metricgroups specified in a comma separated list.
 For a group all metrics from the group are added.
 The events from the metrics are automatically measured.
-See perf list output for the possble metrics and metricgroups.
+See perf list output for the possible metrics and metricgroups.
 
 -A::
 --no-aggr::
index c6302df..a15b93f 100644 (file)
@@ -2,7 +2,7 @@ Using TopDown metrics in user space
 -----------------------------------
 
 Intel CPUs (since Sandy Bridge and Silvermont) support a TopDown
-methology to break down CPU pipeline execution into 4 bottlenecks:
+methodology to break down CPU pipeline execution into 4 bottlenecks:
 frontend bound, backend bound, bad speculation, retiring.
 
 For more details on Topdown see [1][5]
index c7c7ec0..5fc6a2a 100644 (file)
@@ -8,10 +8,10 @@
 #include <linux/coresight-pmu.h>
 #include <linux/zalloc.h>
 
-#include "../../util/auxtrace.h"
-#include "../../util/debug.h"
-#include "../../util/evlist.h"
-#include "../../util/pmu.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/debug.h"
+#include "../../../util/evlist.h"
+#include "../../../util/pmu.h"
 #include "cs-etm.h"
 #include "arm-spe.h"
 
index 515aae4..293a23b 100644 (file)
 #include <linux/zalloc.h>
 
 #include "cs-etm.h"
-#include "../../util/debug.h"
-#include "../../util/record.h"
-#include "../../util/auxtrace.h"
-#include "../../util/cpumap.h"
-#include "../../util/event.h"
-#include "../../util/evlist.h"
-#include "../../util/evsel.h"
-#include "../../util/perf_api_probe.h"
-#include "../../util/evsel_config.h"
-#include "../../util/pmu.h"
-#include "../../util/cs-etm.h"
+#include "../../../util/debug.h"
+#include "../../../util/record.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/cpumap.h"
+#include "../../../util/event.h"
+#include "../../../util/evlist.h"
+#include "../../../util/evsel.h"
+#include "../../../util/perf_api_probe.h"
+#include "../../../util/evsel_config.h"
+#include "../../../util/pmu.h"
+#include "../../../util/cs-etm.h"
 #include <internal/lib.h> // page_size
-#include "../../util/session.h"
+#include "../../../util/session.h"
 
 #include <errno.h>
 #include <stdlib.h>
index 2864e2e..2833e10 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-#include "../../util/perf_regs.h"
+#include "../../../util/perf_regs.h"
 
 const struct sample_reg sample_reg_masks[] = {
        SMPL_REG_END
index bbc297a..b8b23b9 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/string.h>
 
 #include "arm-spe.h"
-#include "../../util/pmu.h"
+#include "../../../util/pmu.h"
 
 struct perf_event_attr
 *perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
index 36ba4c6..b7692cb 100644 (file)
@@ -1,8 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <elfutils/libdwfl.h>
-#include "../../util/unwind-libdw.h"
-#include "../../util/perf_regs.h"
-#include "../../util/event.h"
+#include "../../../util/unwind-libdw.h"
+#include "../../../util/perf_regs.h"
+#include "../../../util/event.h"
 
 bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
 {
index 3a55022..438906b 100644 (file)
@@ -3,8 +3,8 @@
 #include <errno.h>
 #include <libunwind.h>
 #include "perf_regs.h"
-#include "../../util/unwind.h"
-#include "../../util/debug.h"
+#include "../../../util/unwind.h"
+#include "../../../util/debug.h"
 
 int libunwind__arch_reg_id(int regnum)
 {
index eeafe97..792cd75 100644 (file)
@@ -432,7 +432,7 @@ void iostat_print_metric(struct perf_stat_config *config, struct evsel *evsel,
        u8 die = ((struct iio_root_port *)evsel->priv)->die;
        struct perf_counts_values *count = perf_counts(evsel->counts, die, 0);
 
-       if (count->run && count->ena) {
+       if (count && count->run && count->ena) {
                if (evsel->prev_raw_counts && !out->force_header) {
                        struct perf_counts_values *prev_count =
                                perf_counts(evsel->prev_raw_counts, die, 0);
index 0e824f7..6211d0b 100644 (file)
@@ -368,16 +368,6 @@ static inline int output_type(unsigned int type)
        return OUTPUT_TYPE_OTHER;
 }
 
-static inline unsigned int attr_type(unsigned int type)
-{
-       switch (type) {
-       case OUTPUT_TYPE_SYNTH:
-               return PERF_TYPE_SYNTH;
-       default:
-               return type;
-       }
-}
-
 static bool output_set_by_user(void)
 {
        int j;
@@ -556,6 +546,18 @@ static void set_print_ip_opts(struct perf_event_attr *attr)
                output[type].print_ip_opts |= EVSEL__PRINT_SRCLINE;
 }
 
+static struct evsel *find_first_output_type(struct evlist *evlist,
+                                           unsigned int type)
+{
+       struct evsel *evsel;
+
+       evlist__for_each_entry(evlist, evsel) {
+               if (output_type(evsel->core.attr.type) == (int)type)
+                       return evsel;
+       }
+       return NULL;
+}
+
 /*
  * verify all user requested events exist and the samples
  * have the expected data
@@ -567,7 +569,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
        struct evsel *evsel;
 
        for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
-               evsel = perf_session__find_first_evtype(session, attr_type(j));
+               evsel = find_first_output_type(session->evlist, j);
 
                /*
                 * even if fields is set to 0 (ie., show nothing) event must
index f6e87b7..f0ecfda 100644 (file)
@@ -2408,6 +2408,8 @@ int cmd_stat(int argc, const char **argv)
                        goto out;
                } else if (verbose)
                        iostat_list(evsel_list, &stat_config);
+               if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target))
+                       target.system_wide = true;
        }
 
        if (add_default_attributes())
index 84a0ced..f1f2965 100644 (file)
   {
     "EventCode": "0x4e010",
     "EventName": "PM_GCT_NOSLOT_IC_L3MISS",
-    "BriefDescription": "Gct empty for this thread due to icach l3 miss",
+    "BriefDescription": "Gct empty for this thread due to icache l3 miss",
     "PublicDescription": ""
   },
   {
index 9866cdd..9b4a765 100644 (file)
@@ -229,8 +229,8 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
                            struct thread *thread, struct state *state)
 {
        struct addr_location al;
-       unsigned char buf1[BUFSZ];
-       unsigned char buf2[BUFSZ];
+       unsigned char buf1[BUFSZ] = {0};
+       unsigned char buf2[BUFSZ] = {0};
        size_t ret_len;
        u64 objdump_addr;
        const char *objdump_name;
index a288035..c756284 100644 (file)
 /* For bsearch. We try to unwind functions in shared object. */
 #include <stdlib.h>
 
+/*
+ * The test will assert frames are on the stack but tail call optimizations lose
+ * the frame of the caller. Clang can disable this optimization on a called
+ * function but GCC currently (11/2020) lacks this attribute. The barrier is
+ * used to inhibit tail calls in these cases.
+ */
+#ifdef __has_attribute
+#if __has_attribute(disable_tail_calls)
+#define NO_TAIL_CALL_ATTRIBUTE __attribute__((disable_tail_calls))
+#define NO_TAIL_CALL_BARRIER
+#endif
+#endif
+#ifndef NO_TAIL_CALL_ATTRIBUTE
+#define NO_TAIL_CALL_ATTRIBUTE
+#define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory");
+#endif
+
 static int mmap_handler(struct perf_tool *tool __maybe_unused,
                        union perf_event *event,
                        struct perf_sample *sample,
@@ -91,7 +108,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
        return strcmp((const char *) symbol, funcs[idx]);
 }
 
-noinline int test_dwarf_unwind__thread(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thread)
 {
        struct perf_sample sample;
        unsigned long cnt = 0;
@@ -122,7 +139,7 @@ noinline int test_dwarf_unwind__thread(struct thread *thread)
 
 static int global_unwind_retval = -INT_MAX;
 
-noinline int test_dwarf_unwind__compare(void *p1, void *p2)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__compare(void *p1, void *p2)
 {
        /* Any possible value should be 'thread' */
        struct thread *thread = *(struct thread **)p1;
@@ -141,7 +158,7 @@ noinline int test_dwarf_unwind__compare(void *p1, void *p2)
        return p1 - p2;
 }
 
-noinline int test_dwarf_unwind__krava_3(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_3(struct thread *thread)
 {
        struct thread *array[2] = {thread, thread};
        void *fp = &bsearch;
@@ -160,14 +177,22 @@ noinline int test_dwarf_unwind__krava_3(struct thread *thread)
        return global_unwind_retval;
 }
 
-noinline int test_dwarf_unwind__krava_2(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_2(struct thread *thread)
 {
-       return test_dwarf_unwind__krava_3(thread);
+       int ret;
+
+       ret =  test_dwarf_unwind__krava_3(thread);
+       NO_TAIL_CALL_BARRIER;
+       return ret;
 }
 
-noinline int test_dwarf_unwind__krava_1(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_1(struct thread *thread)
 {
-       return test_dwarf_unwind__krava_2(thread);
+       int ret;
+
+       ret =  test_dwarf_unwind__krava_2(thread);
+       NO_TAIL_CALL_BARRIER;
+       return ret;
 }
 
 int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unused)
index 781afe4..fa5bd5c 100644 (file)
@@ -757,25 +757,40 @@ void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
 }
 
 void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column,
-                           unsigned int row, bool arrow_down)
+                           unsigned int row, int diff, bool arrow_down)
 {
-       unsigned int end_row;
+       int end_row;
 
-       if (row >= browser->top_idx)
-               end_row = row - browser->top_idx;
-       else
+       if (diff <= 0)
                return;
 
        SLsmg_set_char_set(1);
 
        if (arrow_down) {
+               if (row + diff <= browser->top_idx)
+                       return;
+
+               end_row = row + diff - browser->top_idx;
                ui_browser__gotorc(browser, end_row, column - 1);
-               SLsmg_write_char(SLSMG_ULCORN_CHAR);
-               ui_browser__gotorc(browser, end_row, column);
-               SLsmg_draw_hline(2);
-               ui_browser__gotorc(browser, end_row + 1, column - 1);
                SLsmg_write_char(SLSMG_LTEE_CHAR);
+
+               while (--end_row >= 0 && end_row > (int)(row - browser->top_idx)) {
+                       ui_browser__gotorc(browser, end_row, column - 1);
+                       SLsmg_draw_vline(1);
+               }
+
+               end_row = (int)(row - browser->top_idx);
+               if (end_row >= 0) {
+                       ui_browser__gotorc(browser, end_row, column - 1);
+                       SLsmg_write_char(SLSMG_ULCORN_CHAR);
+                       ui_browser__gotorc(browser, end_row, column);
+                       SLsmg_draw_hline(2);
+               }
        } else {
+               if (row < browser->top_idx)
+                       return;
+
+               end_row = row - browser->top_idx;
                ui_browser__gotorc(browser, end_row, column - 1);
                SLsmg_write_char(SLSMG_LTEE_CHAR);
                ui_browser__gotorc(browser, end_row, column);
index 3678eb8..510ce45 100644 (file)
@@ -51,7 +51,7 @@ void ui_browser__write_graph(struct ui_browser *browser, int graph);
 void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
                              u64 start, u64 end);
 void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column,
-                           unsigned int row, bool arrow_down);
+                           unsigned int row, int diff, bool arrow_down);
 void __ui_browser__show_title(struct ui_browser *browser, const char *title);
 void ui_browser__show_title(struct ui_browser *browser, const char *title);
 int ui_browser__show(struct ui_browser *browser, const char *title,
index ef4da42..e81c249 100644 (file)
@@ -125,13 +125,20 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
                ab->selection = al;
 }
 
-static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
+static int is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
 {
        struct disasm_line *pos = list_prev_entry(cursor, al.node);
        const char *name;
+       int diff = 1;
+
+       while (pos && pos->al.offset == -1) {
+               pos = list_prev_entry(pos, al.node);
+               if (!ab->opts->hide_src_code)
+                       diff++;
+       }
 
        if (!pos)
-               return false;
+               return 0;
 
        if (ins__is_lock(&pos->ins))
                name = pos->ops.locked.ins.name;
@@ -139,9 +146,11 @@ static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
                name = pos->ins.name;
 
        if (!name || !cursor->ins.name)
-               return false;
+               return 0;
 
-       return ins__is_fused(ab->arch, name, cursor->ins.name);
+       if (ins__is_fused(ab->arch, name, cursor->ins.name))
+               return diff;
+       return 0;
 }
 
 static void annotate_browser__draw_current_jump(struct ui_browser *browser)
@@ -155,6 +164,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
        struct annotation *notes = symbol__annotation(sym);
        u8 pcnt_width = annotation__pcnt_width(notes);
        int width;
+       int diff = 0;
 
        /* PLT symbols contain external offsets */
        if (strstr(sym->name, "@plt"))
@@ -205,11 +215,11 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
                                 pcnt_width + 2 + notes->widths.addr + width,
                                 from, to);
 
-       if (is_fused(ab, cursor)) {
+       diff = is_fused(ab, cursor);
+       if (diff > 0) {
                ui_browser__mark_fused(browser,
                                       pcnt_width + 3 + notes->widths.addr + width,
-                                      from - 1,
-                                      to > from);
+                                      from - diff, diff, to > from);
        }
 }
 
index 683f6d6..1a7112a 100644 (file)
 struct btf * __weak btf__load_from_kernel_by_id(__u32 id)
 {
        struct btf *btf;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
        int err = btf__get_from_id(id, &btf);
+#pragma GCC diagnostic pop
 
        return err ? ERR_PTR(err) : btf;
 }
index 4fb5e90..60ce590 100644 (file)
@@ -801,7 +801,7 @@ int perf_config_set(struct perf_config_set *set,
                                  section->name, item->name);
                        ret = fn(key, value, data);
                        if (ret < 0) {
-                               pr_err("Error: wrong config key-value pair %s=%s\n",
+                               pr_err("Error in the given config file: wrong config key-value pair %s=%s\n",
                                       key, value);
                                /*
                                 * Can't be just a 'break', as perf_config_set__for_each_entry()
index da19be7..44e40ba 100644 (file)
@@ -2149,6 +2149,7 @@ static int add_callchain_ip(struct thread *thread,
 
        al.filtered = 0;
        al.sym = NULL;
+       al.srcline = NULL;
        if (!cpumode) {
                thread__find_cpumode_addr_location(thread, ip, &al);
        } else {
index 6836510..22722ab 100644 (file)
@@ -266,16 +266,19 @@ int test_init(struct tdescr *td)
                        td->feats_supported |= FEAT_SSBS;
                if (getauxval(AT_HWCAP) & HWCAP_SVE)
                        td->feats_supported |= FEAT_SVE;
-               if (feats_ok(td))
+               if (feats_ok(td)) {
                        fprintf(stderr,
                                "Required Features: [%s] supported\n",
                                feats_to_string(td->feats_required &
                                                td->feats_supported));
-               else
+               } else {
                        fprintf(stderr,
                                "Required Features: [%s] NOT supported\n",
                                feats_to_string(td->feats_required &
                                                ~td->feats_supported));
+                       td->result = KSFT_SKIP;
+                       return 0;
+               }
        }
 
        /* Perform test specific additional initialization */
index 0330517..f3daa44 100644 (file)
 #include <unistd.h>
 #include <ftw.h>
 
-
 #include "cgroup_helpers.h"
 
 /*
  * To avoid relying on the system setup, when setup_cgroup_env is called
- * we create a new mount namespace, and cgroup namespace. The cgroup2
- * root is mounted at CGROUP_MOUNT_PATH
- *
- * Unfortunately, most people don't have cgroupv2 enabled at this point in time.
- * It's easier to create our own mount namespace and manage it ourselves.
+ * we create a new mount namespace, and cgroup namespace. The cgroupv2
+ * root is mounted at CGROUP_MOUNT_PATH. Unfortunately, most people don't
+ * have cgroupv2 enabled at this point in time. It's easier to create our
+ * own mount namespace and manage it ourselves. We assume /mnt exists.
  *
- * We assume /mnt exists.
+ * Related cgroupv1 helpers are named *classid*(), since we only use the
+ * net_cls controller for tagging net_cls.classid. We assume the default
+ * mount under /sys/fs/cgroup/net_cls, which should be the case for the
+ * vast majority of users.
  */
 
 #define WALK_FD_LIMIT                  16
+
 #define CGROUP_MOUNT_PATH              "/mnt"
+#define CGROUP_MOUNT_DFLT              "/sys/fs/cgroup"
+#define NETCLS_MOUNT_PATH              CGROUP_MOUNT_DFLT "/net_cls"
 #define CGROUP_WORK_DIR                        "/cgroup-test-work-dir"
+
 #define format_cgroup_path(buf, path) \
        snprintf(buf, sizeof(buf), "%s%s%s", CGROUP_MOUNT_PATH, \
                 CGROUP_WORK_DIR, path)
 
+#define format_classid_path(buf)                               \
+       snprintf(buf, sizeof(buf), "%s%s", NETCLS_MOUNT_PATH,   \
+                CGROUP_WORK_DIR)
+
 /**
  * enable_all_controllers() - Enable all available cgroup v2 controllers
  *
@@ -139,8 +148,7 @@ static int nftwfunc(const char *filename, const struct stat *statptr,
        return 0;
 }
 
-
-static int join_cgroup_from_top(char *cgroup_path)
+static int join_cgroup_from_top(const char *cgroup_path)
 {
        char cgroup_procs_path[PATH_MAX + 1];
        pid_t pid = getpid();
@@ -313,3 +321,114 @@ int cgroup_setup_and_join(const char *path) {
        }
        return cg_fd;
 }
+
+/**
+ * setup_classid_environment() - Setup the cgroupv1 net_cls environment
+ *
+ * After calling this function, cleanup_classid_environment should be called
+ * once testing is complete.
+ *
+ * This function will print an error to stderr and return 1 if it is unable
+ * to setup the cgroup environment. If setup is successful, 0 is returned.
+ */
+int setup_classid_environment(void)
+{
+       char cgroup_workdir[PATH_MAX + 1];
+
+       format_classid_path(cgroup_workdir);
+
+       if (mount("tmpfs", CGROUP_MOUNT_DFLT, "tmpfs", 0, NULL) &&
+           errno != EBUSY) {
+               log_err("mount cgroup base");
+               return 1;
+       }
+
+       if (mkdir(NETCLS_MOUNT_PATH, 0777) && errno != EEXIST) {
+               log_err("mkdir cgroup net_cls");
+               return 1;
+       }
+
+       if (mount("net_cls", NETCLS_MOUNT_PATH, "cgroup", 0, "net_cls") &&
+           errno != EBUSY) {
+               log_err("mount cgroup net_cls");
+               return 1;
+       }
+
+       cleanup_classid_environment();
+
+       if (mkdir(cgroup_workdir, 0777) && errno != EEXIST) {
+               log_err("mkdir cgroup work dir");
+               return 1;
+       }
+
+       return 0;
+}
+
+/**
+ * set_classid() - Set a cgroupv1 net_cls classid
+ * @id: the numeric classid
+ *
+ * Writes the passed classid into the cgroup work dir's net_cls.classid
+ * file in order to later on trigger socket tagging.
+ *
+ * On success, it returns 0, otherwise on failure it returns 1. If there
+ * is a failure, it prints the error to stderr.
+ */
+int set_classid(unsigned int id)
+{
+       char cgroup_workdir[PATH_MAX - 42];
+       char cgroup_classid_path[PATH_MAX + 1];
+       int fd, rc = 0;
+
+       format_classid_path(cgroup_workdir);
+       snprintf(cgroup_classid_path, sizeof(cgroup_classid_path),
+                "%s/net_cls.classid", cgroup_workdir);
+
+       fd = open(cgroup_classid_path, O_WRONLY);
+       if (fd < 0) {
+               log_err("Opening cgroup classid: %s", cgroup_classid_path);
+               return 1;
+       }
+
+       if (dprintf(fd, "%u\n", id) < 0) {
+               log_err("Setting cgroup classid");
+               rc = 1;
+       }
+
+       close(fd);
+       return rc;
+}
+
+/**
+ * join_classid() - Join a cgroupv1 net_cls classid
+ *
+ * This function expects the cgroup work dir to be already created, as we
+ * join it here. This causes the process sockets to be tagged with the given
+ * net_cls classid.
+ *
+ * On success, it returns 0, otherwise on failure it returns 1.
+ */
+int join_classid(void)
+{
+       char cgroup_workdir[PATH_MAX + 1];
+
+       format_classid_path(cgroup_workdir);
+       return join_cgroup_from_top(cgroup_workdir);
+}
+
+/**
+ * cleanup_classid_environment() - Cleanup the cgroupv1 net_cls environment
+ *
+ * At call time, it moves the calling process to the root cgroup, and then
+ * runs the deletion process.
+ *
+ * On failure, it will print an error to stderr, and try to continue.
+ */
+void cleanup_classid_environment(void)
+{
+       char cgroup_workdir[PATH_MAX + 1];
+
+       format_classid_path(cgroup_workdir);
+       join_cgroup_from_top(NETCLS_MOUNT_PATH);
+       nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT);
+}
index 5fe3d88..629da38 100644 (file)
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __CGROUP_HELPERS_H
 #define __CGROUP_HELPERS_H
+
 #include <errno.h>
 #include <string.h>
 
@@ -8,12 +9,21 @@
 #define log_err(MSG, ...) fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
        __FILE__, __LINE__, clean_errno(), ##__VA_ARGS__)
 
-
+/* cgroupv2 related */
 int cgroup_setup_and_join(const char *path);
 int create_and_get_cgroup(const char *path);
+unsigned long long get_cgroup_id(const char *path);
+
 int join_cgroup(const char *path);
+
 int setup_cgroup_environment(void);
 void cleanup_cgroup_environment(void);
-unsigned long long get_cgroup_id(const char *path);
 
-#endif
+/* cgroupv1 related */
+int set_classid(unsigned int id);
+int join_classid(void);
+
+int setup_classid_environment(void);
+void cleanup_classid_environment(void);
+
+#endif /* __CGROUP_HELPERS_H */
index 7e9f637..6db1af8 100644 (file)
@@ -208,11 +208,26 @@ error_close:
 
 static int connect_fd_to_addr(int fd,
                              const struct sockaddr_storage *addr,
-                             socklen_t addrlen)
+                             socklen_t addrlen, const bool must_fail)
 {
-       if (connect(fd, (const struct sockaddr *)addr, addrlen)) {
-               log_err("Failed to connect to server");
-               return -1;
+       int ret;
+
+       errno = 0;
+       ret = connect(fd, (const struct sockaddr *)addr, addrlen);
+       if (must_fail) {
+               if (!ret) {
+                       log_err("Unexpected success to connect to server");
+                       return -1;
+               }
+               if (errno != EPERM) {
+                       log_err("Unexpected error from connect to server");
+                       return -1;
+               }
+       } else {
+               if (ret) {
+                       log_err("Failed to connect to server");
+                       return -1;
+               }
        }
 
        return 0;
@@ -257,7 +272,7 @@ int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts)
                       strlen(opts->cc) + 1))
                goto error_close;
 
-       if (connect_fd_to_addr(fd, &addr, addrlen))
+       if (connect_fd_to_addr(fd, &addr, addrlen, opts->must_fail))
                goto error_close;
 
        return fd;
@@ -289,7 +304,7 @@ int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms)
                return -1;
        }
 
-       if (connect_fd_to_addr(client_fd, &addr, len))
+       if (connect_fd_to_addr(client_fd, &addr, len, false))
                return -1;
 
        return 0;
index da7e132..d198181 100644 (file)
@@ -20,6 +20,7 @@ typedef __u16 __sum16;
 struct network_helper_opts {
        const char *cc;
        int timeout_ms;
+       bool must_fail;
 };
 
 /* ipv4 test vector */
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c
new file mode 100644 (file)
index 0000000..ab3b9bc
--- /dev/null
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#include "connect4_dropper.skel.h"
+
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+
+static int run_test(int cgroup_fd, int server_fd, bool classid)
+{
+       struct network_helper_opts opts = {
+               .must_fail = true,
+       };
+       struct connect4_dropper *skel;
+       int fd, err = 0;
+
+       skel = connect4_dropper__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "skel_open"))
+               return -1;
+
+       skel->links.connect_v4_dropper =
+               bpf_program__attach_cgroup(skel->progs.connect_v4_dropper,
+                                          cgroup_fd);
+       if (!ASSERT_OK_PTR(skel->links.connect_v4_dropper, "prog_attach")) {
+               err = -1;
+               goto out;
+       }
+
+       if (classid && !ASSERT_OK(join_classid(), "join_classid")) {
+               err = -1;
+               goto out;
+       }
+
+       fd = connect_to_fd_opts(server_fd, &opts);
+       if (fd < 0)
+               err = -1;
+       else
+               close(fd);
+out:
+       connect4_dropper__destroy(skel);
+       return err;
+}
+
+void test_cgroup_v1v2(void)
+{
+       struct network_helper_opts opts = {};
+       int server_fd, client_fd, cgroup_fd;
+       static const int port = 60123;
+
+       /* Step 1: Check base connectivity works without any BPF. */
+       server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0);
+       if (!ASSERT_GE(server_fd, 0, "server_fd"))
+               return;
+       client_fd = connect_to_fd_opts(server_fd, &opts);
+       if (!ASSERT_GE(client_fd, 0, "client_fd")) {
+               close(server_fd);
+               return;
+       }
+       close(client_fd);
+       close(server_fd);
+
+       /* Step 2: Check BPF policy prog attached to cgroups drops connectivity. */
+       cgroup_fd = test__join_cgroup("/connect_dropper");
+       if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd"))
+               return;
+       server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0);
+       if (!ASSERT_GE(server_fd, 0, "server_fd")) {
+               close(cgroup_fd);
+               return;
+       }
+       ASSERT_OK(run_test(cgroup_fd, server_fd, false), "cgroup-v2-only");
+       setup_classid_environment();
+       set_classid(42);
+       ASSERT_OK(run_test(cgroup_fd, server_fd, true), "cgroup-v1v2");
+       cleanup_classid_environment();
+       close(server_fd);
+       close(cgroup_fd);
+}
index 53f0e0f..37c20b5 100644 (file)
@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #define _GNU_SOURCE
 #include <test_progs.h>
-#include <linux/ptrace.h>
 #include "test_task_pt_regs.skel.h"
 
 void test_task_pt_regs(void)
diff --git a/tools/testing/selftests/bpf/progs/connect4_dropper.c b/tools/testing/selftests/bpf/progs/connect4_dropper.c
new file mode 100644 (file)
index 0000000..b565d99
--- /dev/null
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <string.h>
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+
+#include <sys/socket.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define VERDICT_REJECT 0
+#define VERDICT_PROCEED        1
+
+SEC("cgroup/connect4")
+int connect_v4_dropper(struct bpf_sock_addr *ctx)
+{
+       if (ctx->type != SOCK_STREAM)
+               return VERDICT_PROCEED;
+       if (ctx->user_port == bpf_htons(60123))
+               return VERDICT_REJECT;
+       return VERDICT_PROCEED;
+}
+
+char _license[] SEC("license") = "GPL";
index 6c059f1..e6cb092 100644 (file)
@@ -1,12 +1,17 @@
 // SPDX-License-Identifier: GPL-2.0
 
-#include <linux/ptrace.h>
-#include <linux/bpf.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
-struct pt_regs current_regs = {};
-struct pt_regs ctx_regs = {};
+#define PT_REGS_SIZE sizeof(struct pt_regs)
+
+/*
+ * The kernel struct pt_regs isn't exported in its entirety to userspace.
+ * Pass it as an array to task_pt_regs.c
+ */
+char current_regs[PT_REGS_SIZE] = {};
+char ctx_regs[PT_REGS_SIZE] = {};
 int uprobe_res = 0;
 
 SEC("uprobe/trigger_func")
@@ -17,8 +22,10 @@ int handle_uprobe(struct pt_regs *ctx)
 
        current = bpf_get_current_task_btf();
        regs = (struct pt_regs *) bpf_task_pt_regs(current);
-       __builtin_memcpy(&current_regs, regs, sizeof(*regs));
-       __builtin_memcpy(&ctx_regs, ctx, sizeof(*ctx));
+       if (bpf_probe_read_kernel(current_regs, PT_REGS_SIZE, regs))
+               return 0;
+       if (bpf_probe_read_kernel(ctx_regs, PT_REGS_SIZE, ctx))
+               return 0;
 
        /* Prove that uprobe was run */
        uprobe_res = 1;
index beee0d5..f7d8454 100755 (executable)
@@ -1,6 +1,6 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
-# Copyright 2020 NXP Semiconductors
+# Copyright 2020 NXP
 
 WAIT_TIME=1
 NUM_NETIFS=4
index 98053d3..b8dbabe 100644 (file)
@@ -24,6 +24,7 @@
 /x86_64/smm_test
 /x86_64/state_test
 /x86_64/svm_vmcall_test
+/x86_64/svm_int_ctl_test
 /x86_64/sync_regs_test
 /x86_64/tsc_msrs_test
 /x86_64/userspace_msr_exit_test
@@ -48,6 +49,7 @@
 /kvm_page_table_test
 /memslot_modification_stress_test
 /memslot_perf_test
+/rseq_test
 /set_memory_region_test
 /steal_time
 /kvm_binary_stats_test
index 5d05801..d1774f4 100644 (file)
@@ -56,6 +56,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/smm_test
 TEST_GEN_PROGS_x86_64 += x86_64/state_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
+TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
 TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
 TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test
@@ -80,6 +81,7 @@ TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
 TEST_GEN_PROGS_x86_64 += kvm_page_table_test
 TEST_GEN_PROGS_x86_64 += memslot_modification_stress_test
 TEST_GEN_PROGS_x86_64 += memslot_perf_test
+TEST_GEN_PROGS_x86_64 += rseq_test
 TEST_GEN_PROGS_x86_64 += set_memory_region_test
 TEST_GEN_PROGS_x86_64 += steal_time
 TEST_GEN_PROGS_x86_64 += kvm_binary_stats_test
@@ -93,6 +95,7 @@ TEST_GEN_PROGS_aarch64 += dirty_log_test
 TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
 TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
 TEST_GEN_PROGS_aarch64 += kvm_page_table_test
+TEST_GEN_PROGS_aarch64 += rseq_test
 TEST_GEN_PROGS_aarch64 += set_memory_region_test
 TEST_GEN_PROGS_aarch64 += steal_time
 TEST_GEN_PROGS_aarch64 += kvm_binary_stats_test
@@ -104,6 +107,7 @@ TEST_GEN_PROGS_s390x += demand_paging_test
 TEST_GEN_PROGS_s390x += dirty_log_test
 TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
 TEST_GEN_PROGS_s390x += kvm_page_table_test
+TEST_GEN_PROGS_s390x += rseq_test
 TEST_GEN_PROGS_s390x += set_memory_region_test
 TEST_GEN_PROGS_s390x += kvm_binary_stats_test
 
index 71e277c..5d95113 100644 (file)
@@ -371,9 +371,7 @@ static void help(char *name)
        printf(" -v: specify the number of vCPUs to run.\n");
        printf(" -o: Overlap guest memory accesses instead of partitioning\n"
               "     them into a separate region of memory for each vCPU.\n");
-       printf(" -s: specify the type of memory that should be used to\n"
-              "     back the guest data region.\n\n");
-       backing_src_help();
+       backing_src_help("-s");
        puts("");
        exit(0);
 }
@@ -381,7 +379,7 @@ static void help(char *name)
 int main(int argc, char *argv[])
 {
        struct test_params params = {
-               .backing_src = VM_MEM_SRC_ANONYMOUS,
+               .backing_src = DEFAULT_VM_MEM_SRC,
                .vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,
                .vcpus = 1,
        };
index e79c1b6..1510b21 100644 (file)
@@ -179,7 +179,7 @@ static void *uffd_handler_thread_fn(void *arg)
                        return NULL;
                }
 
-               if (!pollfd[0].revents & POLLIN)
+               if (!(pollfd[0].revents & POLLIN))
                        continue;
 
                r = read(uffd, &msg, sizeof(msg));
@@ -416,7 +416,7 @@ static void help(char *name)
 {
        puts("");
        printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-d uffd_delay_usec]\n"
-              "          [-b memory] [-t type] [-v vcpus] [-o]\n", name);
+              "          [-b memory] [-s type] [-v vcpus] [-o]\n", name);
        guest_modes_help();
        printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n"
               "     UFFD registration mode: 'MISSING' or 'MINOR'.\n");
@@ -426,8 +426,7 @@ static void help(char *name)
        printf(" -b: specify the size of the memory region which should be\n"
               "     demand paged by each vCPU. e.g. 10M or 3G.\n"
               "     Default: 1G\n");
-       printf(" -t: The type of backing memory to use. Default: anonymous\n");
-       backing_src_help();
+       backing_src_help("-s");
        printf(" -v: specify the number of vCPUs to run.\n");
        printf(" -o: Overlap guest memory accesses instead of partitioning\n"
               "     them into a separate region of memory for each vCPU.\n");
@@ -439,14 +438,14 @@ int main(int argc, char *argv[])
 {
        int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
        struct test_params p = {
-               .src_type = VM_MEM_SRC_ANONYMOUS,
+               .src_type = DEFAULT_VM_MEM_SRC,
                .partition_vcpu_memory_access = true,
        };
        int opt;
 
        guest_modes_append_default();
 
-       while ((opt = getopt(argc, argv, "hm:u:d:b:t:v:o")) != -1) {
+       while ((opt = getopt(argc, argv, "hm:u:d:b:s:v:o")) != -1) {
                switch (opt) {
                case 'm':
                        guest_modes_cmdline(optarg);
@@ -465,7 +464,7 @@ int main(int argc, char *argv[])
                case 'b':
                        guest_percpu_mem_size = parse_size(optarg);
                        break;
-               case 't':
+               case 's':
                        p.src_type = parse_backing_src_type(optarg);
                        break;
                case 'v':
@@ -485,7 +484,7 @@ int main(int argc, char *argv[])
 
        if (p.uffd_mode == UFFDIO_REGISTER_MODE_MINOR &&
            !backing_src_is_shared(p.src_type)) {
-               TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -t");
+               TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -s");
        }
 
        for_each_guest_mode(run_test, &p);
index 4798685..7ffab5b 100644 (file)
@@ -118,42 +118,64 @@ static inline void disable_dirty_logging(struct kvm_vm *vm, int slots)
        toggle_dirty_logging(vm, slots, false);
 }
 
-static void get_dirty_log(struct kvm_vm *vm, int slots, unsigned long *bitmap,
-                         uint64_t nr_pages)
+static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
 {
-       uint64_t slot_pages = nr_pages / slots;
        int i;
 
        for (i = 0; i < slots; i++) {
                int slot = PERF_TEST_MEM_SLOT_INDEX + i;
-               unsigned long *slot_bitmap = bitmap + i * slot_pages;
 
-               kvm_vm_get_dirty_log(vm, slot, slot_bitmap);
+               kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
        }
 }
 
-static void clear_dirty_log(struct kvm_vm *vm, int slots, unsigned long *bitmap,
-                           uint64_t nr_pages)
+static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
+                           int slots, uint64_t pages_per_slot)
 {
-       uint64_t slot_pages = nr_pages / slots;
        int i;
 
        for (i = 0; i < slots; i++) {
                int slot = PERF_TEST_MEM_SLOT_INDEX + i;
-               unsigned long *slot_bitmap = bitmap + i * slot_pages;
 
-               kvm_vm_clear_dirty_log(vm, slot, slot_bitmap, 0, slot_pages);
+               kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
        }
 }
 
+static unsigned long **alloc_bitmaps(int slots, uint64_t pages_per_slot)
+{
+       unsigned long **bitmaps;
+       int i;
+
+       bitmaps = malloc(slots * sizeof(bitmaps[0]));
+       TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");
+
+       for (i = 0; i < slots; i++) {
+               bitmaps[i] = bitmap_zalloc(pages_per_slot);
+               TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
+       }
+
+       return bitmaps;
+}
+
+static void free_bitmaps(unsigned long *bitmaps[], int slots)
+{
+       int i;
+
+       for (i = 0; i < slots; i++)
+               free(bitmaps[i]);
+
+       free(bitmaps);
+}
+
 static void run_test(enum vm_guest_mode mode, void *arg)
 {
        struct test_params *p = arg;
        pthread_t *vcpu_threads;
        struct kvm_vm *vm;
-       unsigned long *bmap;
+       unsigned long **bitmaps;
        uint64_t guest_num_pages;
        uint64_t host_num_pages;
+       uint64_t pages_per_slot;
        int vcpu_id;
        struct timespec start;
        struct timespec ts_diff;
@@ -171,7 +193,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
        guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
        host_num_pages = vm_num_host_pages(mode, guest_num_pages);
-       bmap = bitmap_zalloc(host_num_pages);
+       pages_per_slot = host_num_pages / p->slots;
+
+       bitmaps = alloc_bitmaps(p->slots, pages_per_slot);
 
        if (dirty_log_manual_caps) {
                cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
@@ -239,7 +263,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                        iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
 
                clock_gettime(CLOCK_MONOTONIC, &start);
-               get_dirty_log(vm, p->slots, bmap, host_num_pages);
+               get_dirty_log(vm, bitmaps, p->slots);
                ts_diff = timespec_elapsed(start);
                get_dirty_log_total = timespec_add(get_dirty_log_total,
                                                   ts_diff);
@@ -248,7 +272,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
                if (dirty_log_manual_caps) {
                        clock_gettime(CLOCK_MONOTONIC, &start);
-                       clear_dirty_log(vm, p->slots, bmap, host_num_pages);
+                       clear_dirty_log(vm, bitmaps, p->slots, pages_per_slot);
                        ts_diff = timespec_elapsed(start);
                        clear_dirty_log_total = timespec_add(clear_dirty_log_total,
                                                             ts_diff);
@@ -281,7 +305,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                        clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
        }
 
-       free(bmap);
+       free_bitmaps(bitmaps, p->slots);
        free(vcpu_threads);
        perf_test_destroy_vm(vm);
 }
@@ -308,11 +332,9 @@ static void help(char *name)
        printf(" -v: specify the number of vCPUs to run.\n");
        printf(" -o: Overlap guest memory accesses instead of partitioning\n"
               "     them into a separate region of memory for each vCPU.\n");
-       printf(" -s: specify the type of memory that should be used to\n"
-              "     back the guest data region.\n\n");
+       backing_src_help("-s");
        printf(" -x: Split the memory region into this number of memslots.\n"
-              "     (default: 1)");
-       backing_src_help();
+              "     (default: 1)\n");
        puts("");
        exit(0);
 }
@@ -324,7 +346,7 @@ int main(int argc, char *argv[])
                .iterations = TEST_HOST_LOOP_N,
                .wr_fract = 1,
                .partition_vcpu_memory_access = true,
-               .backing_src = VM_MEM_SRC_ANONYMOUS,
+               .backing_src = DEFAULT_VM_MEM_SRC,
                .slots = 1,
        };
        int opt;
index d79be15..f8fddc8 100644 (file)
@@ -90,18 +90,23 @@ enum vm_mem_backing_src_type {
        NUM_SRC_TYPES,
 };
 
+#define DEFAULT_VM_MEM_SRC VM_MEM_SRC_ANONYMOUS
+
 struct vm_mem_backing_src_alias {
        const char *name;
        uint32_t flag;
 };
 
+#define MIN_RUN_DELAY_NS       200000UL
+
 bool thp_configured(void);
 size_t get_trans_hugepagesz(void);
 size_t get_def_hugetlb_pagesz(void);
 const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i);
 size_t get_backing_src_pagesz(uint32_t i);
-void backing_src_help(void);
+void backing_src_help(const char *flag);
 enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
+long get_run_delay(void);
 
 /*
  * Whether or not the given source type is shared memory (as opposed to
index 242ae8e..eba8bd0 100644 (file)
@@ -312,37 +312,37 @@ static inline void set_xmm(int n, unsigned long val)
        }
 }
 
-typedef unsigned long v1di __attribute__ ((vector_size (8)));
+#define GET_XMM(__xmm)                                                 \
+({                                                                     \
+       unsigned long __val;                                            \
+       asm volatile("movq %%"#__xmm", %0" : "=r"(__val) : : #__xmm);   \
+       __val;                                                          \
+})
+
 static inline unsigned long get_xmm(int n)
 {
        assert(n >= 0 && n <= 7);
 
-       register v1di xmm0 __asm__("%xmm0");
-       register v1di xmm1 __asm__("%xmm1");
-       register v1di xmm2 __asm__("%xmm2");
-       register v1di xmm3 __asm__("%xmm3");
-       register v1di xmm4 __asm__("%xmm4");
-       register v1di xmm5 __asm__("%xmm5");
-       register v1di xmm6 __asm__("%xmm6");
-       register v1di xmm7 __asm__("%xmm7");
        switch (n) {
        case 0:
-               return (unsigned long)xmm0;
+               return GET_XMM(xmm0);
        case 1:
-               return (unsigned long)xmm1;
+               return GET_XMM(xmm1);
        case 2:
-               return (unsigned long)xmm2;
+               return GET_XMM(xmm2);
        case 3:
-               return (unsigned long)xmm3;
+               return GET_XMM(xmm3);
        case 4:
-               return (unsigned long)xmm4;
+               return GET_XMM(xmm4);
        case 5:
-               return (unsigned long)xmm5;
+               return GET_XMM(xmm5);
        case 6:
-               return (unsigned long)xmm6;
+               return GET_XMM(xmm6);
        case 7:
-               return (unsigned long)xmm7;
+               return GET_XMM(xmm7);
        }
+
+       /* never reached */
        return 0;
 }
 
index 0d04a7d..36407cb 100644 (file)
@@ -456,10 +456,7 @@ static void help(char *name)
               "     (default: 1G)\n");
        printf(" -v: specify the number of vCPUs to run\n"
               "     (default: 1)\n");
-       printf(" -s: specify the type of memory that should be used to\n"
-              "     back the guest data region.\n"
-              "     (default: anonymous)\n\n");
-       backing_src_help();
+       backing_src_help("-s");
        puts("");
 }
 
@@ -468,7 +465,7 @@ int main(int argc, char *argv[])
        int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
        struct test_params p = {
                .test_mem_size = DEFAULT_TEST_MEM_SIZE,
-               .src_type = VM_MEM_SRC_ANONYMOUS,
+               .src_type = DEFAULT_VM_MEM_SRC,
        };
        int opt;
 
index af1031f..b724291 100644 (file)
@@ -11,6 +11,7 @@
 #include <stdlib.h>
 #include <time.h>
 #include <sys/stat.h>
+#include <sys/syscall.h>
 #include <linux/mman.h>
 #include "linux/kernel.h"
 
@@ -129,13 +130,16 @@ size_t get_trans_hugepagesz(void)
 {
        size_t size;
        FILE *f;
+       int ret;
 
        TEST_ASSERT(thp_configured(), "THP is not configured in host kernel");
 
        f = fopen("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", "r");
        TEST_ASSERT(f != NULL, "Error in opening transparent_hugepage/hpage_pmd_size");
 
-       fscanf(f, "%ld", &size);
+       ret = fscanf(f, "%ld", &size);
+       TEST_ASSERT(ret == 1,
+                   "Error reading transparent_hugepage/hpage_pmd_size");
        fclose(f);
 
        return size;
@@ -279,13 +283,22 @@ size_t get_backing_src_pagesz(uint32_t i)
        }
 }
 
-void backing_src_help(void)
+static void print_available_backing_src_types(const char *prefix)
 {
        int i;
 
-       printf("Available backing src types:\n");
+       printf("%sAvailable backing src types:\n", prefix);
+
        for (i = 0; i < NUM_SRC_TYPES; i++)
-               printf("\t%s\n", vm_mem_backing_src_alias(i)->name);
+               printf("%s    %s\n", prefix, vm_mem_backing_src_alias(i)->name);
+}
+
+void backing_src_help(const char *flag)
+{
+       printf(" %s: specify the type of memory that should be used to\n"
+              "     back the guest data region. (default: %s)\n",
+              flag, vm_mem_backing_src_alias(DEFAULT_VM_MEM_SRC)->name);
+       print_available_backing_src_types("     ");
 }
 
 enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name)
@@ -296,7 +309,23 @@ enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name)
                if (!strcmp(type_name, vm_mem_backing_src_alias(i)->name))
                        return i;
 
-       backing_src_help();
+       print_available_backing_src_types("");
        TEST_FAIL("Unknown backing src type: %s", type_name);
        return -1;
 }
+
+long get_run_delay(void)
+{
+       char path[64];
+       long val[2];
+       FILE *fp;
+
+       sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid));
+       fp = fopen(path, "r");
+       /* Return MIN_RUN_DELAY_NS upon failure just to be safe */
+       if (fscanf(fp, "%ld %ld ", &val[0], &val[1]) < 2)
+               val[1] = MIN_RUN_DELAY_NS;
+       fclose(fp);
+
+       return val[1];
+}
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
new file mode 100644 (file)
index 0000000..c5e0dd6
--- /dev/null
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+#include <syscall.h>
+#include <sys/ioctl.h>
+#include <asm/barrier.h>
+#include <linux/atomic.h>
+#include <linux/rseq.h>
+#include <linux/unistd.h>
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "test_util.h"
+
+#define VCPU_ID 0
+
+static __thread volatile struct rseq __rseq = {
+       .cpu_id = RSEQ_CPU_ID_UNINITIALIZED,
+};
+
+/*
+ * Use an arbitrary, bogus signature for configuring rseq, this test does not
+ * actually enter an rseq critical section.
+ */
+#define RSEQ_SIG 0xdeadbeef
+
+/*
+ * Any bug related to task migration is likely to be timing-dependent; perform
+ * a large number of migrations to reduce the odds of a false negative.
+ */
+#define NR_TASK_MIGRATIONS 100000
+
+static pthread_t migration_thread;
+static cpu_set_t possible_mask;
+static bool done;
+
+static atomic_t seq_cnt;
+
/* Guest main loop: do nothing but bounce back to the host via GUEST_SYNC. */
static void guest_code(void)
{
	while (1)
		GUEST_SYNC(0);
}
+
+static void sys_rseq(int flags)
+{
+       int r;
+
+       r = syscall(__NR_rseq, &__rseq, sizeof(__rseq), flags, RSEQ_SIG);
+       TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno));
+}
+
/*
 * Worker thread: repeatedly force task migrations by toggling CPU affinity,
 * bumping seq_cnt around each change so the reader can detect windows in
 * which a migration may have raced with its rseq/sched_getcpu() reads.
 *
 * NOTE(review): sched_setaffinity(0, ...) changes the affinity of the
 * *calling* thread, i.e. this migration worker, not the thread running the
 * vCPU — confirm that migrating this thread (rather than the vCPU thread)
 * is the intended behavior.
 *
 * NOTE(review): cpu = i % CPU_COUNT(&possible_mask) assumes CPU IDs
 * 0..nr_cpus-1 are all present in the mask; with a sparse affinity mask
 * some iterations are skipped and high-numbered CPUs are never targeted.
 */
static void *migration_worker(void *ign)
{
	cpu_set_t allowed_mask;
	int r, i, nr_cpus, cpu;

	CPU_ZERO(&allowed_mask);

	nr_cpus = CPU_COUNT(&possible_mask);

	for (i = 0; i < NR_TASK_MIGRATIONS; i++) {
		cpu = i % nr_cpus;
		if (!CPU_ISSET(cpu, &possible_mask))
			continue;

		CPU_SET(cpu, &allowed_mask);

		/*
		 * Bump the sequence count twice to allow the reader to detect
		 * that a migration may have occurred in between rseq and sched
		 * CPU ID reads.  An odd sequence count indicates a migration
		 * is in-progress, while a completely different count indicates
		 * a migration occurred since the count was last read.
		 */
		atomic_inc(&seq_cnt);

		/*
		 * Ensure the odd count is visible while sched_getcpu() isn't
		 * stable, i.e. while changing affinity is in-progress.
		 */
		smp_wmb();
		r = sched_setaffinity(0, sizeof(allowed_mask), &allowed_mask);
		TEST_ASSERT(!r, "sched_setaffinity failed, errno = %d (%s)",
			    errno, strerror(errno));
		smp_wmb();
		atomic_inc(&seq_cnt);

		CPU_CLR(cpu, &allowed_mask);

		/*
		 * Wait 1-10us before proceeding to the next iteration and more
		 * specifically, before bumping seq_cnt again.  A delay is
		 * needed on three fronts:
		 *
		 *  1. To allow sched_setaffinity() to prompt migration before
		 *     ioctl(KVM_RUN) enters the guest so that TIF_NOTIFY_RESUME
		 *     (or TIF_NEED_RESCHED, which indirectly leads to handling
		 *     NOTIFY_RESUME) is handled in KVM context.
		 *
		 *     If NOTIFY_RESUME/NEED_RESCHED is set after KVM enters
		 *     the guest, the guest will trigger an IO/MMIO exit all the
		 *     way to userspace and the TIF flags will be handled by
		 *     the generic "exit to userspace" logic, not by KVM.  The
		 *     exit to userspace is necessary to give the test a chance
		 *     to check the rseq CPU ID (see #2).
		 *
		 *     Alternatively, guest_code() could include an instruction
		 *     to trigger an exit that is handled by KVM, but any such
		 *     exit requires architecture specific code.
		 *
		 *  2. To let ioctl(KVM_RUN) make its way back to the test
		 *     before the next round of migration.  The test's check on
		 *     the rseq CPU ID must wait for migration to complete in
		 *     order to avoid false positive, thus any kernel rseq bug
		 *     will be missed if the next migration starts before the
		 *     check completes.
		 *
		 *  3. To ensure the read-side makes efficient forward progress,
		 *     e.g. if sched_getcpu() involves a syscall.  Stalling the
		 *     read-side means the test will spend more time waiting for
		 *     sched_getcpu() to stabilize and less time trying to hit
		 *     the timing-dependent bug.
		 *
		 * Because any bug in this area is likely to be timing-dependent,
		 * run with a range of delays at 1us intervals from 1us to 10us
		 * as a best effort to avoid tuning the test to the point where
		 * it can hit _only_ the original bug and not detect future
		 * regressions.
		 *
		 * The original bug can reproduce with a delay up to ~500us on
		 * x86-64, but starts to require more iterations to reproduce
		 * as the delay creeps above ~10us, and the average runtime of
		 * each iteration obviously increases as well.  Cap the delay
		 * at 10us to keep test runtime reasonable while minimizing
		 * potential coverage loss.
		 *
		 * The lower bound for reproducing the bug is likely below 1us,
		 * e.g. failures occur on x86-64 with nanosleep(0), but at that
		 * point the overhead of the syscall likely dominates the delay.
		 * Use usleep() for simplicity and to avoid unnecessary kernel
		 * dependencies.
		 */
		usleep((i % 10) + 1);
	}
	done = true;
	return NULL;
}
+
/*
 * Verify that the kernel keeps the rseq CPU ID in sync with the CPU the
 * task actually runs on while ioctl(KVM_RUN) is executed concurrently with
 * forced task migrations.
 */
int main(int argc, char *argv[])
{
	int r, i, snapshot;
	struct kvm_vm *vm;
	u32 cpu, rseq_cpu;

	/* Tell stdout not to buffer its content */
	setbuf(stdout, NULL);

	r = sched_getaffinity(0, sizeof(possible_mask), &possible_mask);
	TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
		    strerror(errno));

	/* Migration requires at least two CPUs; skip otherwise. */
	if (CPU_COUNT(&possible_mask) < 2) {
		print_skip("Only one CPU, task migration not possible\n");
		exit(KSFT_SKIP);
	}

	/* Register this (main) thread's rseq area with the kernel. */
	sys_rseq(0);

	/*
	 * Create and run a dummy VM that immediately exits to userspace via
	 * GUEST_SYNC, while concurrently migrating the process by setting its
	 * CPU affinity.
	 */
	vm = vm_create_default(VCPU_ID, 0, guest_code);
	ucall_init(vm, NULL);

	pthread_create(&migration_thread, NULL, migration_worker, 0);

	/* Loop until the migration worker signals completion via 'done'. */
	for (i = 0; !done; i++) {
		vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
			    "Guest failed?");

		/*
		 * Verify rseq's CPU matches sched's CPU.  Ensure migration
		 * doesn't occur between sched_getcpu() and reading the rseq
		 * cpu_id by rereading both if the sequence count changes, or
		 * if the count is odd (migration in-progress).
		 */
		do {
			/*
			 * Drop bit 0 to force a mismatch if the count is odd,
			 * i.e. if a migration is in-progress.
			 */
			snapshot = atomic_read(&seq_cnt) & ~1;

			/*
			 * Ensure reading sched_getcpu() and rseq.cpu_id
			 * complete in a single "no migration" window, i.e. are
			 * not reordered across the seq_cnt reads.
			 */
			smp_rmb();
			cpu = sched_getcpu();
			rseq_cpu = READ_ONCE(__rseq.cpu_id);
			smp_rmb();
		} while (snapshot != atomic_read(&seq_cnt));

		TEST_ASSERT(rseq_cpu == cpu,
			    "rseq CPU = %d, sched CPU = %d\n", rseq_cpu, cpu);
	}

	/*
	 * Sanity check that the test was able to enter the guest a reasonable
	 * number of times, e.g. didn't get stalled too often/long waiting for
	 * sched_getcpu() to stabilize.  A 2:1 migration:KVM_RUN ratio is a
	 * fairly conservative ratio on x86-64, which can do _more_ KVM_RUNs
	 * than migrations given the 1us+ delay in the migration task.
	 */
	TEST_ASSERT(i > (NR_TASK_MIGRATIONS / 2),
		    "Only performed %d KVM_RUNs, task stalled too much?\n", i);

	pthread_join(migration_thread, NULL);

	kvm_vm_free(vm);

	/* Unregister the rseq area before exiting. */
	sys_rseq(RSEQ_FLAG_UNREGISTER);

	return 0;
}
index ecec308..62f2eb9 100644 (file)
@@ -10,7 +10,6 @@
 #include <sched.h>
 #include <pthread.h>
 #include <linux/kernel.h>
-#include <sys/syscall.h>
 #include <asm/kvm.h>
 #include <asm/kvm_para.h>
 
@@ -20,7 +19,6 @@
 
 #define NR_VCPUS               4
 #define ST_GPA_BASE            (1 << 30)
-#define MIN_RUN_DELAY_NS       200000UL
 
 static void *st_gva[NR_VCPUS];
 static uint64_t guest_stolen_time[NR_VCPUS];
@@ -118,12 +116,12 @@ struct st_time {
        uint64_t st_time;
 };
 
-static int64_t smccc(uint32_t func, uint32_t arg)
+static int64_t smccc(uint32_t func, uint64_t arg)
 {
        unsigned long ret;
 
        asm volatile(
-               "mov    x0, %1\n"
+               "mov    w0, %w1\n"
                "mov    x1, %2\n"
                "hvc    #0\n"
                "mov    %0, x0\n"
@@ -217,20 +215,6 @@ static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpuid)
 
 #endif
 
-static long get_run_delay(void)
-{
-       char path[64];
-       long val[2];
-       FILE *fp;
-
-       sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid));
-       fp = fopen(path, "r");
-       fscanf(fp, "%ld %ld ", &val[0], &val[1]);
-       fclose(fp);
-
-       return val[1];
-}
-
 static void *do_steal_time(void *arg)
 {
        struct timespec ts, stop;
index e6480fd..8039e1e 100644 (file)
/*
 * Count the number of "WARNING:" splats currently in the kernel log.
 *
 * Returns 0 if dmesg cannot be spawned or its output cannot be parsed.
 */
int get_warnings_count(void)
{
	int warnings;
	FILE *f;

	f = popen("dmesg | grep \"WARNING:\" | wc -l", "r");
	if (!f || fscanf(f, "%d", &warnings) < 1)
		warnings = 0;
	/*
	 * A stream opened by popen() must be closed with pclose(), not
	 * fclose(); fclose() leaks the child and its exit status.
	 */
	if (f)
		pclose(f);

	return warnings;
}
diff --git a/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
new file mode 100644 (file)
index 0000000..df04f56
--- /dev/null
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * svm_int_ctl_test
+ *
+ * Copyright (C) 2021, Red Hat, Inc.
+ *
+ * Nested SVM testing: test simultaneous use of V_IRQ from L1 and L0.
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+#include "apic.h"
+
+#define VCPU_ID                0
+
+static struct kvm_vm *vm;
+
+bool vintr_irq_called;
+bool intr_irq_called;
+
+#define VINTR_IRQ_NUMBER 0x20
+#define INTR_IRQ_NUMBER 0x30
+
/* Handler for the L1-injected virtual interrupt (V_IRQ); records delivery. */
static void vintr_irq_handler(struct ex_regs *regs)
{
	vintr_irq_called = true;
}
+
/*
 * Handler for the self-IPI raised by L2 through the LAPIC; acknowledges
 * the interrupt (EOI) and records its delivery.
 */
static void intr_irq_handler(struct ex_regs *regs)
{
	x2apic_write_reg(APIC_EOI, 0x00);
	intr_irq_called = true;
}
+
static void l2_guest_code(struct svm_test_data *svm)
{
	/* This code raises interrupt INTR_IRQ_NUMBER in the L1's LAPIC,
	 * and since L1 didn't enable virtual interrupt masking,
	 * L2 should receive it and not L1.
	 *
	 * L2 also has virtual interrupt 'VINTR_IRQ_NUMBER' pending in V_IRQ
	 * so it should also receive it after the following 'sti'.
	 */
	x2apic_write_reg(APIC_ICR,
		APIC_DEST_SELF | APIC_INT_ASSERT | INTR_IRQ_NUMBER);

	/*
	 * Enable interrupts; the nop provides an instruction window in which
	 * both pending interrupts can be delivered before the asserts below.
	 */
	__asm__ __volatile__(
		"sti\n"
		"nop\n"
	);

	/* Both handlers must have fired by now, else the test fails. */
	GUEST_ASSERT(vintr_irq_called);
	GUEST_ASSERT(intr_irq_called);

	/* Exit back to L1 via VMMCALL. */
	__asm__ __volatile__(
		"vmcall\n"
	);
}
+
+static void l1_guest_code(struct svm_test_data *svm)
+{
+       #define L2_GUEST_STACK_SIZE 64
+       unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+       struct vmcb *vmcb = svm->vmcb;
+
+       x2apic_enable();
+
+       /* Prepare for L2 execution. */
+       generic_svm_setup(svm, l2_guest_code,
+                         &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+       /* No virtual interrupt masking */
+       vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
+
+       /* No intercepts for real and virtual interrupts */
+       vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR | INTERCEPT_VINTR);
+
+       /* Make a virtual interrupt VINTR_IRQ_NUMBER pending */
+       vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT);
+       vmcb->control.int_vector = VINTR_IRQ_NUMBER;
+
+       run_guest(vmcb, svm->vmcb_gpa);
+       GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+       GUEST_DONE();
+}
+
/*
 * Host side of the test: set up the VM and IRQ handlers, run L1 (which runs
 * L2), then check the resulting ucall — UCALL_ABORT reports a guest-side
 * assertion failure, UCALL_DONE reports success.
 */
int main(int argc, char *argv[])
{
	vm_vaddr_t svm_gva;

	nested_svm_check_supported();

	vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);

	/* Route the test's IRQ vectors to the handlers defined above. */
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vm, VCPU_ID);

	vm_install_exception_handler(vm, VINTR_IRQ_NUMBER, vintr_irq_handler);
	vm_install_exception_handler(vm, INTR_IRQ_NUMBER, intr_irq_handler);

	/* Hand the nested-SVM state's GVA to l1_guest_code() as its argument. */
	vcpu_alloc_svm(vm, &svm_gva);
	vcpu_args_set(vm, VCPU_ID, 1, svm_gva);

	struct kvm_run *run = vcpu_state(vm, VCPU_ID);
	struct ucall uc;

	vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));

	switch (get_ucall(vm, VCPU_ID, &uc)) {
	case UCALL_ABORT:
		TEST_FAIL("%s", (const char *)uc.args[0]);
		break;
		/* NOT REACHED */
	case UCALL_DONE:
		goto done;
	default:
		TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
	}
done:
	kvm_vm_free(vm);
	return 0;
}
index 117bf49..eda0d2a 100644 (file)
@@ -14,7 +14,6 @@
 #include <stdint.h>
 #include <time.h>
 #include <sched.h>
-#include <sys/syscall.h>
 
 #define VCPU_ID                5
 
@@ -98,20 +97,6 @@ static void guest_code(void)
        GUEST_DONE();
 }
 
-static long get_run_delay(void)
-{
-        char path[64];
-        long val[2];
-        FILE *fp;
-
-        sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid));
-        fp = fopen(path, "r");
-        fscanf(fp, "%ld %ld ", &val[0], &val[1]);
-        fclose(fp);
-
-        return val[1];
-}
-
 static int cmp_timespec(struct timespec *a, struct timespec *b)
 {
        if (a->tv_sec > b->tv_sec)
index fa2ac0e..fe7ee2b 100644 (file)
@@ -48,6 +48,7 @@ ARCH          ?= $(SUBARCH)
 # When local build is done, headers are installed in the default
 # INSTALL_HDR_PATH usr/include.
 .PHONY: khdr
+.NOTPARALLEL:
 khdr:
 ifndef KSFT_KHDR_INSTALL_DONE
 ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
index e1bf55d..162c41e 100644 (file)
@@ -746,7 +746,7 @@ int read_write_nci_cmd(int nfc_sock, int virtual_fd, const __u8 *cmd, __u32 cmd_
                       const __u8 *rsp, __u32 rsp_len)
 {
        char buf[256];
-       unsigned int len;
+       int len;
 
        send(nfc_sock, &cmd[3], cmd_len - 3, 0);
        len = read(virtual_fd, buf, cmd_len);
index cfc7f4f..df34164 100644 (file)
@@ -1,5 +1,2 @@
-##TEST_GEN_FILES := test_unix_oob
-TEST_PROGS := test_unix_oob
+TEST_GEN_PROGS := test_unix_oob
 include ../../lib.mk
-
-all: $(TEST_PROGS)
index 0f3e376..3dece8b 100644 (file)
@@ -271,8 +271,9 @@ main(int argc, char **argv)
        read_oob(pfd, &oob);
 
        if (!signal_recvd || len != 127 || oob != '%' || atmark != 1) {
-               fprintf(stderr, "Test 3 failed, sigurg %d len %d OOB %c ",
-               "atmark %d\n", signal_recvd, len, oob, atmark);
+               fprintf(stderr,
+                       "Test 3 failed, sigurg %d len %d OOB %c atmark %d\n",
+                       signal_recvd, len, oob, atmark);
                die(1);
        }
 
index 4254ddc..1ef9e41 100755 (executable)
@@ -45,7 +45,7 @@ altnames_test()
        check_err $? "Got unexpected long alternative name from link show JSON"
 
        ip link property del $DUMMY_DEV altname $SHORT_NAME
-       check_err $? "Failed to add short alternative name"
+       check_err $? "Failed to delete short alternative name"
 
        ip -j -p link show $SHORT_NAME &>/dev/null
        check_fail $? "Unexpected success while trying to do link show with deleted short alternative name"
index bd1ca25..aed632d 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#include <ppc-asm.h>
+#include <basic_asm.h>
 #include <asm/unistd.h>
 
        .text
@@ -26,3 +26,38 @@ FUNC_START(getppid_tm_suspended)
 1:
        li      r3, -1
        blr
+
+
/*
 * Emit an scv (system call vectored) instruction with the given vector
 * level.  Hand-encoded via .long so this file assembles even with
 * toolchains whose assembler does not know the scv mnemonic.
 */
.macro scv level
	.long (0x44000001 | (\level) << 5)
.endm

/*
 * getppid_scv_tm_active() - issue getppid via scv from inside an *active*
 * transaction.  Returns the syscall result, or -1 if the transaction
 * failed to start (the beq is taken when tbegin. fails).
 */
FUNC_START(getppid_scv_tm_active)
	PUSH_BASIC_STACK(0)
	tbegin.
	beq 1f
	li	r0, __NR_getppid
	scv	0
	tend.
	POP_BASIC_STACK(0)
	blr
1:
	li	r3, -1
	POP_BASIC_STACK(0)
	blr

/*
 * getppid_scv_tm_suspended() - issue getppid via scv while the transaction
 * is *suspended* (tsuspend./tresume. bracket the syscall).  Returns the
 * syscall result, or -1 if the transaction failed to start.
 */
FUNC_START(getppid_scv_tm_suspended)
	PUSH_BASIC_STACK(0)
	tbegin.
	beq 1f
	li	r0, __NR_getppid
	tsuspend.
	scv	0
	tresume.
	tend.
	POP_BASIC_STACK(0)
	blr
1:
	li	r3, -1
	POP_BASIC_STACK(0)
	blr
index 467a6b3..b763354 100644 (file)
 #include "utils.h"
 #include "tm.h"
 
+#ifndef PPC_FEATURE2_SCV
+#define PPC_FEATURE2_SCV               0x00100000 /* scv syscall */
+#endif
+
 extern int getppid_tm_active(void);
 extern int getppid_tm_suspended(void);
+extern int getppid_scv_tm_active(void);
+extern int getppid_scv_tm_suspended(void);
 
 unsigned retries = 0;
 
 #define TEST_DURATION 10 /* seconds */
 
-pid_t getppid_tm(bool suspend)
+pid_t getppid_tm(bool scv, bool suspend)
 {
        int i;
        pid_t pid;
 
        for (i = 0; i < TM_RETRIES; i++) {
-               if (suspend)
-                       pid = getppid_tm_suspended();
-               else
-                       pid = getppid_tm_active();
+               if (suspend) {
+                       if (scv)
+                               pid = getppid_scv_tm_suspended();
+                       else
+                               pid = getppid_tm_suspended();
+               } else {
+                       if (scv)
+                               pid = getppid_scv_tm_active();
+                       else
+                               pid = getppid_tm_active();
+               }
 
                if (pid >= 0)
                        return pid;
@@ -82,15 +95,24 @@ int tm_syscall(void)
                 * Test a syscall within a suspended transaction and verify
                 * that it succeeds.
                 */
-               FAIL_IF(getppid_tm(true) == -1); /* Should succeed. */
+               FAIL_IF(getppid_tm(false, true) == -1); /* Should succeed. */
 
                /*
                 * Test a syscall within an active transaction and verify that
                 * it fails with the correct failure code.
                 */
-               FAIL_IF(getppid_tm(false) != -1);  /* Should fail... */
+               FAIL_IF(getppid_tm(false, false) != -1);  /* Should fail... */
                FAIL_IF(!failure_is_persistent()); /* ...persistently... */
                FAIL_IF(!failure_is_syscall());    /* ...with code syscall. */
+
+               /* Now do it all again with scv if it is available. */
+               if (have_hwcap2(PPC_FEATURE2_SCV)) {
+                       FAIL_IF(getppid_tm(true, true) == -1); /* Should succeed. */
+                       FAIL_IF(getppid_tm(true, false) != -1);  /* Should fail... */
+                       FAIL_IF(!failure_is_persistent()); /* ...persistently... */
+                       FAIL_IF(!failure_is_syscall());    /* ...with code syscall. */
+               }
+
                gettimeofday(&now, 0);
        }
 
index ee8208b..69c3ead 100644 (file)
@@ -265,12 +265,6 @@ nomem:
        }
 
        entry->ifnum = ifnum;
-
-       /* FIXME update USBDEVFS_CONNECTINFO so it tells about high speed etc */
-
-       fprintf(stderr, "%s speed\t%s\t%u\n",
-               speed(entry->speed), entry->name, entry->ifnum);
-
        entry->next = testdevs;
        testdevs = entry;
        return 0;
@@ -299,6 +293,14 @@ static void *handle_testdev (void *arg)
                return 0;
        }
 
+       status  =  ioctl(fd, USBDEVFS_GET_SPEED, NULL);
+       if (status < 0)
+               fprintf(stderr, "USBDEVFS_GET_SPEED failed %d\n", status);
+       else
+               dev->speed = status;
+       fprintf(stderr, "%s speed\t%s\t%u\n",
+                       speed(dev->speed), dev->name, dev->ifnum);
+
 restart:
        for (i = 0; i < TEST_CASES; i++) {
                if (dev->test != -1 && dev->test != i)
index 0517c74..f62f10c 100644 (file)
@@ -1331,7 +1331,7 @@ int main(int argc, char *argv[])
        if (opt_list && opt_list_mapcnt)
                kpagecount_fd = checked_open(PROC_KPAGECOUNT, O_RDONLY);
 
-       if (opt_mark_idle && opt_file)
+       if (opt_mark_idle)
                page_idle_fd = checked_open(SYS_KERNEL_MM_PAGE_IDLE, O_RDWR);
 
        if (opt_list && opt_pid)
index 439d3b4..7851f3a 100644 (file)
@@ -235,9 +235,13 @@ static void ack_flush(void *_completed)
 {
 }
 
-static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
+static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)
 {
-       if (unlikely(!cpus))
+       const struct cpumask *cpus;
+
+       if (likely(cpumask_available(tmp)))
+               cpus = tmp;
+       else
                cpus = cpu_online_mask;
 
        if (cpumask_empty(cpus))
@@ -263,14 +267,34 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
                        continue;
 
                kvm_make_request(req, vcpu);
-               cpu = vcpu->cpu;
 
                if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
                        continue;
 
-               if (tmp != NULL && cpu != -1 && cpu != me &&
-                   kvm_request_needs_ipi(vcpu, req))
-                       __cpumask_set_cpu(cpu, tmp);
+               /*
+                * tmp can be "unavailable" if cpumasks are allocated off stack
+                * as allocation of the mask is deliberately not fatal and is
+                * handled by falling back to kicking all online CPUs.
+                */
+               if (!cpumask_available(tmp))
+                       continue;
+
+               /*
+                * Note, the vCPU could get migrated to a different pCPU at any
+                * point after kvm_request_needs_ipi(), which could result in
+                * sending an IPI to the previous pCPU.  But, that's ok because
+                * the purpose of the IPI is to ensure the vCPU returns to
+                * OUTSIDE_GUEST_MODE, which is satisfied if the vCPU migrates.
+                * Entering READING_SHADOW_PAGE_TABLES after this point is also
+                * ok, as the requirement is only that KVM wait for vCPUs that
+                * were reading SPTEs _before_ any changes were finalized.  See
+                * kvm_vcpu_kick() for more details on handling requests.
+                */
+               if (kvm_request_needs_ipi(vcpu, req)) {
+                       cpu = READ_ONCE(vcpu->cpu);
+                       if (cpu != -1 && cpu != me)
+                               __cpumask_set_cpu(cpu, tmp);
+               }
        }
 
        called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
@@ -302,13 +326,8 @@ EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-       /*
-        * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
-        * kvm_make_all_cpus_request.
-        */
-       long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);
-
        ++kvm->stat.generic.remote_tlb_flush_requests;
+
        /*
         * We want to publish modifications to the page tables before reading
         * mode. Pairs with a memory barrier in arch-specific code.
@@ -323,7 +342,6 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
        if (!kvm_arch_flush_remote_tlb(kvm)
            || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                ++kvm->stat.generic.remote_tlb_flush;
-       cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 #endif
@@ -528,7 +546,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
                }
        }
 
-       if (range->flush_on_ret && (ret || kvm->tlbs_dirty))
+       if (range->flush_on_ret && ret)
                kvm_flush_remote_tlbs(kvm);
 
        if (locked)
@@ -3134,15 +3152,19 @@ out:
 
 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
 {
-       unsigned int old, val, shrink;
+       unsigned int old, val, shrink, grow_start;
 
        old = val = vcpu->halt_poll_ns;
        shrink = READ_ONCE(halt_poll_ns_shrink);
+       grow_start = READ_ONCE(halt_poll_ns_grow_start);
        if (shrink == 0)
                val = 0;
        else
                val /= shrink;
 
+       if (val < grow_start)
+               val = 0;
+
        vcpu->halt_poll_ns = val;
        trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
 }
@@ -3290,16 +3312,24 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
  */
/*
 * Kick a vCPU out of guest mode: first try to wake it if it is blocked;
 * otherwise, if the architecture says a kick is needed, send an IPI to the
 * pCPU the vCPU is running on.
 */
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me, cpu;

	if (kvm_vcpu_wake_up(vcpu))
		return;

	/*
	 * Note, the vCPU could get migrated to a different pCPU at any point
	 * after kvm_arch_vcpu_should_kick(), which could result in sending an
	 * IPI to the previous pCPU.  But, that's ok because the purpose of the
	 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
	 * vCPU also requires it to leave IN_GUEST_MODE.
	 */
	me = get_cpu();		/* disables preemption; paired with put_cpu() */
	if (kvm_arch_vcpu_should_kick(vcpu)) {
		cpu = READ_ONCE(vcpu->cpu);
		if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
			smp_send_reschedule(cpu);
	}
	put_cpu();
}
EXPORT_SYMBOL_GPL(kvm_vcpu_kick);