Merge tag 'for-5.7/block-2020-03-29' of git://git.kernel.dk/linux-block
author    Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 30 Mar 2020 18:20:13 +0000 (11:20 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 30 Mar 2020 18:20:13 +0000 (11:20 -0700)
Pull block updates from Jens Axboe:

 - Online capacity resizing (Balbir)

 - Fixes for changing the number of hardware queues (Bart)

 - null_blk fault injection addition (Bart)

 - Cleanup of queue allocation, unifying the node/no-node API
   (Christoph) - see the sketch after this list

 - Cleanup of genhd, moving code to where it makes sense (Christoph)

 - Cleanup of the partition handling code (Christoph)

 - disk stat fixes/improvements (Konstantin)

 - BFQ improvements (Paolo)

 - Various fixes and improvements

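The queue allocation cleanup above and the blk_mq_init_queue_data() helper in
the shortlog below change how blk-mq drivers obtain their request queue. A
minimal sketch of the driver-side usage, assuming the helper keeps the
tag-set-plus-queuedata signature added in this series and returns ERR_PTR() on
failure; the mydev structure and function are illustrative, not from the tree:

    #include <linux/blk-mq.h>
    #include <linux/blkdev.h>
    #include <linux/err.h>

    struct mydev {                          /* illustrative driver state */
            struct blk_mq_tag_set tag_set;  /* assumed already set up via blk_mq_alloc_tag_set() */
            struct request_queue *queue;
    };

    static int mydev_init_queue(struct mydev *dev)
    {
            struct request_queue *q;

            /* Hand the per-device context to blk-mq at allocation time
             * instead of assigning q->queuedata after the fact. */
            q = blk_mq_init_queue_data(&dev->tag_set, dev);
            if (IS_ERR(q))
                    return PTR_ERR(q);

            dev->queue = q;
            return 0;
    }
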
* tag 'for-5.7/block-2020-03-29' of git://git.kernel.dk/linux-block: (72 commits)
  block: return NULL in blk_alloc_queue() on error
  block: move bio_map_* to blk-map.c
  Revert "blkdev: check for valid request queue before issuing flush"
  block: simplify queue allocation
  bcache: pass the make_request methods to blk_queue_make_request
  null_blk: use blk_mq_init_queue_data
  block: add a blk_mq_init_queue_data helper
  block: move the ->devnode callback to struct block_device_operations
  block: move the part_stat* helpers from genhd.h to a new header
  block: move block layer internals out of include/linux/genhd.h
  block: move guard_bio_eod to bio.c
  block: unexport get_gendisk
  block: unexport disk_map_sector_rcu
  block: unexport disk_get_part
  block: mark part_in_flight and part_in_flight_rw static
  block: mark block_depr static
  block: factor out requeue handling from dispatch code
  block/diskstats: replace time_in_queue with sum of request times
  block/diskstats: accumulate all per-cpu counters in one pass
  block/diskstats: more accurate approximation of io_ticks for slow disks
  ...

726 files changed:
.clang-format
.mailmap
Documentation/arm64/silicon-errata.rst
Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt
Documentation/devicetree/bindings/net/fsl-fman.txt
Documentation/devicetree/bindings/vendor-prefixes.yaml
Documentation/driver-api/dmaengine/provider.rst
Documentation/filesystems/porting.rst
Documentation/filesystems/zonefs.txt
Documentation/kbuild/kbuild.rst
Documentation/kbuild/kconfig-macro-language.rst
Documentation/kbuild/makefiles.rst
Documentation/kbuild/modules.rst
Documentation/networking/devlink/devlink-region.rst
Documentation/networking/net_failover.rst
Documentation/networking/rds.txt
Documentation/virt/kvm/amd-memory-encryption.rst
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/configs/nps_defconfig
arch/arc/configs/nsimosci_defconfig
arch/arc/configs/nsimosci_hs_defconfig
arch/arc/configs/nsimosci_hs_smp_defconfig
arch/arc/include/asm/fpu.h
arch/arc/include/asm/linkage.h
arch/arc/kernel/setup.c
arch/arc/kernel/troubleshoot.c
arch/arm/Makefile
arch/arm/boot/compressed/Makefile
arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
arch/arm/boot/dts/bcm2835-rpi.dtsi
arch/arm/boot/dts/dm8148-evm.dts
arch/arm/boot/dts/dm8148-t410.dts
arch/arm/boot/dts/dra62x-j5eco-evm.dts
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi
arch/arm/boot/dts/exynos4412-n710x.dts
arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
arch/arm/boot/dts/motorola-mapphone-common.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/ox810se.dtsi
arch/arm/boot/dts/ox820.dtsi
arch/arm/boot/dts/sun8i-a33.dtsi
arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
arch/arm/boot/dts/sun8i-a83t.dtsi
arch/arm/boot/dts/sun8i-r40.dtsi
arch/arm/kernel/vdso.c
arch/arm/lib/copy_from_user.S
arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
arch/arm64/boot/dts/sprd/sc9863a.dtsi
arch/arm64/crypto/chacha-neon-glue.c
arch/arm64/include/asm/alternative.h
arch/arm64/include/asm/mmu.h
arch/arm64/include/asm/pgtable-prot.h
arch/arm64/include/asm/unistd.h
arch/arm64/kernel/smp.c
arch/mips/boot/dts/ingenic/ci20.dts
arch/mips/kernel/setup.c
arch/parisc/Kconfig
arch/parisc/Makefile
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/mm/kasan/kasan_init_32.c
arch/riscv/Kconfig
arch/riscv/Kconfig.socs
arch/riscv/configs/defconfig
arch/riscv/configs/rv32_defconfig
arch/riscv/include/asm/clint.h
arch/riscv/include/asm/pgtable.h
arch/riscv/include/asm/uaccess.h
arch/riscv/kernel/smp.c
arch/riscv/lib/Makefile
arch/s390/kvm/kvm-s390.c
arch/x86/Makefile
arch/x86/crypto/Makefile
arch/x86/events/amd/uncore.c
arch/x86/include/asm/kvm_emulate.h
arch/x86/kernel/apic/vector.c
arch/x86/kernel/cpu/mce/intel.c
arch/x86/kernel/cpu/mce/therm_throt.c
arch/x86/kvm/Kconfig
arch/x86/kvm/emulate.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/lapic.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/fault.c
arch/x86/mm/ioremap.c
arch/x86/net/bpf_jit_comp32.c
block/blk-iocost.c
block/blk-mq-sched.c
block/genhd.c
drivers/acpi/apei/ghes.c
drivers/android/binderfs.c
drivers/ata/Kconfig
drivers/ata/Makefile
drivers/ata/ahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-pata-timings.c [new file with mode: 0644]
drivers/ata/libata-sata.c [new file with mode: 0644]
drivers/ata/libata-scsi.c
drivers/ata/libata-sff.c
drivers/ata/libata-transport.c
drivers/ata/libata.h
drivers/ata/sata_promise.c
drivers/atm/nicstar.c
drivers/auxdisplay/Kconfig
drivers/auxdisplay/charlcd.c
drivers/auxdisplay/img-ascii-lcd.c
drivers/base/memory.c
drivers/base/platform.c
drivers/block/virtio_blk.c
drivers/bus/sunxi-rsb.c
drivers/bus/ti-sysc.c
drivers/char/ipmi/ipmi_si_platform.c
drivers/char/tpm/eventlog/common.c
drivers/char/tpm/eventlog/of.c
drivers/char/tpm/eventlog/tpm1.c
drivers/char/tpm/eventlog/tpm2.c
drivers/char/tpm/tpm-chip.c
drivers/char/tpm/tpm.h
drivers/char/tpm/tpm2-cmd.c
drivers/char/tpm/tpm_ibmvtpm.c
drivers/char/tpm/tpm_ibmvtpm.h
drivers/char/tpm/tpm_tis_spi_cr50.c
drivers/char/tpm/tpm_tis_spi_main.c
drivers/clk/clk.c
drivers/clk/imx/clk-imx8mp.c
drivers/clk/imx/clk-scu.c
drivers/clk/qcom/dispcc-sc7180.c
drivers/clk/qcom/videocc-sc7180.c
drivers/clk/ti/clk-43xx.c
drivers/clocksource/hyperv_timer.c
drivers/dma/dmaengine.c
drivers/dma/idxd/cdev.c
drivers/dma/ti/k3-udma-glue.c
drivers/firmware/efi/efivars.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
drivers/gpu/drm/amd/powerplay/renoir_ppt.c
drivers/gpu/drm/arm/display/komeda/komeda_drv.c
drivers/gpu/drm/bochs/bochs_hw.c
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_lease.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/exynos/exynos7_drm_decon.c
drivers/gpu/drm/exynos/exynos_drm_dma.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_drm_scaler.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_timeline.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/opregion.c
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_request.h
drivers/gpu/drm/i915/i915_utils.h
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/hid/hid-google-hammer.c
drivers/hid/hid-ids.h
drivers/hid/hid-picolcd_fb.c
drivers/hid/hid-quirks.c
drivers/hid/hid-sensor-custom.c
drivers/hwtracing/intel_th/msu.c
drivers/hwtracing/intel_th/pci.c
drivers/hwtracing/stm/p_sys-t.c
drivers/i2c/busses/i2c-designware-pcidrv.c
drivers/i2c/busses/i2c-gpio.c
drivers/i2c/busses/i2c-hix5hd2.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-nvidia-gpu.c
drivers/i2c/busses/i2c-pca-platform.c
drivers/i2c/busses/i2c-st.c
drivers/i2c/i2c-core-acpi.c
drivers/i3c/device.c
drivers/i3c/master.c
drivers/i3c/master/dw-i3c-master.c
drivers/i3c/master/i3c-master-cdns.c
drivers/iio/accel/adxl372.c
drivers/iio/accel/st_accel_i2c.c
drivers/iio/adc/at91-sama5d2_adc.c
drivers/iio/adc/stm32-dfsdm-adc.c
drivers/iio/chemical/Kconfig
drivers/iio/light/vcnl4000.c
drivers/iio/magnetometer/ak8974.c
drivers/iio/proximity/ping.c
drivers/iio/trigger/stm32-timer-trigger.c
drivers/infiniband/core/device.c
drivers/infiniband/core/nldev.c
drivers/infiniband/core/security.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/core/user_mad.c
drivers/infiniband/hw/hfi1/user_sdma.c
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/sw/rdmavt/cq.c
drivers/input/input.c
drivers/input/keyboard/tm2-touchkey.c
drivers/input/mouse/synaptics.c
drivers/input/rmi4/rmi_f11.c
drivers/input/touchscreen/raydium_i2c_ts.c
drivers/iommu/amd_iommu.c
drivers/iommu/dma-iommu.c
drivers/iommu/dmar.c
drivers/iommu/intel-iommu-debugfs.c
drivers/iommu/intel-iommu.c
drivers/iommu/io-pgtable-arm.c
drivers/irqchip/irq-gic-v3.c
drivers/macintosh/windfarm_ad7417_sensor.c
drivers/macintosh/windfarm_fcu_controls.c
drivers/macintosh/windfarm_lm75_sensor.c
drivers/macintosh/windfarm_lm87_sensor.c
drivers/macintosh/windfarm_max6690_sensor.c
drivers/macintosh/windfarm_smu_sat.c
drivers/misc/cardreader/rts5227.c
drivers/misc/cardreader/rts5249.c
drivers/misc/cardreader/rts5260.c
drivers/misc/cardreader/rts5261.c
drivers/misc/eeprom/at24.c
drivers/mmc/core/core.c
drivers/mmc/core/mmc.c
drivers/mmc/core/mmc_ops.c
drivers/mmc/host/rtsx_pci_sdmmc.c
drivers/mmc/host/sdhci-acpi.c
drivers/mmc/host/sdhci-cadence.c
drivers/mmc/host/sdhci-msm.c
drivers/mmc/host/sdhci-of-at91.c
drivers/mmc/host/sdhci-omap.c
drivers/mmc/host/sdhci-pci-gli.c
drivers/mmc/host/sdhci-tegra.c
drivers/net/Kconfig
drivers/net/bonding/bond_alb.c
drivers/net/caif/caif_spi.c
drivers/net/can/dev.c
drivers/net/can/slcan.c
drivers/net/dsa/mt7530.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/global2.c
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fman/Kconfig
drivers/net/ethernet/freescale/fman/fman.c
drivers/net/ethernet/freescale/fman/fman.h
drivers/net/ethernet/freescale/fman/fman_memac.c
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
drivers/net/ethernet/huawei/hinic/hinic_rx.c
drivers/net/ethernet/huawei/hinic/hinic_tx.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/marvell/mvmdio.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/health.h
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
drivers/net/ethernet/mellanox/mlx5/core/lag.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
drivers/net/ethernet/micrel/ks8851_mll.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/neterion/vxge/vxge-config.h
drivers/net/ethernet/neterion/vxge/vxge-main.h
drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
drivers/net/ethernet/pensando/ionic/ionic_if.h
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/pensando/ionic/ionic_regs.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/efx_channels.c
drivers/net/ethernet/sfc/mcdi.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/sfc/tx_common.c
drivers/net/ethernet/sfc/tx_common.h
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/geneve.c
drivers/net/ifb.c
drivers/net/ipvlan/ipvlan_core.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/macsec.c
drivers/net/macvlan.c
drivers/net/netdevsim/ipsec.c
drivers/net/phy/bcm63xx.c
drivers/net/phy/dp83867.c
drivers/net/phy/mdio-bcm-unimac.c
drivers/net/phy/mdio-mux-bcm-iproc.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/phy/sfp-bus.c
drivers/net/slip/slhc.c
drivers/net/team/team.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/veth.c
drivers/net/vxlan.c
drivers/net/wireguard/device.c
drivers/net/wireguard/netlink.c
drivers/net/wireguard/noise.c
drivers/net/wireguard/noise.h
drivers/net/wireguard/peer.c
drivers/net/wireguard/queueing.h
drivers/net/wireguard/receive.c
drivers/net/wireless/intel/iwlwifi/cfg/22000.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.h
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.h
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/mediatek/mt76/dma.c
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
drivers/net/wireless/ti/wlcore/main.c
drivers/nfc/fdp/fdp.c
drivers/nvme/host/rdma.c
drivers/nvme/target/tcp.c
drivers/of/of_mdio.c
drivers/pinctrl/cirrus/pinctrl-madera-core.c
drivers/pinctrl/core.c
drivers/pinctrl/freescale/pinctrl-scu.c
drivers/pinctrl/meson/pinctrl-meson-gxl.c
drivers/pinctrl/pinctrl-falcon.c
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
drivers/rtc/Kconfig
drivers/s390/block/dasd.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_int.h
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_sys.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/qeth_l3_sys.c
drivers/scsi/Kconfig
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/libsas/Kconfig
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/sd.c
drivers/scsi/ufs/ufshcd.c
drivers/slimbus/qcom-ngd-ctrl.c
drivers/soc/fsl/dpio/dpio-driver.c
drivers/soc/samsung/exynos-chipid.c
drivers/staging/greybus/tools/loopback_test.c
drivers/staging/rtl8188eu/os_dep/usb_intf.c
drivers/staging/speakup/main.c
drivers/staging/wfx/hif_tx.c
drivers/staging/wfx/hif_tx.h
drivers/staging/wfx/hif_tx_mib.h
drivers/staging/wfx/sta.c
drivers/tee/amdtee/core.c
drivers/thunderbolt/switch.c
drivers/tty/tty_io.c
drivers/usb/chipidea/udc.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/quirks.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-trace.h
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/typec/ucsi/displayport.c
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_ring.c
drivers/watchdog/iTCO_vendor.h
drivers/watchdog/iTCO_vendor_support.c
drivers/watchdog/iTCO_wdt.c
fs/afs/addr_list.c
fs/afs/cmservice.c
fs/afs/fs_probe.c
fs/afs/internal.h
fs/afs/rxrpc.c
fs/btrfs/block-group.c
fs/btrfs/inode.c
fs/ceph/file.c
fs/ceph/snap.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/smb2ops.c
fs/crypto/keysetup.c
fs/eventpoll.c
fs/file.c
fs/fuse/dev.c
fs/fuse/fuse_i.h
fs/gfs2/inode.c
fs/inode.c
fs/io_uring.c
fs/locks.c
fs/nfs/client.c
fs/nfs/fs_context.c
fs/nfs/fscache.c
fs/nfs/namespace.c
fs/nfs/nfs4client.c
fs/open.c
fs/overlayfs/Kconfig
fs/overlayfs/file.c
fs/overlayfs/overlayfs.h
fs/overlayfs/super.c
fs/overlayfs/util.c
fs/zonefs/super.c
include/crypto/curve25519.h
include/drm/drm_dp_mst_helper.h
include/dt-bindings/clock/imx8mn-clock.h
include/linux/bpf.h
include/linux/ceph/messenger.h
include/linux/ceph/osdmap.h
include/linux/ceph/rados.h
include/linux/cgroup.h
include/linux/clk-provider.h
include/linux/dmar.h
include/linux/dsa/8021q.h
include/linux/file.h
include/linux/fs.h
include/linux/futex.h
include/linux/genhd.h
include/linux/i2c.h
include/linux/ieee80211.h
include/linux/inet_diag.h
include/linux/intel-iommu.h
include/linux/libata.h
include/linux/memcontrol.h
include/linux/mmc/host.h
include/linux/netlink.h
include/linux/of_clk.h
include/linux/page-flags.h
include/linux/pci_ids.h
include/linux/phy.h
include/linux/platform_device.h
include/linux/rhashtable.h
include/linux/skbuff.h
include/linux/socket.h
include/linux/vmalloc.h
include/linux/workqueue.h
include/net/af_rxrpc.h
include/net/fib_rules.h
include/net/sch_generic.h
include/soc/mscc/ocelot_dev.h
include/trace/events/afs.h
include/uapi/linux/in.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/serio.h
init/Kconfig
kernel/bpf/bpf_struct_ops.c
kernel/bpf/btf.c
kernel/bpf/cgroup.c
kernel/bpf/syscall.c
kernel/cgroup/cgroup-v1.c
kernel/cgroup/cgroup.c
kernel/fork.c
kernel/futex.c
kernel/irq/manage.c
kernel/notifier.c
kernel/pid.c
kernel/sys.c
kernel/trace/bpf_trace.c
kernel/trace/ftrace.c
kernel/workqueue.c
lib/crypto/chacha20poly1305-selftest.c
mm/hugetlb_cgroup.c
mm/madvise.c
mm/memcontrol.c
mm/mmu_notifier.c
mm/nommu.c
mm/slub.c
mm/sparse.c
mm/swapfile.c
mm/vmalloc.c
net/Kconfig
net/batman-adv/bat_iv_ogm.c
net/bpfilter/main.c
net/caif/caif_dev.c
net/ceph/messenger.c
net/ceph/osd_client.c
net/ceph/osdmap.c
net/core/dev.c
net/core/devlink.c
net/core/netclassid_cgroup.c
net/core/pktgen.c
net/core/sock.c
net/core/sock_map.c
net/dsa/dsa_priv.h
net/dsa/port.c
net/dsa/slave.c
net/dsa/tag_8021q.c
net/dsa/tag_brcm.c
net/dsa/tag_sja1105.c
net/ethtool/debug.c
net/ethtool/linkinfo.c
net/ethtool/linkmodes.c
net/ethtool/netlink.c
net/ethtool/wol.c
net/hsr/hsr_framereg.c
net/hsr/hsr_netlink.c
net/hsr/hsr_slave.c
net/ieee802154/nl_policy.c
net/ipv4/Kconfig
net/ipv4/bpf_tcp_ca.c
net/ipv4/fib_frontend.c
net/ipv4/gre_demux.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/ip_gre.c
net/ipv4/ip_vti.c
net/ipv4/raw_diag.c
net/ipv4/tcp.c
net/ipv4/tcp_output.c
net/ipv4/udp_diag.c
net/ipv6/addrconf.c
net/ipv6/ip6_vti.c
net/ipv6/seg6_iptunnel.c
net/ipv6/seg6_local.c
net/ipv6/xfrm6_tunnel.c
net/mac80211/debugfs_sta.c
net/mac80211/key.c
net/mac80211/mesh_hwmp.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/tx.c
net/mptcp/options.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_flow_table_ip.c
net/netfilter/nf_flow_table_offload.c
net/netfilter/nf_synproxy_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nft_chain_nat.c
net/netfilter/nft_fwd_netdev.c
net/netfilter/nft_payload.c
net/netfilter/nft_set_pipapo.c
net/netfilter/nft_set_rbtree.c
net/netfilter/nft_tunnel.c
net/netfilter/x_tables.c
net/netfilter/xt_recent.c
net/netlink/af_netlink.c
net/nfc/hci/core.c
net/nfc/netlink.c
net/openvswitch/datapath.c
net/packet/af_packet.c
net/packet/internal.h
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/call_object.c
net/rxrpc/conn_client.c
net/rxrpc/input.c
net/rxrpc/sendmsg.c
net/sched/act_ct.c
net/sched/act_mirred.c
net/sched/cls_route.c
net/sched/cls_tcindex.c
net/sched/sch_cbs.c
net/sched/sch_fq.c
net/sched/sch_taprio.c
net/sctp/diag.c
net/smc/smc_ib.c
net/socket.c
net/tipc/netlink.c
net/wireless/nl80211.c
net/wireless/scan.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
scripts/Kconfig.include
scripts/Makefile.extrawarn
scripts/dtc/dtc-lexer.l
scripts/export_report.pl
scripts/kallsyms.c
scripts/mod/devicetable-offsets.c
scripts/mod/file2alias.c
scripts/mod/modpost.c
scripts/parse-maintainers.pl
security/keys/key.c
security/keys/keyctl.c
sound/core/oss/pcm_plugin.c
sound/core/seq/oss/seq_oss_midi.c
sound/core/seq/seq_virmidi.c
sound/pci/hda/patch_realtek.c
sound/usb/line6/driver.c
sound/usb/line6/midibuf.c
tools/include/uapi/asm/errno.h
tools/include/uapi/linux/in.h
tools/perf/Makefile
tools/perf/arch/arm64/util/arm-spe.c
tools/perf/arch/arm64/util/perf_regs.c
tools/perf/arch/powerpc/util/perf_regs.c
tools/perf/arch/x86/util/auxtrace.c
tools/perf/arch/x86/util/event.c
tools/perf/arch/x86/util/header.c
tools/perf/arch/x86/util/intel-bts.c
tools/perf/arch/x86/util/intel-pt.c
tools/perf/arch/x86/util/machine.c
tools/perf/arch/x86/util/perf_regs.c
tools/perf/arch/x86/util/pmu.c
tools/perf/bench/bench.h
tools/perf/bench/epoll-ctl.c
tools/perf/bench/epoll-wait.c
tools/perf/bench/futex-hash.c
tools/perf/bench/futex-lock-pi.c
tools/perf/bench/futex-requeue.c
tools/perf/bench/futex-wake-parallel.c
tools/perf/bench/futex-wake.c
tools/perf/builtin-diff.c
tools/perf/builtin-top.c
tools/perf/pmu-events/jevents.c
tools/perf/tests/bp_account.c
tools/perf/util/block-info.c
tools/perf/util/env.c
tools/perf/util/map.c
tools/perf/util/parse-events.c
tools/perf/util/probe-file.c
tools/perf/util/probe-finder.c
tools/perf/util/setup.py
tools/perf/util/symbol.c
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
tools/power/x86/turbostat/Makefile
tools/power/x86/turbostat/turbostat.c
tools/scripts/Makefile.include
tools/testing/ktest/ktest.pl
tools/testing/ktest/sample.conf
tools/testing/selftests/Makefile
tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_send_signal_kern.c
tools/testing/selftests/bpf/test_btf.c
tools/testing/selftests/bpf/verifier/jmp32.c
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/forwarding/Makefile [new file with mode: 0644]
tools/testing/selftests/net/forwarding/ethtool_lib.sh [changed mode: 0755->0644]
tools/testing/selftests/net/reuseport_addr_any.c
tools/testing/selftests/netfilter/Makefile
tools/testing/selftests/netfilter/config
tools/testing/selftests/netfilter/nf-queue.c [new file with mode: 0644]
tools/testing/selftests/netfilter/nft_queue.sh [new file with mode: 0755]
tools/testing/selftests/tc-testing/config
tools/testing/selftests/wireguard/netns.sh
tools/testing/selftests/wireguard/qemu/Makefile
tools/testing/selftests/wireguard/qemu/init.c
tools/testing/selftests/wireguard/qemu/kernel.config
usr/Kconfig

index 196ca31..6ec5558 100644 (file)
@@ -86,6 +86,8 @@ ForEachMacros:
   - 'bio_for_each_segment_all'
   - 'bio_list_for_each'
   - 'bip_for_each_vec'
+  - 'bitmap_for_each_clear_region'
+  - 'bitmap_for_each_set_region'
   - 'blkg_for_each_descendant_post'
   - 'blkg_for_each_descendant_pre'
   - 'blk_queue_for_each_rl'
@@ -115,6 +117,7 @@ ForEachMacros:
   - 'drm_client_for_each_connector_iter'
   - 'drm_client_for_each_modeset'
   - 'drm_connector_for_each_possible_encoder'
+  - 'drm_for_each_bridge_in_chain'
   - 'drm_for_each_connector_iter'
   - 'drm_for_each_crtc'
   - 'drm_for_each_encoder'
@@ -136,9 +139,10 @@ ForEachMacros:
   - 'for_each_bio'
   - 'for_each_board_func_rsrc'
   - 'for_each_bvec'
+  - 'for_each_card_auxs'
+  - 'for_each_card_auxs_safe'
   - 'for_each_card_components'
-  - 'for_each_card_links'
-  - 'for_each_card_links_safe'
+  - 'for_each_card_pre_auxs'
   - 'for_each_card_prelinks'
   - 'for_each_card_rtds'
   - 'for_each_card_rtds_safe'
@@ -166,6 +170,7 @@ ForEachMacros:
   - 'for_each_dpcm_fe'
   - 'for_each_drhd_unit'
   - 'for_each_dss_dev'
+  - 'for_each_efi_handle'
   - 'for_each_efi_memory_desc'
   - 'for_each_efi_memory_desc_in_map'
   - 'for_each_element'
@@ -190,6 +195,7 @@ ForEachMacros:
   - 'for_each_lru'
   - 'for_each_matching_node'
   - 'for_each_matching_node_and_match'
+  - 'for_each_member'
   - 'for_each_memblock'
   - 'for_each_memblock_type'
   - 'for_each_memcg_cache_index'
@@ -200,9 +206,11 @@ ForEachMacros:
   - 'for_each_msi_entry'
   - 'for_each_msi_entry_safe'
   - 'for_each_net'
+  - 'for_each_net_continue_reverse'
   - 'for_each_netdev'
   - 'for_each_netdev_continue'
   - 'for_each_netdev_continue_rcu'
+  - 'for_each_netdev_continue_reverse'
   - 'for_each_netdev_feature'
   - 'for_each_netdev_in_bond_rcu'
   - 'for_each_netdev_rcu'
@@ -254,10 +262,10 @@ ForEachMacros:
   - 'for_each_reserved_mem_region'
   - 'for_each_rtd_codec_dai'
   - 'for_each_rtd_codec_dai_rollback'
-  - 'for_each_rtdcom'
-  - 'for_each_rtdcom_safe'
+  - 'for_each_rtd_components'
   - 'for_each_set_bit'
   - 'for_each_set_bit_from'
+  - 'for_each_set_clump8'
   - 'for_each_sg'
   - 'for_each_sg_dma_page'
   - 'for_each_sg_page'
@@ -267,6 +275,7 @@ ForEachMacros:
   - 'for_each_subelement_id'
   - '__for_each_thread'
   - 'for_each_thread'
+  - 'for_each_wakeup_source'
   - 'for_each_zone'
   - 'for_each_zone_zonelist'
   - 'for_each_zone_zonelist_nodemask'
@@ -330,6 +339,7 @@ ForEachMacros:
   - 'list_for_each'
   - 'list_for_each_codec'
   - 'list_for_each_codec_safe'
+  - 'list_for_each_continue'
   - 'list_for_each_entry'
   - 'list_for_each_entry_continue'
   - 'list_for_each_entry_continue_rcu'
@@ -351,6 +361,7 @@ ForEachMacros:
   - 'llist_for_each_entry'
   - 'llist_for_each_entry_safe'
   - 'llist_for_each_safe'
+  - 'mci_for_each_dimm'
   - 'media_device_for_each_entity'
   - 'media_device_for_each_intf'
   - 'media_device_for_each_link'
@@ -444,10 +455,16 @@ ForEachMacros:
   - 'virtio_device_for_each_vq'
   - 'xa_for_each'
   - 'xa_for_each_marked'
+  - 'xa_for_each_range'
   - 'xa_for_each_start'
   - 'xas_for_each'
   - 'xas_for_each_conflict'
   - 'xas_for_each_marked'
+  - 'xbc_array_for_each_value'
+  - 'xbc_for_each_key_value'
+  - 'xbc_node_for_each_array_value'
+  - 'xbc_node_for_each_child'
+  - 'xbc_node_for_each_key_value'
   - 'zorro_for_each_dev'
 
 #IncludeBlocks: Preserve # Unknown to clang-format-5.0
index ffb8f28..a0dfce8 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -225,6 +225,7 @@ Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
 Praveen BP <praveenbp@ti.com>
 Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
 Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
+Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com>
 Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
 Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
 Rajesh Shah <rajesh.shah@intel.com>
index 9120e59..2c08c62 100644 (file)
@@ -110,6 +110,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154        |
 +----------------+-----------------+-----------------+-----------------------------+
+| Cavium         | ThunderX GICv3  | #38539          | N/A                         |
++----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456        |
 +----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX Core   | #30115          | CAVIUM_ERRATUM_30115        |
index 33c7842..8b9a8f3 100644 (file)
@@ -23,6 +23,8 @@ properties:
       - items:
         - const: allwinner,sun7i-a20-crypto
         - const: allwinner,sun4i-a10-crypto
+      - items:
+        - const: allwinner,sun8i-a33-crypto
 
   reg:
     maxItems: 1
index ef2ae72..921172f 100644 (file)
@@ -5,6 +5,7 @@ Required properties:
     * "cypress,tm2-touchkey" - for the touchkey found on the tm2 board
     * "cypress,midas-touchkey" - for the touchkey found on midas boards
     * "cypress,aries-touchkey" - for the touchkey found on aries boards
+    * "coreriver,tc360-touchkey" - for the Coreriver TouchCore 360 touchkey
 - reg: I2C address of the chip.
 - interrupts: interrupt to which the chip is connected (see interrupt
        binding[0]).
index 250f8d8..c00fb0d 100644 (file)
@@ -110,6 +110,13 @@ PROPERTIES
                Usage: required
                Definition: See soc/fsl/qman.txt and soc/fsl/bman.txt
 
+- fsl,erratum-a050385
+               Usage: optional
+               Value type: boolean
+               Definition: A boolean property. Indicates the presence of the
+               erratum A050385 which indicates that DMA transactions that are
+               split can result in a FMan lock.
+
 =============================================================================
 FMan MURAM Node
 
index 9e67944..b3c8c62 100644 (file)
@@ -205,6 +205,8 @@ patternProperties:
     description: Colorful GRP, Shenzhen Xueyushi Technology Ltd.
   "^compulab,.*":
     description: CompuLab Ltd.
+  "^coreriver,.*":
+    description: CORERIVER Semiconductor Co.,Ltd.
   "^corpro,.*":
     description: Chengdu Corpro Technology Co., Ltd.
   "^cortina,.*":
index 790a150..56e5833 100644 (file)
@@ -266,11 +266,15 @@ to use.
   attached (via the dmaengine_desc_attach_metadata() helper to the descriptor.
 
   From the DMA driver the following is expected for this mode:
+
   - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM
+
     The data from the provided metadata buffer should be prepared for the DMA
     controller to be sent alongside of the payload data. Either by copying to a
     hardware descriptor, or highly coupled packet.
+
   - DMA_DEV_TO_MEM
+
     On transfer completion the DMA driver must copy the metadata to the client
     provided metadata buffer before notifying the client about the completion.
     After the transfer completion, DMA drivers must not touch the metadata
@@ -284,10 +288,14 @@ to use.
   and dmaengine_desc_set_metadata_len() is provided as helper functions.
 
   From the DMA driver the following is expected for this mode:
-  - get_metadata_ptr
+
+  - get_metadata_ptr()
+
     Should return a pointer for the metadata buffer, the maximum size of the
     metadata buffer and the currently used / valid (if any) bytes in the buffer.
-  - set_metadata_len
+
+  - set_metadata_len()
+
     It is called by the clients after it have placed the metadata to the buffer
     to let the DMA driver know the number of valid bytes provided.
 
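As a concrete counterpart to the driver-side expectations above, the
client-facing helpers this section names (dmaengine_desc_get_metadata_ptr()
and dmaengine_desc_set_metadata_len()) end up used roughly as below - a hedged
sketch of a DESC_METADATA_ENGINE client, with the descriptor assumed to be
already prepared on a channel that advertises metadata support:

    #include <linux/dmaengine.h>
    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/string.h>

    /* Hedged sketch: fill TX metadata in the ENGINE mode described above. */
    static int fill_tx_metadata(struct dma_async_tx_descriptor *desc,
                                const void *md, size_t md_len)
    {
            size_t payload_len, max_len;
            void *buf;

            /* Ask the DMA driver for its metadata buffer. */
            buf = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
            if (IS_ERR(buf))
                    return PTR_ERR(buf);
            if (md_len > max_len)
                    return -ENOSPC;

            /* Place the metadata, then report how many bytes are valid. */
            memcpy(buf, md, md_len);
            return dmaengine_desc_set_metadata_len(desc, md_len);
    }
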
index f185060..26c0939 100644 (file)
@@ -850,3 +850,11 @@ business doing so.
 d_alloc_pseudo() is internal-only; uses outside of alloc_file_pseudo() are
 very suspect (and won't work in modules).  Such uses are very likely to
 be misspelled d_alloc_anon().
+
+---
+
+**mandatory**
+
+[should've been added in 2016] stale comment in finish_open() notwithstanding,
+failure exits in ->atomic_open() instances should *NOT* fput() the file,
+no matter what.  Everything is handled by the caller.
index d54fa98..78813c3 100644 (file)
@@ -258,11 +258,11 @@ conditions.
     |    option    | condition | size     read    write    read    write |
     +--------------+-----------+-----------------------------------------+
     |              | good      | fixed    yes     no       yes     yes   |
-    | remount-ro   | read-only | fixed    yes     no       yes     no    |
+    | remount-ro   | read-only | as is    yes     no       yes     no    |
     | (default)    | offline   |   0      no      no       no      no    |
     +--------------+-----------+-----------------------------------------+
     |              | good      | fixed    yes     no       yes     yes   |
-    | zone-ro      | read-only | fixed    yes     no       yes     no    |
+    | zone-ro      | read-only | as is    yes     no       yes     no    |
     |              | offline   |   0      no      no       no      no    |
     +--------------+-----------+-----------------------------------------+
     |              | good      |   0      no      no       yes     yes   |
@@ -270,7 +270,7 @@ conditions.
     |              | offline   |   0      no      no       no      no    |
     +--------------+-----------+-----------------------------------------+
     |              | good      | fixed    yes     yes      yes     yes   |
-    | repair       | read-only | fixed    yes     no       yes     no    |
+    | repair       | read-only | as is    yes     no       yes     no    |
     |              | offline   |   0      no      no       no      no    |
     +--------------+-----------+-----------------------------------------+
 
@@ -307,8 +307,16 @@ condition changes. The defined behaviors are as follow:
 * zone-offline
 * repair
 
-The I/O error actions defined for each behavior are detailed in the previous
-section.
+The run-time I/O error actions defined for each behavior are detailed in the
+previous section. Mount time I/O errors will cause the mount operation to fail.
+The handling of read-only zones also differs between mount-time and run-time.
+If a read-only zone is found at mount time, the zone is always treated in the
+same manner as offline zones, that is, all accesses are disabled and the zone
+file size set to 0. This is necessary as the write pointer of read-only zones
+is defined as invalid by the ZBC and ZAC standards, making it impossible to
+discover the amount of data that has been written to the zone. In the case of a
+read-only zone discovered at run-time, as indicated in the previous section,
+the size of the zone file is left unchanged from its last updated value.
 
 Zonefs User Space Tools
 =======================
index f1e5dce..510f38d 100644 (file)
@@ -237,7 +237,7 @@ This is solely useful to speed up test compiles.
 KBUILD_EXTRA_SYMBOLS
 --------------------
 For modules that use symbols from other modules.
-See more details in modules.txt.
+See more details in modules.rst.
 
 ALLSOURCE_ARCHS
 ---------------
index 35b3263..8b413ef 100644 (file)
@@ -44,7 +44,7 @@ intermediate::
             def_bool y
 
 Then, Kconfig moves onto the evaluation stage to resolve inter-symbol
-dependency as explained in kconfig-language.txt.
+dependency as explained in kconfig-language.rst.
 
 
 Variables
index 6bc126a..04d5c01 100644 (file)
@@ -924,7 +924,7 @@ When kbuild executes, the following steps are followed (roughly):
        $(KBUILD_AFLAGS_MODULE) is used to add arch-specific options that
        are used for assembler.
 
-       From commandline AFLAGS_MODULE shall be used (see kbuild.txt).
+       From commandline AFLAGS_MODULE shall be used (see kbuild.rst).
 
     KBUILD_CFLAGS_KERNEL
        $(CC) options specific for built-in
@@ -937,7 +937,7 @@ When kbuild executes, the following steps are followed (roughly):
 
        $(KBUILD_CFLAGS_MODULE) is used to add arch-specific options that
        are used for $(CC).
-       From commandline CFLAGS_MODULE shall be used (see kbuild.txt).
+       From commandline CFLAGS_MODULE shall be used (see kbuild.rst).
 
     KBUILD_LDFLAGS_MODULE
        Options for $(LD) when linking modules
@@ -945,7 +945,7 @@ When kbuild executes, the following steps are followed (roughly):
        $(KBUILD_LDFLAGS_MODULE) is used to add arch-specific options
        used when linking modules. This is often a linker script.
 
-       From commandline LDFLAGS_MODULE shall be used (see kbuild.txt).
+       From commandline LDFLAGS_MODULE shall be used (see kbuild.rst).
 
     KBUILD_LDS
 
index 69fa48e..e0b45a2 100644 (file)
@@ -470,9 +470,9 @@ build.
 
        The syntax of the Module.symvers file is::
 
-       <CRC>       <Symbol>          <Namespace>  <Module>                         <Export Type>
+       <CRC>       <Symbol>         <Module>                         <Export Type>     <Namespace>
 
-       0xe1cc2a05  usb_stor_suspend  USB_STORAGE  drivers/usb/storage/usb-storage  EXPORT_SYMBOL_GPL
+       0xe1cc2a05  usb_stor_suspend drivers/usb/storage/usb-storage  EXPORT_SYMBOL_GPL USB_STORAGE
 
        The fields are separated by tabs and values may be empty (e.g.
        if no namespace is defined for an exported symbol).
index 1a7683e..8b46e85 100644 (file)
@@ -40,9 +40,6 @@ example usage
     # Delete a snapshot using:
     $ devlink region del pci/0000:00:05.0/cr-space snapshot 1
 
-    # Trigger (request) a snapshot be taken:
-    $ devlink region trigger pci/0000:00:05.0/cr-space
-
     # Dump a snapshot:
     $ devlink region dump pci/0000:00:05.0/fw-health snapshot 1
     0000000000000000 0014 95dc 0014 9514 0035 1670 0034 db30
index 06c97dc..e143ab7 100644 (file)
@@ -8,9 +8,9 @@ Overview
 ========
 
 The net_failover driver provides an automated failover mechanism via APIs
-to create and destroy a failover master netdev and mananges a primary and
+to create and destroy a failover master netdev and manages a primary and
 standby slave netdevs that get registered via the generic failover
-infrastructrure.
+infrastructure.
 
 The failover netdev acts a master device and controls 2 slave devices. The
 original paravirtual interface is registered as 'standby' slave netdev and
@@ -29,7 +29,7 @@ virtio-net accelerated datapath: STANDBY mode
 =============================================
 
 net_failover enables hypervisor controlled accelerated datapath to virtio-net
-enabled VMs in a transparent manner with no/minimal guest userspace chanages.
+enabled VMs in a transparent manner with no/minimal guest userspace changes.
 
 To support this, the hypervisor needs to enable VIRTIO_NET_F_STANDBY
 feature on the virtio-net interface and assign the same MAC address to both
index f2a0147..eec6169 100644 (file)
@@ -159,7 +159,7 @@ Socket Interface
        set SO_RDS_TRANSPORT on a socket for which the transport has
        been previously attached explicitly (by SO_RDS_TRANSPORT) or
        implicitly (via bind(2)) will return an error of EOPNOTSUPP.
-       An attempt to set SO_RDS_TRANSPPORT to RDS_TRANS_NONE will
+       An attempt to set SO_RDS_TRANSPORT to RDS_TRANS_NONE will
        always return EINVAL.
 
 RDMA for RDS
index d18c97b..c3129b9 100644 (file)
@@ -53,6 +53,29 @@ key management interface to perform common hypervisor activities such as
 encrypting bootstrap code, snapshot, migrating and debugging the guest. For more
 information, see the SEV Key Management spec [api-spec]_
 
+The main ioctl to access SEV is KVM_MEM_ENCRYPT_OP.  If the argument
+to KVM_MEM_ENCRYPT_OP is NULL, the ioctl returns 0 if SEV is enabled
+and ``ENOTTY`` if it is disabled (on some older versions of Linux,
+the ioctl runs normally even with a NULL argument, and therefore will
+likely return ``EFAULT``).  If non-NULL, the argument to KVM_MEM_ENCRYPT_OP
+must be a struct kvm_sev_cmd::
+
+       struct kvm_sev_cmd {
+               __u32 id;
+               __u64 data;
+               __u32 error;
+               __u32 sev_fd;
+       };
+
+
+The ``id`` field contains the subcommand, and the ``data`` field points to
+another struct containing arguments specific to the command.  The ``sev_fd``
+should point to a file descriptor that is opened on the ``/dev/sev``
+device, if needed (see individual commands).
+
+On output, ``error`` is zero on success, or an error code.  Error codes
+are defined in ``<linux/psp-dev.h>``.
+
 KVM implements the following commands to support common lifecycle events of SEV
 guests, such as launching, running, snapshotting, migrating and decommissioning.
 
@@ -90,6 +113,8 @@ Returns: 0 on success, -negative on error
 
 On success, the 'handle' field contains a new handle and on error, a negative value.
 
+KVM_SEV_LAUNCH_START requires the ``sev_fd`` field to be valid.
+
 For more details, see SEV spec Section 6.2.
 
 3. KVM_SEV_LAUNCH_UPDATE_DATA
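
From userspace, the struct documented above is handed to a plain ioctl on the
VM file descriptor. A hedged sketch follows: the uapi spelling in <linux/kvm.h>
is assumed to be KVM_MEMORY_ENCRYPT_OP (which the text above abbreviates), and
vm_fd and sev_fd are assumed to be an already-created KVM VM fd and an open
/dev/sev fd:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <errno.h>
    #include <stdio.h>

    /* Hedged sketch: issue the KVM_SEV_INIT subcommand described above. */
    static int sev_init(int vm_fd, int sev_fd)
    {
            struct kvm_sev_cmd cmd = {
                    .id = KVM_SEV_INIT,      /* subcommand in the 'id' field */
                    .data = 0,               /* KVM_SEV_INIT takes no argument */
                    .sev_fd = (__u32)sev_fd, /* handle to /dev/sev */
            };

            if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd)) {
                    fprintf(stderr, "SEV_INIT: errno=%d fw error=%u\n",
                            errno, cmd.error);
                    return -1;
            }
            return 0;
    }
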
index a6fbdf3..8b6e2d8 100644 (file)
@@ -4073,7 +4073,6 @@ F:        drivers/scsi/snic/
 CISCO VIC ETHERNET NIC DRIVER
 M:     Christian Benvenuti <benve@cisco.com>
 M:     Govindarajulu Varadarajan <_govind@gmx.com>
-M:     Parvi Kaustubhi <pkaustub@cisco.com>
 S:     Supported
 F:     drivers/net/ethernet/cisco/enic/
 
@@ -4572,7 +4571,7 @@ F:        drivers/infiniband/hw/cxgb4/
 F:     include/uapi/rdma/cxgb4-abi.h
 
 CXGB4VF ETHERNET DRIVER (CXGB4VF)
-M:     Casey Leedom <leedom@chelsio.com>
+M:     Vishal Kulkarni <vishal@gmail.com>
 L:     netdev@vger.kernel.org
 W:     http://www.chelsio.com
 S:     Supported
@@ -6198,7 +6197,6 @@ S:        Supported
 F:     drivers/scsi/be2iscsi/
 
 Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net)
-M:     Sathya Perla <sathya.perla@broadcom.com>
 M:     Ajit Khaparde <ajit.khaparde@broadcom.com>
 M:     Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
 M:     Somnath Kotur <somnath.kotur@broadcom.com>
@@ -7518,6 +7516,12 @@ F:       include/uapi/linux/if_hippi.h
 F:     net/802/hippi.c
 F:     drivers/net/hippi/
 
+HISILICON DMA DRIVER
+M:     Zhou Wang <wangzhou1@hisilicon.com>
+L:     dmaengine@vger.kernel.org
+S:     Maintained
+F:     drivers/dma/hisi_dma.c
+
 HISILICON SECURITY ENGINE V2 DRIVER (SEC2)
 M:     Zaibo Xu <xuzaibo@huawei.com>
 L:     linux-crypto@vger.kernel.org
@@ -7575,7 +7579,8 @@ F:        Documentation/admin-guide/perf/hisi-pmu.rst
 
 HISILICON ROCE DRIVER
 M:     Lijun Ou <oulijun@huawei.com>
-M:     Wei Hu(Xavier) <xavier.huwei@huawei.com>
+M:     Wei Hu(Xavier) <huwei87@hisilicon.com>
+M:     Weihang Li <liweihang@huawei.com>
 L:     linux-rdma@vger.kernel.org
 S:     Maintained
 F:     drivers/infiniband/hw/hns/
@@ -8478,7 +8483,6 @@ L:        dmaengine@vger.kernel.org
 S:     Supported
 F:     drivers/dma/idxd/*
 F:     include/uapi/linux/idxd.h
-F:     include/linux/idxd.h
 
 INTEL IDLE DRIVER
 M:     Jacob Pan <jacob.jun.pan@linux.intel.com>
@@ -8685,7 +8689,7 @@ M:        Emmanuel Grumbach <emmanuel.grumbach@intel.com>
 M:     Luca Coelho <luciano.coelho@intel.com>
 M:     Intel Linux Wireless <linuxwifi@intel.com>
 L:     linux-wireless@vger.kernel.org
-W:     http://intellinuxwireless.org
+W:     https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
 S:     Supported
 F:     drivers/net/wireless/intel/iwlwifi/
@@ -9276,8 +9280,8 @@ L:        keyrings@vger.kernel.org
 S:     Supported
 F:     Documentation/security/keys/trusted-encrypted.rst
 F:     include/keys/trusted-type.h
-F:     security/keys/trusted.c
-F:     include/keys/trusted.h
+F:     include/keys/trusted_tpm.h
+F:     security/keys/trusted-keys/
 
 KEYS/KEYRINGS
 M:     David Howells <dhowells@redhat.com>
@@ -11119,7 +11123,7 @@ M:      Thomas Bogendoerfer <tsbogend@alpha.franken.de>
 L:     linux-mips@vger.kernel.org
 W:     http://www.linux-mips.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git
-Q:     http://patchwork.linux-mips.org/project/linux-mips/list/
+Q:     https://patchwork.kernel.org/project/linux-mips/list/
 S:     Maintained
 F:     Documentation/devicetree/bindings/mips/
 F:     Documentation/mips/
@@ -15418,11 +15422,9 @@ F:     drivers/infiniband/sw/siw/
 F:     include/uapi/rdma/siw-abi.h
 
 SOFT-ROCE DRIVER (rxe)
-M:     Moni Shoua <monis@mellanox.com>
+M:     Zhu Yanjun <yanjunz@mellanox.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
-W:     https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
-Q:     http://patchwork.kernel.org/project/linux-rdma/list/
 F:     drivers/infiniband/sw/rxe/
 F:     include/uapi/rdma/rdma_user_rxe.h
 
@@ -16752,7 +16754,7 @@ Q:      http://patchwork.linuxtv.org/project/linux-media/list/
 S:     Maintained
 F:     drivers/media/platform/ti-vpe/
 F:     Documentation/devicetree/bindings/media/ti,vpe.yaml
-       Documentation/devicetree/bindings/media/ti,cal.yaml
+F:     Documentation/devicetree/bindings/media/ti,cal.yaml
 
 TI WILINK WIRELESS DRIVERS
 L:     linux-wireless@vger.kernel.org
index e25db57..4d0711f 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@@ -1804,7 +1804,7 @@ existing-targets := $(wildcard $(sort $(targets)))
 
 -include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
 
-endif # config-targets
+endif # config-build
 endif # mixed-build
 endif # need-sub-make
 
index ff2a393..7124ab8 100644 (file)
@@ -154,7 +154,7 @@ config ARC_CPU_HS
        help
          Support for ARC HS38x Cores based on ARCv2 ISA
          The notable features are:
-           - SMP configurations of upto 4 core with coherency
+           - SMP configurations of up to 4 cores with coherency
            - Optional L2 Cache and IO-Coherency
            - Revised Interrupt Architecture (multiple priorites, reg banks,
                auto stack switch, auto regfile save/restore)
@@ -192,7 +192,7 @@ config ARC_SMP_HALT_ON_RESET
        help
          In SMP configuration cores can be configured as Halt-on-reset
          or they could all start at same time. For Halt-on-reset, non
-         masters are parked until Master kicks them so they can start of
+         masters are parked until Master kicks them so they can start off
          at designated entry point. For other case, all jump to common
          entry point and spin wait for Master's signal.
 
index 07f26ed..f7a978d 100644 (file)
@@ -21,8 +21,6 @@ CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_ARC_PLAT_EZNPS=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=4096
index 5dd470b..bf39a00 100644 (file)
@@ -20,8 +20,6 @@ CONFIG_ISA_ARCOMPACT=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci"
 # CONFIG_COMPACTION is not set
 CONFIG_NET=y
index 3532e86..7121bd7 100644 (file)
@@ -19,8 +19,6 @@ CONFIG_PERF_EVENTS=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_ISA_ARCV2=y
 CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs"
 # CONFIG_COMPACTION is not set
index d90448b..f9863b2 100644 (file)
@@ -14,8 +14,6 @@ CONFIG_PERF_EVENTS=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
 # CONFIG_ARC_TIMERS_64BIT is not set
index 6434725..006bcf8 100644 (file)
@@ -43,6 +43,8 @@ extern void fpu_init_task(struct pt_regs *regs);
 
 #endif /* !CONFIG_ISA_ARCOMPACT */
 
+struct task_struct;
+
 extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
 
 #else  /* !CONFIG_ARC_FPU_SAVE_RESTORE */
index d9ee43c..fe19f1d 100644 (file)
@@ -29,6 +29,8 @@
 .endm
 
 #define ASM_NL          `      /* use '`' to mark new line in macro */
+#define __ALIGN                .align 4
+#define __ALIGN_STR    __stringify(__ALIGN)
 
 /* annotation for data we want in DCCM - if enabled in .config */
 .macro ARCFP_DATA nm
index e1c6474..aa41af6 100644 (file)
@@ -8,11 +8,11 @@
 #include <linux/delay.h>
 #include <linux/root_dev.h>
 #include <linux/clk.h>
-#include <linux/clk-provider.h>
 #include <linux/clocksource.h>
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/cpu.h>
+#include <linux/of_clk.h>
 #include <linux/of_fdt.h>
 #include <linux/of.h>
 #include <linux/cache.h>
index b79886a..d299950 100644 (file)
@@ -104,8 +104,7 @@ static void show_faulting_vma(unsigned long address)
                        if (IS_ERR(nm))
                                nm = "?";
                }
-               pr_info("    @off 0x%lx in [%s]\n"
-                       "    VMA: 0x%08lx to 0x%08lx\n",
+               pr_info("  @off 0x%lx in [%s]  VMA: 0x%08lx to 0x%08lx\n",
                        vma->vm_start < TASK_UNMAPPED_BASE ?
                                address : address - vma->vm_start,
                        nm, vma->vm_start, vma->vm_end);
@@ -120,8 +119,6 @@ static void show_ecr_verbose(struct pt_regs *regs)
        unsigned int vec, cause_code;
        unsigned long address;
 
-       pr_info("\n[ECR   ]: 0x%08lx => ", regs->event);
-
        /* For Data fault, this is data address not instruction addr */
        address = current->thread.fault_address;
 
@@ -130,10 +127,10 @@ static void show_ecr_verbose(struct pt_regs *regs)
 
        /* For DTLB Miss or ProtV, display the memory involved too */
        if (vec == ECR_V_DTLB_MISS) {
-               pr_cont("Invalid %s @ 0x%08lx by insn @ 0x%08lx\n",
+               pr_cont("Invalid %s @ 0x%08lx by insn @ %pS\n",
                       (cause_code == 0x01) ? "Read" :
                       ((cause_code == 0x02) ? "Write" : "EX"),
-                      address, regs->ret);
+                      address, (void *)regs->ret);
        } else if (vec == ECR_V_ITLB_MISS) {
                pr_cont("Insn could not be fetched\n");
        } else if (vec == ECR_V_MACH_CHK) {
@@ -191,31 +188,31 @@ void show_regs(struct pt_regs *regs)
 
        show_ecr_verbose(regs);
 
-       pr_info("[EFA   ]: 0x%08lx\n[BLINK ]: %pS\n[ERET  ]: %pS\n",
-               current->thread.fault_address,
-               (void *)regs->blink, (void *)regs->ret);
-
        if (user_mode(regs))
                show_faulting_vma(regs->ret); /* faulting code, not data */
 
-       pr_info("[STAT32]: 0x%08lx", regs->status32);
+       pr_info("ECR: 0x%08lx EFA: 0x%08lx ERET: 0x%08lx\n",
+               regs->event, current->thread.fault_address, regs->ret);
+
+       pr_info("STAT32: 0x%08lx", regs->status32);
 
 #define STS_BIT(r, bit)        r->status32 & STATUS_##bit##_MASK ? #bit" " : ""
 
 #ifdef CONFIG_ISA_ARCOMPACT
-       pr_cont(" : %2s%2s%2s%2s%2s%2s%2s\n",
+       pr_cont(" [%2s%2s%2s%2s%2s%2s%2s]",
                        (regs->status32 & STATUS_U_MASK) ? "U " : "K ",
                        STS_BIT(regs, DE), STS_BIT(regs, AE),
                        STS_BIT(regs, A2), STS_BIT(regs, A1),
                        STS_BIT(regs, E2), STS_BIT(regs, E1));
 #else
-       pr_cont(" : %2s%2s%2s%2s\n",
+       pr_cont(" [%2s%2s%2s%2s]",
                        STS_BIT(regs, IE),
                        (regs->status32 & STATUS_U_MASK) ? "U " : "K ",
                        STS_BIT(regs, DE), STS_BIT(regs, AE));
 #endif
-       pr_info("BTA: 0x%08lx\t SP: 0x%08lx\t FP: 0x%08lx\n",
-               regs->bta, regs->sp, regs->fp);
+       pr_cont("  BTA: 0x%08lx\n", regs->bta);
+       pr_info("BLK: %pS\n SP: 0x%08lx  FP: 0x%08lx\n",
+               (void *)regs->blink, regs->sp, regs->fp);
        pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
               regs->lp_start, regs->lp_end, regs->lp_count);
 
index db857d0..1fc32b6 100644 (file)
@@ -307,13 +307,15 @@ endif
 ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
 prepare: stack_protector_prepare
 stack_protector_prepare: prepare0
-       $(eval KBUILD_CFLAGS += \
+       $(eval SSP_PLUGIN_CFLAGS := \
                -fplugin-arg-arm_ssp_per_task_plugin-tso=$(shell        \
                        awk '{if ($$2 == "THREAD_SZ_ORDER") print $$3;}'\
                                include/generated/asm-offsets.h)        \
                -fplugin-arg-arm_ssp_per_task_plugin-offset=$(shell     \
                        awk '{if ($$2 == "TI_STACK_CANARY") print $$3;}'\
                                include/generated/asm-offsets.h))
+       $(eval KBUILD_CFLAGS += $(SSP_PLUGIN_CFLAGS))
+       $(eval GCC_PLUGINS_CFLAGS += $(SSP_PLUGIN_CFLAGS))
 endif
 
 all:   $(notdir $(KBUILD_IMAGE))
index da599c3..9c11e74 100644 (file)
@@ -101,7 +101,6 @@ clean-files += piggy_data lib1funcs.S ashldi3.S bswapsdi2.S \
                $(libfdt) $(libfdt_hdrs) hyp-stub.S
 
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS += $(DISABLE_ARM_SSP_PER_TASK_PLUGIN)
 
 ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
@@ -117,7 +116,8 @@ CFLAGS_fdt_ro.o := $(nossp-flags-y)
 CFLAGS_fdt_rw.o := $(nossp-flags-y)
 CFLAGS_fdt_wip.o := $(nossp-flags-y)
 
-ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
+ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin \
+            -I$(obj) $(DISABLE_ARM_SSP_PER_TASK_PLUGIN)
 asflags-y := -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
index b75af21..4c3f606 100644 (file)
 &sdhci {
        #address-cells = <1>;
        #size-cells = <0>;
+       pinctrl-names = "default";
        pinctrl-0 = <&emmc_gpio34 &gpclk2_gpio43>;
        bus-width = <4>;
        mmc-pwrseq = <&wifi_pwrseq>;
index 394c8a7..fd2c766 100644 (file)
@@ -15,6 +15,7 @@
                firmware: firmware {
                        compatible = "raspberrypi,bcm2835-firmware", "simple-bus";
                        mboxes = <&mailbox>;
+                       dma-ranges;
                };
 
                power: power {
index 3931fb0..91d1018 100644 (file)
 
 &cpsw_emac0 {
        phy-handle = <&ethphy0>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
 };
 
 &cpsw_emac1 {
        phy-handle = <&ethphy1>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
 };
 
 &davinci_mdio {
index 9e43d5e..79ccdd4 100644 (file)
 
 &cpsw_emac0 {
        phy-handle = <&ethphy0>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
 };
 
 &cpsw_emac1 {
        phy-handle = <&ethphy1>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
 };
 
 &davinci_mdio {
index 861ab90..c16e183 100644 (file)
 
 &cpsw_emac0 {
        phy-handle = <&ethphy0>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
 };
 
 &cpsw_emac1 {
        phy-handle = <&ethphy1>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
 };
 
 &davinci_mdio {
index 4305051..5f5ee16 100644 (file)
                #address-cells = <1>;
                #size-cells = <1>;
                ranges = <0x0 0x0 0x0 0xc0000000>;
+               dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>;
                ti,hwmods = "l3_main_1", "l3_main_2";
                reg = <0x0 0x44000000 0x0 0x1000000>,
                      <0x0 0x45000000 0x0 0x1000>;
index 31719c0..44f9754 100644 (file)
@@ -33,7 +33,7 @@
                };
        };
 
-       lcd_vdd3_reg: voltage-regulator-6 {
+       lcd_vdd3_reg: voltage-regulator-7 {
                compatible = "regulator-fixed";
                regulator-name = "LCD_VDD_2.2V";
                regulator-min-microvolt = <2200000>;
@@ -42,7 +42,7 @@
                enable-active-high;
        };
 
-       ps_als_reg: voltage-regulator-7 {
+       ps_als_reg: voltage-regulator-8 {
                compatible = "regulator-fixed";
                regulator-name = "LED_A_3.0V";
                regulator-min-microvolt = <3000000>;
index 98cd128..4189e1f 100644 (file)
@@ -13,7 +13,7 @@
 
        /* bootargs are passed in by bootloader */
 
-       cam_vdda_reg: voltage-regulator-6 {
+       cam_vdda_reg: voltage-regulator-7 {
                compatible = "regulator-fixed";
                regulator-name = "CAM_SENSOR_CORE_1.2V";
                regulator-min-microvolt = <1200000>;
index 4d18952..77d8713 100644 (file)
                regulators {
                        vdd_arm: buck1 {
                                regulator-name = "vdd_arm";
-                               regulator-min-microvolt = <730000>;
+                               regulator-min-microvolt = <925000>;
                                regulator-max-microvolt = <1380000>;
                                regulator-initial-mode = <DA9063_BUCK_MODE_SYNC>;
                                regulator-always-on;
 
                        vdd_soc: buck2 {
                                regulator-name = "vdd_soc";
-                               regulator-min-microvolt = <730000>;
+                               regulator-min-microvolt = <1150000>;
                                regulator-max-microvolt = <1380000>;
                                regulator-initial-mode = <DA9063_BUCK_MODE_SYNC>;
                                regulator-always-on;
index b6e82b1..9067e0e 100644 (file)
                reset-gpios = <&gpio6 13 GPIO_ACTIVE_HIGH>; /* gpio173 */
 
                /* gpio_183 with sys_nirq2 pad as wakeup */
-               interrupts-extended = <&gpio6 23 IRQ_TYPE_EDGE_FALLING>,
+               interrupts-extended = <&gpio6 23 IRQ_TYPE_LEVEL_LOW>,
                                      <&omap4_pmx_core 0x160>;
                interrupt-names = "irq", "wakeup";
                wakeup-source;
index c3c6d7d..4089d97 100644 (file)
                compatible = "ti,omap2-onenand";
                reg = <0 0 0x20000>;    /* CS0, offset 0, IO size 128K */
 
+               /*
+                * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported
+                * bootloader set values when booted with v5.1
+                * (OneNAND Manufacturer: Samsung):
+                *
+                *   cs0 GPMC_CS_CONFIG1: 0xfb001202
+                *   cs0 GPMC_CS_CONFIG2: 0x00111100
+                *   cs0 GPMC_CS_CONFIG3: 0x00020200
+                *   cs0 GPMC_CS_CONFIG4: 0x11001102
+                *   cs0 GPMC_CS_CONFIG5: 0x03101616
+                *   cs0 GPMC_CS_CONFIG6: 0x90060000
+                */
                gpmc,sync-read;
                gpmc,sync-write;
                gpmc,burst-length = <16>;
                gpmc,burst-read;
                gpmc,burst-wrap;
                gpmc,burst-write;
-               gpmc,device-width = <2>; /* GPMC_DEVWIDTH_16BIT */
-               gpmc,mux-add-data = <2>; /* GPMC_MUX_AD */
+               gpmc,device-width = <2>;
+               gpmc,mux-add-data = <2>;
                gpmc,cs-on-ns = <0>;
-               gpmc,cs-rd-off-ns = <87>;
-               gpmc,cs-wr-off-ns = <87>;
+               gpmc,cs-rd-off-ns = <102>;
+               gpmc,cs-wr-off-ns = <102>;
                gpmc,adv-on-ns = <0>;
-               gpmc,adv-rd-off-ns = <10>;
-               gpmc,adv-wr-off-ns = <10>;
-               gpmc,oe-on-ns = <15>;
-               gpmc,oe-off-ns = <87>;
+               gpmc,adv-rd-off-ns = <12>;
+               gpmc,adv-wr-off-ns = <12>;
+               gpmc,oe-on-ns = <12>;
+               gpmc,oe-off-ns = <102>;
                gpmc,we-on-ns = <0>;
-               gpmc,we-off-ns = <87>;
-               gpmc,rd-cycle-ns = <112>;
-               gpmc,wr-cycle-ns = <112>;
-               gpmc,access-ns = <81>;
-               gpmc,page-burst-access-ns = <15>;
+               gpmc,we-off-ns = <102>;
+               gpmc,rd-cycle-ns = <132>;
+               gpmc,wr-cycle-ns = <132>;
+               gpmc,access-ns = <96>;
+               gpmc,page-burst-access-ns = <18>;
                gpmc,bus-turnaround-ns = <0>;
                gpmc,cycle2cycle-delay-ns = <0>;
                gpmc,wait-monitoring-ns = <0>;
-               gpmc,clk-activation-ns = <5>;
-               gpmc,wr-data-mux-bus-ns = <30>;
-               gpmc,wr-access-ns = <81>;
+               gpmc,clk-activation-ns = <6>;
+               gpmc,wr-data-mux-bus-ns = <36>;
+               gpmc,wr-access-ns = <96>;
                gpmc,sync-clk-ps = <15000>;
 
                /*
index d0ecf54..a7562d3 100644 (file)
                #address-cells = <1>;
                #size-cells = <1>;
                ranges = <0 0 0 0xc0000000>;
+               dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>;
                ti,hwmods = "l3_main_1", "l3_main_2", "l3_main_3";
                reg = <0 0x44000000 0 0x2000>,
                      <0 0x44800000 0 0x3000>,
index 9f6c2b6..0755e58 100644 (file)
                                        interrupt-controller;
                                        reg = <0 0x200>;
                                        #interrupt-cells = <1>;
-                                       valid-mask = <0xFFFFFFFF>;
-                                       clear-mask = <0>;
+                                       valid-mask = <0xffffffff>;
+                                       clear-mask = <0xffffffff>;
                                };
 
                                timer0: timer@200 {
index c9b3277..90846a7 100644 (file)
                                        reg = <0 0x200>;
                                        interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
                                        #interrupt-cells = <1>;
-                                       valid-mask = <0xFFFFFFFF>;
-                                       clear-mask = <0>;
+                                       valid-mask = <0xffffffff>;
+                                       clear-mask = <0xffffffff>;
                                };
 
                                timer0: timer@200 {
index 1532a0e..a2c37ad 100644 (file)
                };
 
                crypto: crypto-engine@1c15000 {
-                       compatible = "allwinner,sun4i-a10-crypto";
+                       compatible = "allwinner,sun8i-a33-crypto";
                        reg = <0x01c15000 0x1000>;
                        interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&ccu CLK_BUS_SS>, <&ccu CLK_SS>;
index 2fd31a0..e8b3669 100644 (file)
 };
 
 &reg_dldo3 {
-       regulator-min-microvolt = <2800000>;
-       regulator-max-microvolt = <2800000>;
+       regulator-min-microvolt = <1800000>;
+       regulator-max-microvolt = <1800000>;
        regulator-name = "vdd-csi";
 };
 
 };
 
 &usbphy {
-       usb0_id_det-gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>; /* PH11 */
+       usb0_id_det-gpios = <&pio 7 11 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH11 */
+       usb0_vbus_power-supply = <&usb_power_supply>;
        usb0_vbus-supply = <&reg_drivevbus>;
        usb1_vbus-supply = <&reg_vmain>;
        usb2_vbus-supply = <&reg_vmain>;
index 74ac7ee..e7b9bef 100644 (file)
                        reg = <0x01c30000 0x104>;
                        interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "macirq";
-                       resets = <&ccu CLK_BUS_EMAC>;
-                       reset-names = "stmmaceth";
-                       clocks = <&ccu RST_BUS_EMAC>;
+                       clocks = <&ccu CLK_BUS_EMAC>;
                        clock-names = "stmmaceth";
+                       resets = <&ccu RST_BUS_EMAC>;
+                       reset-names = "stmmaceth";
                        status = "disabled";
 
                        mdio: mdio {
index 8f09a24..a9d5d6d 100644 (file)
                        interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
                };
 
+               spi0: spi@1c05000 {
+                       compatible = "allwinner,sun8i-r40-spi",
+                                    "allwinner,sun8i-h3-spi";
+                       reg = <0x01c05000 0x1000>;
+                       interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&ccu CLK_BUS_SPI0>, <&ccu CLK_SPI0>;
+                       clock-names = "ahb", "mod";
+                       resets = <&ccu RST_BUS_SPI0>;
+                       status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+               };
+
+               spi1: spi@1c06000 {
+                       compatible = "allwinner,sun8i-r40-spi",
+                                    "allwinner,sun8i-h3-spi";
+                       reg = <0x01c06000 0x1000>;
+                       interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&ccu CLK_BUS_SPI1>, <&ccu CLK_SPI1>;
+                       clock-names = "ahb", "mod";
+                       resets = <&ccu RST_BUS_SPI1>;
+                       status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+               };
+
                csi0: csi@1c09000 {
                        compatible = "allwinner,sun8i-r40-csi0",
                                     "allwinner,sun7i-a20-csi0";
                        resets = <&ccu RST_BUS_CE>;
                };
 
+               spi2: spi@1c17000 {
+                       compatible = "allwinner,sun8i-r40-spi",
+                                    "allwinner,sun8i-h3-spi";
+                       reg = <0x01c17000 0x1000>;
+                       interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&ccu CLK_BUS_SPI2>, <&ccu CLK_SPI2>;
+                       clock-names = "ahb", "mod";
+                       resets = <&ccu RST_BUS_SPI2>;
+                       status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+               };
+
+               ahci: sata@1c18000 {
+                       compatible = "allwinner,sun8i-r40-ahci";
+                       reg = <0x01c18000 0x1000>;
+                       interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&ccu CLK_BUS_SATA>, <&ccu CLK_SATA>;
+                       resets = <&ccu RST_BUS_SATA>;
+                       reset-names = "ahci";
+                       status = "disabled";
+               };
+
                ehci1: usb@1c19000 {
                        compatible = "allwinner,sun8i-r40-ehci", "generic-ehci";
                        reg = <0x01c19000 0x100>;
                        status = "disabled";
                };
 
+               spi3: spi@1c1f000 {
+                       compatible = "allwinner,sun8i-r40-spi",
+                                    "allwinner,sun8i-h3-spi";
+                       reg = <0x01c1f000 0x1000>;
+                       interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&ccu CLK_BUS_SPI3>, <&ccu CLK_SPI3>;
+                       clock-names = "ahb", "mod";
+                       resets = <&ccu RST_BUS_SPI3>;
+                       status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+               };
+
                ccu: clock@1c20000 {
                        compatible = "allwinner,sun8i-r40-ccu";
                        reg = <0x01c20000 0x400>;
                        #size-cells = <0>;
                };
 
-               spi0: spi@1c05000 {
-                       compatible = "allwinner,sun8i-r40-spi",
-                                    "allwinner,sun8i-h3-spi";
-                       reg = <0x01c05000 0x1000>;
-                       interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&ccu CLK_BUS_SPI0>, <&ccu CLK_SPI0>;
-                       clock-names = "ahb", "mod";
-                       resets = <&ccu RST_BUS_SPI0>;
-                       status = "disabled";
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-               };
-
-               spi1: spi@1c06000 {
-                       compatible = "allwinner,sun8i-r40-spi",
-                                    "allwinner,sun8i-h3-spi";
-                       reg = <0x01c06000 0x1000>;
-                       interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&ccu CLK_BUS_SPI1>, <&ccu CLK_SPI1>;
-                       clock-names = "ahb", "mod";
-                       resets = <&ccu RST_BUS_SPI1>;
-                       status = "disabled";
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-               };
-
-               spi2: spi@1c07000 {
-                       compatible = "allwinner,sun8i-r40-spi",
-                                    "allwinner,sun8i-h3-spi";
-                       reg = <0x01c07000 0x1000>;
-                       interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&ccu CLK_BUS_SPI2>, <&ccu CLK_SPI2>;
-                       clock-names = "ahb", "mod";
-                       resets = <&ccu RST_BUS_SPI2>;
-                       status = "disabled";
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-               };
-
-               spi3: spi@1c0f000 {
-                       compatible = "allwinner,sun8i-r40-spi",
-                                    "allwinner,sun8i-h3-spi";
-                       reg = <0x01c0f000 0x1000>;
-                       interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&ccu CLK_BUS_SPI3>, <&ccu CLK_SPI3>;
-                       clock-names = "ahb", "mod";
-                       resets = <&ccu RST_BUS_SPI3>;
-                       status = "disabled";
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-               };
-
-               ahci: sata@1c18000 {
-                       compatible = "allwinner,sun8i-r40-ahci";
-                       reg = <0x01c18000 0x1000>;
-                       interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&ccu CLK_BUS_SATA>, <&ccu CLK_SATA>;
-                       resets = <&ccu RST_BUS_SATA>;
-                       reset-names = "ahci";
-                       status = "disabled";
-
-               };
-
                gmac: ethernet@1c50000 {
                        compatible = "allwinner,sun8i-r40-gmac";
                        syscon = <&ccu>;
index c89ac1b..e0330a2 100644 (file)
@@ -95,6 +95,8 @@ static bool __init cntvct_functional(void)
         */
        np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
        if (!np)
+               np = of_find_compatible_node(NULL, NULL, "arm,armv8-timer");
+       if (!np)
                goto out_put;
 
        if (of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
index 95b2e1c..f8016e3 100644 (file)
@@ -118,7 +118,7 @@ ENTRY(arm_copy_from_user)
 
 ENDPROC(arm_copy_from_user)
 
-       .pushsection .fixup,"ax"
+       .pushsection .text.fixup,"ax"
        .align 0
        copy_abort_preamble
        ldmfd   sp!, {r1, r2, r3}
index 0bf375e..55b71bb 100644 (file)
@@ -53,7 +53,7 @@
                 * PSCI node is not added default, U-boot will add missing
                 * parts if it determines to use PSCI.
                 */
-               entry-method = "arm,psci";
+               entry-method = "psci";
 
                CPU_PW20: cpu-pw20 {
                          compatible = "arm,idle-state";
index 6082ae0..d237162 100644 (file)
@@ -20,6 +20,8 @@
 };
 
 &fman0 {
+       fsl,erratum-a050385;
+
        /* these aliases provide the FMan ports mapping */
        enet0: ethernet@e0000 {
        };
index 4223a23..dde50c8 100644 (file)
 
        ethernet@e4000 {
                phy-handle = <&rgmii_phy1>;
-               phy-connection-type = "rgmii-txid";
+               phy-connection-type = "rgmii-id";
        };
 
        ethernet@e6000 {
                phy-handle = <&rgmii_phy2>;
-               phy-connection-type = "rgmii-txid";
+               phy-connection-type = "rgmii-id";
        };
 
        ethernet@e8000 {
index dbc23d6..d53ccc5 100644 (file)
 &fman0 {
        ethernet@e4000 {
                phy-handle = <&rgmii_phy1>;
-               phy-connection-type = "rgmii";
+               phy-connection-type = "rgmii-id";
        };
 
        ethernet@e6000 {
                phy-handle = <&rgmii_phy2>;
-               phy-connection-type = "rgmii";
+               phy-connection-type = "rgmii-id";
        };
 
        ethernet@e8000 {
index cd80756..2c590ca 100644 (file)
        };
 
        idle-states {
-               entry-method = "arm,psci";
+               entry-method = "psci";
                CORE_PD: core-pd {
                        compatible = "arm,idle-state";
                        entry-latency-us = <4000>;
index c1f9660..37ca3e8 100644 (file)
@@ -55,10 +55,10 @@ static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
                        break;
                }
                chacha_4block_xor_neon(state, dst, src, nrounds, l);
-               bytes -= CHACHA_BLOCK_SIZE * 5;
-               src += CHACHA_BLOCK_SIZE * 5;
-               dst += CHACHA_BLOCK_SIZE * 5;
-               state[12] += 5;
+               bytes -= l;
+               src += l;
+               dst += l;
+               state[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE);
        }
 }
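
A generic sketch of the bookkeeping this hunk corrects (plain C, not the NEON driver itself): when the final pass may handle fewer bytes than the nominal five-block chunk, the pointers and the remaining-byte count must advance by the amount actually consumed, which is what the switch to `l` above does. The chunk size and callback here are illustrative placeholders.

    #include <stddef.h>

    #define EXAMPLE_CHUNK (5 * 64)      /* five 64-byte ChaCha blocks */

    static void process_stream(unsigned char *dst, const unsigned char *src,
                                size_t bytes,
                                void (*do_chunk)(unsigned char *dst,
                                                 const unsigned char *src,
                                                 size_t len))
    {
            while (bytes) {
                    size_t l = bytes < EXAMPLE_CHUNK ? bytes : EXAMPLE_CHUNK;

                    do_chunk(dst, src, l);
                    bytes -= l;     /* advance by what was consumed ...      */
                    src += l;       /* ... not by the nominal EXAMPLE_CHUNK, */
                    dst += l;       /* which overshoots on the final pass    */
            }
    }
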
 
index 324e7d5..5e5dc05 100644 (file)
@@ -221,7 +221,7 @@ alternative_endif
 
 .macro user_alt, label, oldinstr, newinstr, cond
 9999:  alternative_insn "\oldinstr", "\newinstr", \cond
-       _ASM_EXTABLE 9999b, \label
+       _asm_extable 9999b, \label
 .endm
 
 /*
index e4d8624..d79ce6d 100644 (file)
@@ -29,11 +29,9 @@ typedef struct {
  */
 #define ASID(mm)       ((mm)->context.id.counter & 0xffff)
 
-extern bool arm64_use_ng_mappings;
-
 static inline bool arm64_kernel_unmapped_at_el0(void)
 {
-       return arm64_use_ng_mappings;
+       return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
 }
 
 typedef void (*bp_hardening_cb_t)(void);
index 6f87839..1305e28 100644 (file)
 
 #include <asm/pgtable-types.h>
 
+extern bool arm64_use_ng_mappings;
+
 #define _PROT_DEFAULT          (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define _PROT_SECT_DEFAULT     (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
-#define PTE_MAYBE_NG           (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
-#define PMD_MAYBE_NG           (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
+#define PTE_MAYBE_NG           (arm64_use_ng_mappings ? PTE_NG : 0)
+#define PMD_MAYBE_NG           (arm64_use_ng_mappings ? PMD_SECT_NG : 0)
 
 #define PROT_DEFAULT           (_PROT_DEFAULT | PTE_MAYBE_NG)
 #define PROT_SECT_DEFAULT      (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
index 1dd22da..803039d 100644 (file)
@@ -25,8 +25,8 @@
 #define __NR_compat_gettimeofday       78
 #define __NR_compat_sigreturn          119
 #define __NR_compat_rt_sigreturn       173
-#define __NR_compat_clock_getres       247
 #define __NR_compat_clock_gettime      263
+#define __NR_compat_clock_getres       264
 #define __NR_compat_clock_gettime64    403
 #define __NR_compat_clock_getres_time64        406
 
index d4ed9a1..5407bf5 100644 (file)
@@ -958,11 +958,22 @@ void tick_broadcast(const struct cpumask *mask)
 }
 #endif
 
+/*
+ * The number of CPUs online, not counting this CPU (which may not be
+ * fully online and so not counted in num_online_cpus()).
+ */
+static inline unsigned int num_other_online_cpus(void)
+{
+       unsigned int this_cpu_online = cpu_online(smp_processor_id());
+
+       return num_online_cpus() - this_cpu_online;
+}
+
 void smp_send_stop(void)
 {
        unsigned long timeout;
 
-       if (num_online_cpus() > 1) {
+       if (num_other_online_cpus()) {
                cpumask_t mask;
 
                cpumask_copy(&mask, cpu_online_mask);
@@ -975,10 +986,10 @@ void smp_send_stop(void)
 
        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
-       while (num_online_cpus() > 1 && timeout--)
+       while (num_other_online_cpus() && timeout--)
                udelay(1);
 
-       if (num_online_cpus() > 1)
+       if (num_other_online_cpus())
                pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
                        cpumask_pr_args(cpu_online_mask));
 
@@ -1001,7 +1012,11 @@ void crash_smp_send_stop(void)
 
        cpus_stopped = 1;
 
-       if (num_online_cpus() == 1) {
+       /*
+        * If this cpu is the only one alive at this point in time, online or
+        * not, there are no stop messages to be sent around, so just back out.
+        */
+       if (num_other_online_cpus() == 0) {
                sdei_mask_local_cpu();
                return;
        }
@@ -1009,7 +1024,7 @@ void crash_smp_send_stop(void)
        cpumask_copy(&mask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &mask);
 
-       atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+       atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());
 
        pr_crit("SMP: stopping secondary CPUs\n");
        smp_cross_call(&mask, IPI_CPU_CRASH_STOP);
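
For the reasoning in the num_other_online_cpus() comment above, a toy user-space illustration of the off-by-one it avoids (hypothetical CPU counts, not kernel code):

    #include <stdio.h>

    /* online: CPUs currently set in the online mask;
     * self_counted: whether the CPU running this code is among them. */
    static unsigned int other_online(unsigned int online, int self_counted)
    {
            return online - (self_counted ? 1 : 0);
    }

    int main(void)
    {
            /* normal stop path: this CPU is still online -> 3 others */
            printf("%u\n", other_online(4, 1));
            /* crash/early path: this CPU already left the mask -> 4 others,
             * where a hard-coded "online - 1" would report only 3 */
            printf("%u\n", other_online(4, 0));
            return 0;
    }
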
index 37b9316..c340f94 100644 (file)
@@ -4,6 +4,8 @@
 #include "jz4780.dtsi"
 #include <dt-bindings/clock/ingenic,tcu.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/regulator/active-semi,8865-regulator.h>
 
 / {
        compatible = "img,ci20", "ingenic,jz4780";
 
                regulators {
                        vddcore: SUDCDC1 {
-                               regulator-name = "VDDCORE";
+                               regulator-name = "DCDC_REG1";
                                regulator-min-microvolt = <1100000>;
                                regulator-max-microvolt = <1100000>;
                                regulator-always-on;
                        };
                        vddmem: SUDCDC2 {
-                               regulator-name = "VDDMEM";
+                               regulator-name = "DCDC_REG2";
                                regulator-min-microvolt = <1500000>;
                                regulator-max-microvolt = <1500000>;
                                regulator-always-on;
                        };
                        vcc_33: SUDCDC3 {
-                               regulator-name = "VCC33";
+                               regulator-name = "DCDC_REG3";
                                regulator-min-microvolt = <3300000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-always-on;
                        };
                        vcc_50: SUDCDC4 {
-                               regulator-name = "VCC50";
+                               regulator-name = "SUDCDC_REG4";
                                regulator-min-microvolt = <5000000>;
                                regulator-max-microvolt = <5000000>;
                                regulator-always-on;
                        };
                        vcc_25: LDO_REG5 {
-                               regulator-name = "VCC25";
+                               regulator-name = "LDO_REG5";
                                regulator-min-microvolt = <2500000>;
                                regulator-max-microvolt = <2500000>;
                                regulator-always-on;
                        };
                        wifi_io: LDO_REG6 {
-                               regulator-name = "WIFIIO";
+                               regulator-name = "LDO_REG6";
                                regulator-min-microvolt = <2500000>;
                                regulator-max-microvolt = <2500000>;
                                regulator-always-on;
                        };
                        vcc_28: LDO_REG7 {
-                               regulator-name = "VCC28";
+                               regulator-name = "LDO_REG7";
                                regulator-min-microvolt = <2800000>;
                                regulator-max-microvolt = <2800000>;
                                regulator-always-on;
                        };
                        vcc_15: LDO_REG8 {
-                               regulator-name = "VCC15";
+                               regulator-name = "LDO_REG8";
                                regulator-min-microvolt = <1500000>;
                                regulator-max-microvolt = <1500000>;
                                regulator-always-on;
                        };
-                       vcc_18: LDO_REG9 {
-                               regulator-name = "VCC18";
-                               regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <1800000>;
+                       vrtc_18: LDO_REG9 {
+                               regulator-name = "LDO_REG9";
+                               /* Despite the datasheet stating 3.3V
+                                * for REG9 and the driver expecting that,
+                                * REG9 outputs 1.8V.
+                                * Likely the CI20 uses a proprietary
+                                * factory programmed chip variant.
+                                * Since this is a simple on/off LDO the
+                                * exact values do not matter.
+                                */
+                               regulator-min-microvolt = <3300000>;
+                               regulator-max-microvolt = <3300000>;
                                regulator-always-on;
                        };
                        vcc_11: LDO_REG10 {
-                               regulator-name = "VCC11";
-                               regulator-min-microvolt = <1100000>;
-                               regulator-max-microvolt = <1100000>;
+                               regulator-name = "LDO_REG10";
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <1200000>;
                                regulator-always-on;
                        };
                };
                rtc@51 {
                        compatible = "nxp,pcf8563";
                        reg = <0x51>;
-                       interrupts = <110>;
+
+                       interrupt-parent = <&gpf>;
+                       interrupts = <30 IRQ_TYPE_LEVEL_LOW>;
                };
 };
 
index 1ac2752..a7b469d 100644 (file)
@@ -605,7 +605,8 @@ static void __init bootcmdline_init(char **cmdline_p)
         * If we're configured to take boot arguments from DT, look for those
         * now.
         */
-       if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB))
+       if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
+           IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
                of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
 #endif
 
index 71034b5..3801a2e 100644 (file)
@@ -79,6 +79,11 @@ config MMU
 config STACK_GROWSUP
        def_bool y
 
+config ARCH_DEFCONFIG
+       string
+       default "arch/parisc/configs/generic-32bit_defconfig" if !64BIT
+       default "arch/parisc/configs/generic-64bit_defconfig" if 64BIT
+
 config GENERIC_LOCKBREAK
        bool
        default y
index dca8f2d..628cd8b 100644 (file)
@@ -34,6 +34,13 @@ CC_ARCHES    = hppa hppa2.0 hppa1.1
 LD_BFD         := elf32-hppa-linux
 endif
 
+# select defconfig based on actual architecture
+ifeq ($(shell uname -m),parisc64)
+       KBUILD_DEFCONFIG := generic-64bit_defconfig
+else
+       KBUILD_DEFCONFIG := generic-32bit_defconfig
+endif
+
 export LD_BFD
 
 ifneq ($(SUBARCH),$(UTS_MACHINE))
index 729a0f1..db3a873 100644 (file)
@@ -1817,6 +1817,7 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 
+       kvmppc_mmu_destroy_pr(vcpu);
        free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        kfree(vcpu->arch.shadow_vcpu);
index 1af96fb..302e9dc 100644 (file)
@@ -759,7 +759,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        return 0;
 
 out_vcpu_uninit:
-       kvmppc_mmu_destroy(vcpu);
        kvmppc_subarch_vcpu_uninit(vcpu);
        return err;
 }
@@ -792,7 +791,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
        kvmppc_core_vcpu_free(vcpu);
 
-       kvmppc_mmu_destroy(vcpu);
        kvmppc_subarch_vcpu_uninit(vcpu);
 }
 
index db5664d..d2bed3f 100644 (file)
@@ -120,12 +120,6 @@ static void __init kasan_unmap_early_shadow_vmalloc(void)
        unsigned long k_cur;
        phys_addr_t pa = __pa(kasan_early_shadow_page);
 
-       if (!early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
-               int ret = kasan_init_shadow_page_tables(k_start, k_end);
-
-               if (ret)
-                       panic("kasan: kasan_init_shadow_page_tables() failed");
-       }
        for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
                pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
                pte_t *ptep = pte_offset_kernel(pmd, k_cur);
@@ -143,7 +137,8 @@ void __init kasan_mmu_init(void)
        int ret;
        struct memblock_region *reg;
 
-       if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+       if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ||
+           IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
                ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
                if (ret)
index 1a3b5a5..cd5db57 100644 (file)
@@ -50,7 +50,6 @@ config RISCV
        select PCI_DOMAINS_GENERIC if PCI
        select PCI_MSI if PCI
        select RISCV_TIMER
-       select UACCESS_MEMCPY if !MMU
        select GENERIC_IRQ_MULTI_HANDLER
        select GENERIC_ARCH_TOPOLOGY if SMP
        select ARCH_HAS_PTE_SPECIAL
index 3078b2d..a131174 100644 (file)
@@ -12,20 +12,6 @@ config SOC_SIFIVE
 
 config SOC_VIRT
        bool "QEMU Virt Machine"
-       select VIRTIO_PCI
-       select VIRTIO_BALLOON
-       select VIRTIO_MMIO
-       select VIRTIO_CONSOLE
-       select VIRTIO_NET
-       select NET_9P_VIRTIO
-       select VIRTIO_BLK
-       select SCSI_VIRTIO
-       select DRM_VIRTIO_GPU
-       select HW_RANDOM_VIRTIO
-       select RPMSG_CHAR
-       select RPMSG_VIRTIO
-       select CRYPTO_DEV_VIRTIO
-       select VIRTIO_INPUT
        select POWER_RESET_SYSCON
        select POWER_RESET_SYSCON_POWEROFF
        select GOLDFISH
index c8f0842..2557c53 100644 (file)
@@ -31,6 +31,7 @@ CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NETLINK_DIAG=y
 CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
 CONFIG_PCI=y
 CONFIG_PCIEPORTBUS=y
 CONFIG_PCI_HOST_GENERIC=y
@@ -38,12 +39,15 @@ CONFIG_PCIE_XILINX=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
+CONFIG_SCSI_VIRTIO=y
 CONFIG_ATA=y
 CONFIG_SATA_AHCI=y
 CONFIG_SATA_AHCI_PLATFORM=y
 CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
 CONFIG_MACB=y
 CONFIG_E1000E=y
 CONFIG_R8169=y
@@ -54,13 +58,16 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
 CONFIG_HVC_RISCV_SBI=y
+CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=y
 CONFIG_SPI=y
 CONFIG_SPI_SIFIVE=y
 # CONFIG_PTP_1588_CLOCK is not set
 CONFIG_POWER_RESET=y
 CONFIG_DRM=y
 CONFIG_DRM_RADEON=y
+CONFIG_DRM_VIRTIO_GPU=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
@@ -74,6 +81,12 @@ CONFIG_USB_UAS=y
 CONFIG_MMC=y
 CONFIG_MMC_SPI=y
 CONFIG_RTC_CLASS=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_INPUT=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_VIRTIO=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_AUTOFS4_FS=y
@@ -88,16 +101,17 @@ CONFIG_NFS_V4_2=y
 CONFIG_ROOT_NFS=y
 CONFIG_9P_FS=y
 CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SCHED_STACK_END_CHECK=y
 CONFIG_DEBUG_VM=y
 CONFIG_DEBUG_VM_PGFLAGS=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_DEBUG_PER_CPU_MAPS=y
 CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_WQ_WATCHDOG=y
-CONFIG_SCHED_STACK_END_CHECK=y
 CONFIG_DEBUG_TIMEKEEPING=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_SPINLOCK=y
index a844920..0292879 100644 (file)
@@ -31,6 +31,7 @@ CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NETLINK_DIAG=y
 CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
 CONFIG_PCI=y
 CONFIG_PCIEPORTBUS=y
 CONFIG_PCI_HOST_GENERIC=y
@@ -38,12 +39,15 @@ CONFIG_PCIE_XILINX=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
+CONFIG_SCSI_VIRTIO=y
 CONFIG_ATA=y
 CONFIG_SATA_AHCI=y
 CONFIG_SATA_AHCI_PLATFORM=y
 CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
 CONFIG_MACB=y
 CONFIG_E1000E=y
 CONFIG_R8169=y
@@ -54,11 +58,14 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
 CONFIG_HVC_RISCV_SBI=y
+CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=y
 # CONFIG_PTP_1588_CLOCK is not set
 CONFIG_POWER_RESET=y
 CONFIG_DRM=y
 CONFIG_DRM_RADEON=y
+CONFIG_DRM_VIRTIO_GPU=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
@@ -70,6 +77,12 @@ CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_UAS=y
 CONFIG_RTC_CLASS=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_INPUT=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_VIRTIO=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_AUTOFS4_FS=y
@@ -84,16 +97,17 @@ CONFIG_NFS_V4_2=y
 CONFIG_ROOT_NFS=y
 CONFIG_9P_FS=y
 CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SCHED_STACK_END_CHECK=y
 CONFIG_DEBUG_VM=y
 CONFIG_DEBUG_VM_PGFLAGS=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_DEBUG_PER_CPU_MAPS=y
 CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_WQ_WATCHDOG=y
-CONFIG_SCHED_STACK_END_CHECK=y
 CONFIG_DEBUG_TIMEKEEPING=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_SPINLOCK=y
index 6eaa2ee..a279b17 100644 (file)
@@ -15,12 +15,12 @@ static inline void clint_send_ipi_single(unsigned long hartid)
        writel(1, clint_ipi_base + hartid);
 }
 
-static inline void clint_send_ipi_mask(const struct cpumask *hartid_mask)
+static inline void clint_send_ipi_mask(const struct cpumask *mask)
 {
-       int hartid;
+       int cpu;
 
-       for_each_cpu(hartid, hartid_mask)
-               clint_send_ipi_single(hartid);
+       for_each_cpu(cpu, mask)
+               clint_send_ipi_single(cpuid_to_hartid_map(cpu));
 }
 
 static inline void clint_clear_ipi(unsigned long hartid)
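
As a side note on the mask fix above, a minimal sketch of the translation it performs (the mapping table and helper names are hypothetical, not the kernel's): cpumasks are indexed by logical CPU id, while the CLINT IPI registers are indexed by hart id, so each set bit must be translated before the per-hart write.

    #include <stdint.h>

    #define EXAMPLE_NR_CPUS 4

    /* hypothetical logical-cpu -> hart-id table for a four-core part */
    static const unsigned int example_hartid_of[EXAMPLE_NR_CPUS] = { 1, 2, 3, 4 };

    static void example_send_ipi_mask(uint32_t cpu_mask,
                                      void (*send_to_hart)(unsigned int hartid))
    {
            unsigned int cpu;

            for (cpu = 0; cpu < EXAMPLE_NR_CPUS; cpu++)
                    if (cpu_mask & (1u << cpu))
                            send_to_hart(example_hartid_of[cpu]);
    }
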
index e430415..393f201 100644 (file)
 #include <asm/tlbflush.h>
 #include <linux/mm_types.h>
 
+#ifdef CONFIG_MMU
+
+#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
+#define VMALLOC_END      (PAGE_OFFSET - 1)
+#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
+
+#define BPF_JIT_REGION_SIZE    (SZ_128M)
+#define BPF_JIT_REGION_START   (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_END     (VMALLOC_END)
+
+/*
+ * Roughly size the vmemmap space to be large enough to fit enough
+ * struct pages to map half the virtual address space. Then
+ * position vmemmap directly below the VMALLOC region.
+ */
+#define VMEMMAP_SHIFT \
+       (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
+#define VMEMMAP_SIZE   BIT(VMEMMAP_SHIFT)
+#define VMEMMAP_END    (VMALLOC_START - 1)
+#define VMEMMAP_START  (VMALLOC_START - VMEMMAP_SIZE)
+
+/*
+ * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
+ * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
+ */
+#define vmemmap                ((struct page *)VMEMMAP_START)
+
+#define PCI_IO_SIZE      SZ_16M
+#define PCI_IO_END       VMEMMAP_START
+#define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)
+
+#define FIXADDR_TOP      PCI_IO_START
+#ifdef CONFIG_64BIT
+#define FIXADDR_SIZE     PMD_SIZE
+#else
+#define FIXADDR_SIZE     PGDIR_SIZE
+#endif
+#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
+
+#endif
+
 #ifdef CONFIG_64BIT
 #include <asm/pgtable-64.h>
 #else
@@ -90,31 +131,6 @@ extern pgd_t swapper_pg_dir[];
 #define __S110 PAGE_SHARED_EXEC
 #define __S111 PAGE_SHARED_EXEC
 
-#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
-#define VMALLOC_END      (PAGE_OFFSET - 1)
-#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
-
-#define BPF_JIT_REGION_SIZE    (SZ_128M)
-#define BPF_JIT_REGION_START   (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
-#define BPF_JIT_REGION_END     (VMALLOC_END)
-
-/*
- * Roughly size the vmemmap space to be large enough to fit enough
- * struct pages to map half the virtual address space. Then
- * position vmemmap directly below the VMALLOC region.
- */
-#define VMEMMAP_SHIFT \
-       (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
-#define VMEMMAP_SIZE   BIT(VMEMMAP_SHIFT)
-#define VMEMMAP_END    (VMALLOC_START - 1)
-#define VMEMMAP_START  (VMALLOC_START - VMEMMAP_SIZE)
-
-/*
- * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
- * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
- */
-#define vmemmap                ((struct page *)VMEMMAP_START)
-
 static inline int pmd_present(pmd_t pmd)
 {
        return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
@@ -432,18 +448,6 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
 
-#define PCI_IO_SIZE      SZ_16M
-#define PCI_IO_END       VMEMMAP_START
-#define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)
-
-#define FIXADDR_TOP      PCI_IO_START
-#ifdef CONFIG_64BIT
-#define FIXADDR_SIZE     PMD_SIZE
-#else
-#define FIXADDR_SIZE     PGDIR_SIZE
-#endif
-#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
-
 /*
  * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
  * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
index f462a18..8ce9d60 100644 (file)
 /*
  * User space memory access functions
  */
+
+extern unsigned long __must_check __asm_copy_to_user(void __user *to,
+       const void *from, unsigned long n);
+extern unsigned long __must_check __asm_copy_from_user(void *to,
+       const void __user *from, unsigned long n);
+
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       return __asm_copy_from_user(to, from, n);
+}
+
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+       return __asm_copy_to_user(to, from, n);
+}
+
 #ifdef CONFIG_MMU
 #include <linux/errno.h>
 #include <linux/compiler.h>
@@ -367,24 +385,6 @@ do {                                                               \
                -EFAULT;                                        \
 })
 
-
-extern unsigned long __must_check __asm_copy_to_user(void __user *to,
-       const void *from, unsigned long n);
-extern unsigned long __must_check __asm_copy_from_user(void *to,
-       const void __user *from, unsigned long n);
-
-static inline unsigned long
-raw_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-       return __asm_copy_from_user(to, from, n);
-}
-
-static inline unsigned long
-raw_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-       return __asm_copy_to_user(to, from, n);
-}
-
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
 extern long __must_check strlen_user(const char __user *str);
index eb878ab..e0a6293 100644 (file)
@@ -96,7 +96,7 @@ static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
        if (IS_ENABLED(CONFIG_RISCV_SBI))
                sbi_send_ipi(cpumask_bits(&hartid_mask));
        else
-               clint_send_ipi_mask(&hartid_mask);
+               clint_send_ipi_mask(mask);
 }
 
 static void send_ipi_single(int cpu, enum ipi_message_type op)
index 47e7a82..0d0db80 100644 (file)
@@ -2,5 +2,5 @@
 lib-y                  += delay.o
 lib-y                  += memcpy.o
 lib-y                  += memset.o
-lib-$(CONFIG_MMU)      += uaccess.o
+lib-y                  += uaccess.o
 lib-$(CONFIG_64BIT)    += tishift.o
index d7ff30e..c2e6d4b 100644 (file)
@@ -3268,7 +3268,10 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
        /* Initial reset is a superset of the normal reset */
        kvm_arch_vcpu_ioctl_normal_reset(vcpu);
 
-       /* this equals initial cpu reset in pop, but we don't switch to ESA */
+       /*
+        * This equals initial cpu reset in pop, but we don't switch to ESA.
+        * We do not only reset the internal data, but also ...
+        */
        vcpu->arch.sie_block->gpsw.mask = 0;
        vcpu->arch.sie_block->gpsw.addr = 0;
        kvm_s390_set_prefix(vcpu, 0);
@@ -3278,6 +3281,19 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
        memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
        vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
        vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
+
+       /* ... the data in sync regs */
+       memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
+       vcpu->run->s.regs.ckc = 0;
+       vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
+       vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
+       vcpu->run->psw_addr = 0;
+       vcpu->run->psw_mask = 0;
+       vcpu->run->s.regs.todpr = 0;
+       vcpu->run->s.regs.cputm = 0;
+       vcpu->run->s.regs.ckc = 0;
+       vcpu->run->s.regs.pp = 0;
+       vcpu->run->s.regs.gbea = 1;
        vcpu->run->s.regs.fpc = 0;
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
index 94df086..513a555 100644 (file)
@@ -194,9 +194,10 @@ avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
 avx512_instr :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1)
 sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI=1)
 sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1)
+adx_instr := $(call as-instr,adox %r10$(comma)%r10,-DCONFIG_AS_ADX=1)
 
-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr)
+KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr)
+KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr)
 
 KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
 
index b69e00b..8c2e9ea 100644 (file)
@@ -11,6 +11,7 @@ avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
 avx512_supported :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,yes,no)
 sha1_ni_supported :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,yes,no)
 sha256_ni_supported :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,yes,no)
+adx_supported := $(call as-instr,adox %r10$(comma)%r10,yes,no)
 
 obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o
 
@@ -39,7 +40,11 @@ obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o
 
 obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o
 obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o
-obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
+
+# These modules require the assembler to support ADX.
+ifeq ($(adx_supported),yes)
+       obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
+endif
 
 # These modules require assembler to support AVX.
 ifeq ($(avx_supported),yes)
index a6ea07f..4d867a7 100644 (file)
@@ -190,15 +190,12 @@ static int amd_uncore_event_init(struct perf_event *event)
 
        /*
         * NB and Last level cache counters (MSRs) are shared across all cores
-        * that share the same NB / Last level cache. Interrupts can be directed
-        * to a single target core, however, event counts generated by processes
-        * running on other cores cannot be masked out. So we do not support
-        * sampling and per-thread events.
+        * that share the same NB / Last level cache.  On family 16h and below,
+        * Interrupts can be directed to a single target core, however, event
+        * counts generated by processes running on other cores cannot be masked
+        * out. So we do not support sampling and per-thread events via
+        * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
         */
-       if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
-               return -EINVAL;
-
-       /* and we do not enable counter overflow interrupts */
        hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
        hwc->idx = -1;
 
@@ -306,7 +303,7 @@ static struct pmu amd_nb_pmu = {
        .start          = amd_uncore_start,
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
-       .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
+       .capabilities   = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
 };
 
 static struct pmu amd_llc_pmu = {
@@ -317,7 +314,7 @@ static struct pmu amd_llc_pmu = {
        .start          = amd_uncore_start,
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
-       .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
+       .capabilities   = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
 };
 
 static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
index 2a8f2bd..c06e835 100644 (file)
@@ -360,7 +360,6 @@ struct x86_emulate_ctxt {
        u64 d;
        unsigned long _eip;
        struct operand memop;
-       /* Fields above regs are cleared together. */
        unsigned long _regs[NR_VCPU_REGS];
        struct operand *memopp;
        struct fetch_cache fetch;
index 2c5676b..48293d1 100644 (file)
@@ -838,13 +838,15 @@ static void free_moved_vector(struct apic_chip_data *apicd)
        bool managed = apicd->is_managed;
 
        /*
-        * This should never happen. Managed interrupts are not
-        * migrated except on CPU down, which does not involve the
-        * cleanup vector. But try to keep the accounting correct
-        * nevertheless.
+        * Managed interrupts are usually not migrated away
+        * from an online CPU, but CPU isolation 'managed_irq'
+        * can make that happen.
+        * 1) Activation does not take the isolation into account
+        *    to keep the code simple
+        * 2) Migration away from an isolated CPU can happen when
+        *    a non-isolated CPU which is in the calculated
+        *    affinity mask comes online.
         */
-       WARN_ON_ONCE(managed);
-
        trace_vector_free_moved(apicd->irq, cpu, vector, managed);
        irq_matrix_free(vector_matrix, cpu, vector, managed);
        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
index 5627b10..f996ffb 100644 (file)
@@ -493,17 +493,18 @@ static void intel_ppin_init(struct cpuinfo_x86 *c)
                        return;
 
                if ((val & 3UL) == 1UL) {
-                       /* PPIN available but disabled: */
+                       /* PPIN locked in disabled mode */
                        return;
                }
 
-               /* If PPIN is disabled, but not locked, try to enable: */
-               if (!(val & 3UL)) {
+               /* If PPIN is disabled, try to enable */
+               if (!(val & 2UL)) {
                        wrmsrl_safe(MSR_PPIN_CTL,  val | 2UL);
                        rdmsrl_safe(MSR_PPIN_CTL, &val);
                }
 
-               if ((val & 3UL) == 2UL)
+               /* Is the enable bit set? */
+               if (val & 2UL)
                        set_cpu_cap(c, X86_FEATURE_INTEL_PPIN);
        }
 }
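
For reference, the hunk above treats MSR_PPIN_CTL as "bit 0 = lock, bit 1 = enable"; a small stand-alone decoder of those two bits might look like this (illustrative only, bit meanings taken from the code above):

    #include <stdbool.h>
    #include <stdint.h>

    struct ppin_ctl_bits {
            bool locked;    /* bit 0: the enable bit can no longer be changed */
            bool enabled;   /* bit 1: the PPIN itself is readable             */
    };

    static struct ppin_ctl_bits decode_ppin_ctl(uint64_t val)
    {
            struct ppin_ctl_bits b = {
                    .locked  = val & 1ULL,
                    .enabled = val & 2ULL,
            };
            return b;
    }

    /* locked && !enabled  -> give up (first check in the hunk)
     * !enabled            -> try writing the enable bit, then re-read
     * enabled             -> advertise X86_FEATURE_INTEL_PPIN            */
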
index 58b4ee3..f36dc07 100644 (file)
@@ -486,9 +486,14 @@ static int thermal_throttle_offline(unsigned int cpu)
 {
        struct thermal_state *state = &per_cpu(thermal_state, cpu);
        struct device *dev = get_cpu_device(cpu);
+       u32 l;
+
+       /* Mask the thermal vector before draining evtl. pending work */
+       l = apic_read(APIC_LVTTHMR);
+       apic_write(APIC_LVTTHMR, l | APIC_LVT_MASKED);
 
-       cancel_delayed_work(&state->package_throttle.therm_work);
-       cancel_delayed_work(&state->core_throttle.therm_work);
+       cancel_delayed_work_sync(&state->package_throttle.therm_work);
+       cancel_delayed_work_sync(&state->core_throttle.therm_work);
 
        state->package_throttle.rate_control_active = false;
        state->core_throttle.rate_control_active = false;
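
A minimal sketch of the teardown ordering the hunk above switches to (standard workqueue API; the struct is illustrative): first stop the source of new work, here by masking the APIC thermal vector, then use the _sync variant so any handler already queued or running finishes before its state is reset.

    #include <linux/workqueue.h>

    struct example_throttle_state {
            struct delayed_work therm_work;
            /* ... counters the work item updates ... */
    };

    static void example_throttle_offline(struct example_throttle_state *st)
    {
            /* cancel_delayed_work() could return while the handler is still
             * running on another CPU; the _sync variant waits for it, so the
             * per-CPU state can be cleared safely afterwards. */
            cancel_delayed_work_sync(&st->therm_work);
    }
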
index 1bb4927..9fea075 100644 (file)
@@ -68,7 +68,7 @@ config KVM_WERROR
        depends on (X86_64 && !KASAN) || !COMPILE_TEST
        depends on EXPERT
        help
-         Add -Werror to the build flags for (and only for) i915.ko.
+         Add -Werror to the build flags for KVM.
 
          If in doubt, say "N".
 
index dd19fb3..bc00642 100644 (file)
@@ -5173,6 +5173,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
        ctxt->fetch.ptr = ctxt->fetch.data;
        ctxt->fetch.end = ctxt->fetch.data + insn_len;
        ctxt->opcode_len = 1;
+       ctxt->intercept = x86_intercept_none;
        if (insn_len > 0)
                memcpy(ctxt->fetch.data, insn, insn_len);
        else {
index 7668fed..750ff0b 100644 (file)
@@ -378,12 +378,15 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
                if (e->fields.delivery_mode == APIC_DM_FIXED) {
                        struct kvm_lapic_irq irq;
 
-                       irq.shorthand = APIC_DEST_NOSHORT;
                        irq.vector = e->fields.vector;
                        irq.delivery_mode = e->fields.delivery_mode << 8;
-                       irq.dest_id = e->fields.dest_id;
                        irq.dest_mode =
                            kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);
+                       irq.level = false;
+                       irq.trig_mode = e->fields.trig_mode;
+                       irq.shorthand = APIC_DEST_NOSHORT;
+                       irq.dest_id = e->fields.dest_id;
+                       irq.msi_redir_hint = false;
                        bitmap_zero(&vcpu_bitmap, 16);
                        kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
                                                 &vcpu_bitmap);
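
As an aside on the hunk above, which fills in every field of the on-stack interrupt descriptor by hand: a designated initializer gives the same no-stack-garbage guarantee, since unnamed members are zero-filled (sketch with a made-up struct, not the kernel's kvm_lapic_irq):

    struct example_irq {
            unsigned int vector;
            unsigned int delivery_mode;
            unsigned int dest_mode;
            unsigned int dest_id;
            int level;
            int trig_mode;
            int shorthand;
            int msi_redir_hint;
    };

    static struct example_irq make_fixed_irq(unsigned int vector,
                                             unsigned int dest_id)
    {
            /* level, trig_mode, shorthand, msi_redir_hint start at zero */
            struct example_irq irq = {
                    .vector  = vector,
                    .dest_id = dest_id,
            };
            return irq;
    }
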
index e3099c6..7356a56 100644 (file)
@@ -1445,6 +1445,8 @@ static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
        }
 }
 
+static void cancel_hv_timer(struct kvm_lapic *apic);
+
 static void apic_update_lvtt(struct kvm_lapic *apic)
 {
        u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
@@ -1454,6 +1456,10 @@ static void apic_update_lvtt(struct kvm_lapic *apic)
                if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
                                APIC_LVT_TIMER_TSCDEADLINE)) {
                        hrtimer_cancel(&apic->lapic_timer.timer);
+                       preempt_disable();
+                       if (apic->lapic_timer.hv_timer_in_use)
+                               cancel_hv_timer(apic);
+                       preempt_enable();
                        kvm_lapic_set_reg(apic, APIC_TMICT, 0);
                        apic->lapic_timer.period = 0;
                        apic->lapic_timer.tscdeadline = 0;
@@ -1715,7 +1721,7 @@ static void start_sw_period(struct kvm_lapic *apic)
 
        hrtimer_start(&apic->lapic_timer.timer,
                apic->lapic_timer.target_expiration,
-               HRTIMER_MODE_ABS);
+               HRTIMER_MODE_ABS_HARD);
 }
 
 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
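
The first hunk above wraps the hv-timer check and cancel in a preemption-disabled region; the generic shape of that pattern is sketched below (kernel preempt API; the state pointer and callback are placeholders). Disabling preemption keeps the task on one CPU between the "is it in use?" check and the cancel, since both act on per-CPU timer state.

    #include <linux/preempt.h>
    #include <linux/types.h>

    static void stop_timer_if_in_use(bool *hv_timer_in_use, void (*cancel)(void))
    {
            preempt_disable();
            if (*hv_timer_in_use)
                    cancel();
            preempt_enable();
    }
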
index 24c0b2b..50d1eba 100644 (file)
@@ -1933,14 +1933,6 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages)
 static void __unregister_enc_region_locked(struct kvm *kvm,
                                           struct enc_region *region)
 {
-       /*
-        * The guest may change the memory encryption attribute from C=0 -> C=1
-        * or vice versa for this memory range. Lets make sure caches are
-        * flushed to ensure that guest data gets written into memory with
-        * correct C-bit.
-        */
-       sev_clflush_pages(region->pages, region->npages);
-
        sev_unpin_memory(kvm, region->pages, region->npages);
        list_del(&region->list);
        kfree(region);
@@ -1971,6 +1963,13 @@ static void sev_vm_destroy(struct kvm *kvm)
        mutex_lock(&kvm->lock);
 
        /*
+        * Ensure that all guest tagged cache entries are flushed before
+        * releasing the pages back to the system for use. CLFLUSH will
+        * not do this, so issue a WBINVD.
+        */
+       wbinvd_on_all_cpus();
+
+       /*
         * if userspace was terminated before unregistering the memory regions
         * then lets unpin all the registered memory.
         */
@@ -6312,7 +6311,8 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu,
        enum exit_fastpath_completion *exit_fastpath)
 {
        if (!is_guest_mode(vcpu) &&
-               to_svm(vcpu)->vmcb->control.exit_code == EXIT_REASON_MSR_WRITE)
+           to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
+           to_svm(vcpu)->vmcb->control.exit_info_1)
                *exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
 }
 
@@ -7157,6 +7157,9 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
        if (!svm_sev_enabled())
                return -ENOTTY;
 
+       if (!argp)
+               return 0;
+
        if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
                return -EFAULT;
 
@@ -7284,6 +7287,13 @@ static int svm_unregister_enc_region(struct kvm *kvm,
                goto failed;
        }
 
+       /*
+        * Ensure that all guest tagged cache entries are flushed before
+        * releasing the pages back to the system for use. CLFLUSH will
+        * not do this, so issue a WBINVD.
+        */
+       wbinvd_on_all_cpus();
+
        __unregister_enc_region_locked(kvm, region);
 
        mutex_unlock(&kvm->lock);
index e920d78..9750e59 100644 (file)
@@ -224,7 +224,7 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
                return;
 
        kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
-       vmx->nested.hv_evmcs_vmptr = -1ull;
+       vmx->nested.hv_evmcs_vmptr = 0;
        vmx->nested.hv_evmcs = NULL;
 }
 
@@ -1923,7 +1923,8 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
        if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
                return 1;
 
-       if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
+       if (unlikely(!vmx->nested.hv_evmcs ||
+                    evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
                if (!vmx->nested.hv_evmcs)
                        vmx->nested.current_vmptr = -1ull;
 
index 40b1e61..079d9fb 100644 (file)
@@ -2338,6 +2338,17 @@ static void hardware_disable(void)
        kvm_cpu_vmxoff();
 }
 
+/*
+ * There is no X86_FEATURE for SGX yet, but anyway we need to query CPUID
+ * directly instead of going through cpu_has(), to ensure KVM is trapping
+ * ENCLS whenever it's supported in hardware.  It does not matter whether
+ * the host OS supports or has enabled SGX.
+ */
+static bool cpu_has_sgx(void)
+{
+       return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0));
+}
+
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
                                      u32 msr, u32 *result)
 {
@@ -2418,8 +2429,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
                        SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
                        SECONDARY_EXEC_PT_USE_GPA |
                        SECONDARY_EXEC_PT_CONCEAL_VMX |
-                       SECONDARY_EXEC_ENABLE_VMFUNC |
-                       SECONDARY_EXEC_ENCLS_EXITING;
+                       SECONDARY_EXEC_ENABLE_VMFUNC;
+               if (cpu_has_sgx())
+                       opt2 |= SECONDARY_EXEC_ENCLS_EXITING;
                if (adjust_vmx_controls(min2, opt2,
                                        MSR_IA32_VMX_PROCBASED_CTLS2,
                                        &_cpu_based_2nd_exec_control) < 0)
@@ -6275,7 +6287,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
 #endif
                ASM_CALL_CONSTRAINT
                :
-               THUNK_TARGET(entry),
+               [thunk_target]"r"(entry),
                [ss]"i"(__KERNEL_DS),
                [cs]"i"(__KERNEL_CS)
        );
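
A user-space flavoured sketch of the same probe as the cpu_has_sgx() helper added above, using the GCC/Clang <cpuid.h> builtins (illustrative; the kernel uses its own cpuid_eax() wrapper): leaf 0 reports the highest supported leaf, and leaf 0x12 EAX bit 0 reports SGX1 support.

    #include <cpuid.h>
    #include <stdbool.h>

    static bool probe_sgx(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* leaf 0: highest supported standard leaf must reach 0x12 */
            if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx) || eax < 0x12)
                    return false;

            /* leaf 0x12, subleaf 0: EAX bit 0 reports SGX1 support */
            if (!__get_cpuid_count(0x12, 0, &eax, &ebx, &ecx, &edx))
                    return false;

            return eax & 1;
    }
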
index 5de2006..cf95c36 100644 (file)
@@ -1554,7 +1554,10 @@ EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
  */
 static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)
 {
-       if (lapic_in_kernel(vcpu) && apic_x2apic_mode(vcpu->arch.apic) &&
+       if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic))
+               return 1;
+
+       if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) &&
                ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
                ((data & APIC_MODE_MASK) == APIC_DM_FIXED)) {
 
@@ -2444,7 +2447,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
        vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
        vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
        vcpu->last_guest_tsc = tsc_timestamp;
-       WARN_ON((s64)vcpu->hv_clock.system_time < 0);
 
        /* If the host uses TSC clocksource, then it is stable */
        pvclock_flags = 0;
@@ -7195,10 +7197,12 @@ static void kvm_timer_init(void)
 
                cpu = get_cpu();
                policy = cpufreq_cpu_get(cpu);
-               if (policy && policy->cpuinfo.max_freq)
-                       max_tsc_khz = policy->cpuinfo.max_freq;
+               if (policy) {
+                       if (policy->cpuinfo.max_freq)
+                               max_tsc_khz = policy->cpuinfo.max_freq;
+                       cpufreq_cpu_put(policy);
+               }
                put_cpu();
-               cpufreq_cpu_put(policy);
 #endif
                cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
                                          CPUFREQ_TRANSITION_NOTIFIER);
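
The kvm_timer_init() hunk above moves cpufreq_cpu_put() inside the NULL check, so the reference is dropped only when cpufreq_cpu_get() actually returned a policy. A minimal sketch of that pairing, with example_max_freq_khz() as a hypothetical helper:

    #include <linux/cpufreq.h>

    /* Hypothetical helper: read a CPU's maximum frequency in kHz, or 0 if
     * no cpufreq policy exists for it.  The put is balanced with the get. */
    static unsigned int example_max_freq_khz(unsigned int cpu)
    {
            struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
            unsigned int max_khz = 0;

            if (policy) {
                    max_khz = policy->cpuinfo.max_freq;
                    cpufreq_cpu_put(policy);
            }
            return max_khz;
    }
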
index fa4ea09..629fdf1 100644
@@ -190,7 +190,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
        return pmd_k;
 }
 
-void vmalloc_sync_all(void)
+static void vmalloc_sync(void)
 {
        unsigned long address;
 
@@ -217,6 +217,16 @@ void vmalloc_sync_all(void)
        }
 }
 
+void vmalloc_sync_mappings(void)
+{
+       vmalloc_sync();
+}
+
+void vmalloc_sync_unmappings(void)
+{
+       vmalloc_sync();
+}
+
 /*
  * 32-bit:
  *
@@ -319,11 +329,23 @@ out:
 
 #else /* CONFIG_X86_64: */
 
-void vmalloc_sync_all(void)
+void vmalloc_sync_mappings(void)
 {
+       /*
+        * 64-bit mappings might allocate new p4d/pud pages
+        * that need to be propagated to all tasks' PGDs.
+        */
        sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
 }
 
+void vmalloc_sync_unmappings(void)
+{
+       /*
+        * Unmappings never allocate or free p4d/pud pages.
+        * No work is required here.
+        */
+}
+
 /*
  * 64-bit:
  *
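
These hunks split the old vmalloc_sync_all() into vmalloc_sync_mappings() and vmalloc_sync_unmappings(): on 32-bit both funnel into the same page-table walk, while on 64-bit only the mapping side has work to do, since unmappings never free p4d/pud pages. A hypothetical caller sketch for the mapping side (the GHES pool hunk further down is the real in-tree user):

    #include <linux/vmalloc.h>

    /* Hypothetical helper: allocate a buffer that an NMI handler may touch.
     * The sync propagates the new p4d/pud entries to every task's PGD before
     * the pointer is published, so the NMI can never fault on it. */
    static void *example_alloc_nmi_visible(unsigned long size)
    {
            void *buf = vmalloc(size);

            if (buf)
                    vmalloc_sync_mappings();
            return buf;
    }
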
index 44e4beb..18c637c 100644
@@ -106,6 +106,22 @@ static unsigned int __ioremap_check_encrypted(struct resource *res)
        return 0;
 }
 
+/*
+ * The EFI runtime services data area is not covered by walk_mem_res(), but must
+ * be mapped encrypted when SEV is active.
+ */
+static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
+{
+       if (!sev_active())
+               return;
+
+       if (!IS_ENABLED(CONFIG_EFI))
+               return;
+
+       if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
+               desc->flags |= IORES_MAP_ENCRYPTED;
+}
+
 static int __ioremap_collect_map_flags(struct resource *res, void *arg)
 {
        struct ioremap_desc *desc = arg;
@@ -124,6 +140,9 @@ static int __ioremap_collect_map_flags(struct resource *res, void *arg)
  * To avoid multiple resource walks, this function walks resources marked as
  * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
  * resource that is not described as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
+ *
+ * After that, deal with misc other ranges in __ioremap_check_other() which do
+ * not fall into the above category.
  */
 static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
                                struct ioremap_desc *desc)
@@ -135,6 +154,8 @@ static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
        memset(desc, 0, sizeof(struct ioremap_desc));
 
        walk_mem_res(start, end, desc, __ioremap_collect_map_flags);
+
+       __ioremap_check_other(addr, desc);
 }
 
 /*
index 393d251..4d2a7a7 100644
@@ -2039,10 +2039,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                        }
                        /* and dreg_lo,sreg_lo */
                        EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo));
-                       /* and dreg_hi,sreg_hi */
-                       EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
-                       /* or dreg_lo,dreg_hi */
-                       EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
+                       if (is_jmp64) {
+                               /* and dreg_hi,sreg_hi */
+                               EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
+                               /* or dreg_lo,dreg_hi */
+                               EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
+                       }
                        goto emit_cond_jmp;
                }
                case BPF_JMP | BPF_JSET | BPF_K:
index 6a7788f..db35ee6 100644
@@ -1315,7 +1315,7 @@ static bool iocg_is_idle(struct ioc_gq *iocg)
                return false;
 
        /* is something in flight? */
-       if (atomic64_read(&iocg->done_vtime) < atomic64_read(&iocg->vtime))
+       if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
                return false;
 
        return true;
index 856356b..74cedea 100644
@@ -398,6 +398,28 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
        WARN_ON(e && (rq->tag != -1));
 
        if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
+               /*
+                * A normal I/O request is inserted into the scheduler queue
+                * or a software queue, while a flush request is added to the
+                * dispatch queue (hctx->dispatch) directly.  There is at most
+                * one in-flight flush request per hw queue, so whether the
+                * flush request goes to the tail or the front of the dispatch
+                * queue does not matter for correctness.
+                *
+                * With NCQ, however, a flush request is a non-NCQ command and
+                * queueing it fails while any normal I/O request (NCQ command)
+                * is in flight.  Adding the flush rq to the front of
+                * hctx->dispatch tends to delay its issue slightly (because of
+                * S_SCHED_RESTART) compared with adding it to the tail, which
+                * widens the window for flush merging, so fewer flush requests
+                * reach the controller.  In blktests block/004 on a disk behind
+                * an AHCI/NCQ host, ~10% of the run time is saved when the
+                * flush rq is queued at the front of hctx->dispatch.
+                *
+                * So simply queue the flush rq at the front of hctx->dispatch,
+                * letting flush-intensive workloads benefit on NCQ hardware.
+                */
+               at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
                blk_mq_request_bypass_insert(rq, at_head, false);
                goto run;
        }
index 14cf395..06b642b 100644
@@ -373,6 +373,42 @@ struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
        return &disk->part0;
 }
 
+/**
+ * disk_has_partitions
+ * @disk: gendisk of interest
+ *
+ * Walk through the partition table and check if valid partition exists.
+ *
+ * CONTEXT:
+ * Don't care.
+ *
+ * RETURNS:
+ * True if the gendisk has at least one valid non-zero size partition.
+ * Otherwise false.
+ */
+bool disk_has_partitions(struct gendisk *disk)
+{
+       struct disk_part_tbl *ptbl;
+       int i;
+       bool ret = false;
+
+       rcu_read_lock();
+       ptbl = rcu_dereference(disk->part_tbl);
+
+       /* Iterate partitions skipping the whole device at index 0 */
+       for (i = 1; i < ptbl->len; i++) {
+               if (rcu_dereference(ptbl->part[i])) {
+                       ret = true;
+                       break;
+               }
+       }
+
+       rcu_read_unlock();
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(disk_has_partitions);
+
 /*
  * Can be deleted altogether. Later.
  *
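
disk_has_partitions() takes the RCU read lock itself and skips slot 0, the whole-device entry, so a caller only needs the gendisk pointer. A hypothetical caller sketch:

    #include <linux/errno.h>
    #include <linux/genhd.h>

    /* Hypothetical policy: refuse to claim the whole device while any
     * partition is still present in the disk's partition table. */
    static int example_claim_whole_disk(struct gendisk *disk)
    {
            if (disk_has_partitions(disk))
                    return -EBUSY;
            /* ... safe to treat the device as unpartitioned ... */
            return 0;
    }
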
index 103acbb..24c9642 100644
@@ -171,7 +171,7 @@ int ghes_estatus_pool_init(int num_ghes)
         * New allocation must be visible in all pgd before it can be found by
         * an NMI allocating from the pool.
         */
-       vmalloc_sync_all();
+       vmalloc_sync_mappings();
 
        rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
        if (rc)
index 110e41f..f303106 100644
@@ -448,6 +448,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
        inode->i_uid = info->root_uid;
        inode->i_gid = info->root_gid;
 
+       refcount_set(&device->ref, 1);
        device->binderfs_inode = inode;
        device->miscdev.minor = minor;
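
The binderfs one-liner above initializes the binder-control device's refcount to 1 at creation time, so later get/put operations act on a properly initialized counter. A generic sketch of that pattern with hypothetical names:

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct example_device {
            refcount_t ref;
    };

    /* Creation hands the caller the initial reference ... */
    static struct example_device *example_device_create(void)
    {
            struct example_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

            if (dev)
                    refcount_set(&dev->ref, 1);
            return dev;
    }

    /* ... and the last put frees the object. */
    static void example_device_put(struct example_device *dev)
    {
            if (refcount_dec_and_test(&dev->ref))
                    kfree(dev);
    }
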
 
index a6beb2c..05ecdce 100644
@@ -34,6 +34,12 @@ if ATA
 config ATA_NONSTANDARD
        bool
 
+config SATA_HOST
+       bool
+
+config PATA_TIMINGS
+       bool
+
 config ATA_VERBOSE_ERROR
        bool "Verbose ATA error reporting"
        default y
@@ -45,9 +51,26 @@ config ATA_VERBOSE_ERROR
 
          If unsure, say Y.
 
+config ATA_FORCE
+       bool "\"libata.force=\" kernel parameter support" if EXPERT
+       default y
+       help
+         This option adds support for the "libata.force=" kernel parameter,
+         used to force configuration settings.
+
+         For further information, please read
+         <file:Documentation/admin-guide/kernel-parameters.txt>.
+
+         This option enlarges the kernel by approx. 3KB. Disable it if
+         kernel size is more important than the ability to override the
+         default configuration settings.
+
+         If unsure, say Y.
+
 config ATA_ACPI
        bool "ATA ACPI Support"
        depends on ACPI
+       select PATA_TIMINGS
        default y
        help
          This option adds support for ATA-related ACPI objects.
@@ -73,6 +96,7 @@ config SATA_ZPODD
 
 config SATA_PMP
        bool "SATA Port Multiplier support"
+       depends on SATA_HOST
        default y
        help
          This option adds support for SATA Port Multipliers
@@ -85,6 +109,7 @@ comment "Controllers with non-SFF native interface"
 config SATA_AHCI
        tristate "AHCI SATA support"
        depends on PCI
+       select SATA_HOST
        help
          This option enables support for AHCI Serial ATA.
 
@@ -111,6 +136,7 @@ config SATA_MOBILE_LPM_POLICY
 
 config SATA_AHCI_PLATFORM
        tristate "Platform AHCI SATA support"
+       select SATA_HOST
        help
          This option enables support for Platform AHCI Serial ATA
          controllers.
@@ -121,6 +147,7 @@ config AHCI_BRCM
        tristate "Broadcom AHCI SATA support"
        depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP || \
                   ARCH_BCM_63XX
+       select SATA_HOST
        help
          This option enables support for the AHCI SATA3 controller found on
          Broadcom SoC's.
@@ -130,6 +157,7 @@ config AHCI_BRCM
 config AHCI_DA850
        tristate "DaVinci DA850 AHCI SATA support"
        depends on ARCH_DAVINCI_DA850
+       select SATA_HOST
        help
          This option enables support for the DaVinci DA850 SoC's
          onboard AHCI SATA.
@@ -139,6 +167,7 @@ config AHCI_DA850
 config AHCI_DM816
        tristate "DaVinci DM816 AHCI SATA support"
        depends on ARCH_OMAP2PLUS
+       select SATA_HOST
        help
          This option enables support for the DaVinci DM816 SoC's
          onboard AHCI SATA controller.
@@ -148,6 +177,7 @@ config AHCI_DM816
 config AHCI_ST
        tristate "ST AHCI SATA support"
        depends on ARCH_STI
+       select SATA_HOST
        help
          This option enables support for ST AHCI SATA controller.
 
@@ -157,6 +187,7 @@ config AHCI_IMX
        tristate "Freescale i.MX AHCI SATA support"
        depends on MFD_SYSCON && (ARCH_MXC || COMPILE_TEST)
        depends on (HWMON && (THERMAL || !THERMAL_OF)) || !HWMON
+       select SATA_HOST
        help
          This option enables support for the Freescale i.MX SoC's
          onboard AHCI SATA.
@@ -166,6 +197,7 @@ config AHCI_IMX
 config AHCI_CEVA
        tristate "CEVA AHCI SATA support"
        depends on OF
+       select SATA_HOST
        help
          This option enables support for the CEVA AHCI SATA.
          It can be found on the Xilinx Zynq UltraScale+ MPSoC.
@@ -176,6 +208,7 @@ config AHCI_MTK
        tristate "MediaTek AHCI SATA support"
        depends on ARCH_MEDIATEK
        select MFD_SYSCON
+       select SATA_HOST
        help
          This option enables support for the MediaTek SoC's
          onboard AHCI SATA controller.
@@ -185,6 +218,7 @@ config AHCI_MTK
 config AHCI_MVEBU
        tristate "Marvell EBU AHCI SATA support"
        depends on ARCH_MVEBU
+       select SATA_HOST
        help
          This option enables support for the Marvell EBU SoC's
          onboard AHCI SATA.
@@ -203,6 +237,7 @@ config AHCI_OCTEON
 config AHCI_SUNXI
        tristate "Allwinner sunxi AHCI SATA support"
        depends on ARCH_SUNXI
+       select SATA_HOST
        help
          This option enables support for the Allwinner sunxi SoC's
          onboard AHCI SATA.
@@ -212,6 +247,7 @@ config AHCI_SUNXI
 config AHCI_TEGRA
        tristate "NVIDIA Tegra AHCI SATA support"
        depends on ARCH_TEGRA
+       select SATA_HOST
        help
          This option enables support for the NVIDIA Tegra SoC's
          onboard AHCI SATA.
@@ -221,12 +257,14 @@ config AHCI_TEGRA
 config AHCI_XGENE
        tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support"
        depends on PHY_XGENE
+       select SATA_HOST
        help
         This option enables support for APM X-Gene SoC SATA host controller.
 
 config AHCI_QORIQ
        tristate "Freescale QorIQ AHCI SATA support"
        depends on OF
+       select SATA_HOST
        help
          This option enables support for the Freescale QorIQ AHCI SoC's
          onboard AHCI SATA.
@@ -236,6 +274,7 @@ config AHCI_QORIQ
 config SATA_FSL
        tristate "Freescale 3.0Gbps SATA support"
        depends on FSL_SOC
+       select SATA_HOST
        help
          This option enables support for Freescale 3.0Gbps SATA controller.
          It can be found on MPC837x and MPC8315.
@@ -245,6 +284,7 @@ config SATA_FSL
 config SATA_GEMINI
        tristate "Gemini SATA bridge support"
        depends on ARCH_GEMINI || COMPILE_TEST
+       select SATA_HOST
        default ARCH_GEMINI
        help
          This enables support for the FTIDE010 to SATA bridge
@@ -255,6 +295,7 @@ config SATA_GEMINI
 config SATA_AHCI_SEATTLE
        tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support"
        depends on ARCH_SEATTLE
+       select SATA_HOST
        help
         This option enables support for AMD Seattle SATA host controller.
 
@@ -263,12 +304,14 @@ config SATA_AHCI_SEATTLE
 config SATA_INIC162X
        tristate "Initio 162x SATA support (Very Experimental)"
        depends on PCI
+       select SATA_HOST
        help
          This option enables support for Initio 162x Serial ATA.
 
 config SATA_ACARD_AHCI
        tristate "ACard AHCI variant (ATP 8620)"
        depends on PCI
+       select SATA_HOST
        help
          This option enables support for Acard.
 
@@ -277,6 +320,7 @@ config SATA_ACARD_AHCI
 config SATA_SIL24
        tristate "Silicon Image 3124/3132 SATA support"
        depends on PCI
+       select SATA_HOST
        help
          This option enables support for Silicon Image 3124/3132 Serial ATA.
 
@@ -317,6 +361,7 @@ config PDC_ADMA
 config PATA_OCTEON_CF
        tristate "OCTEON Boot Bus Compact Flash support"
        depends on CAVIUM_OCTEON_SOC
+       select PATA_TIMINGS
        help
          This option enables a polled compact flash driver for use with
          compact flash cards attached to the OCTEON boot bus.
@@ -326,6 +371,7 @@ config PATA_OCTEON_CF
 config SATA_QSTOR
        tristate "Pacific Digital SATA QStor support"
        depends on PCI
+       select SATA_HOST
        help
          This option enables support for Pacific Digital Serial ATA QStor.
 
@@ -334,6 +380,7 @@ config SATA_QSTOR
 config SATA_SX4
        tristate "Promise SATA SX4 support (Experimental)"
        depends on PCI
+       select SATA_HOST
        help
          This option enables support for Promise Serial ATA SX4.
 
@@ -357,6 +404,7 @@ comment "SATA SFF controllers with BMDMA"
 config ATA_PIIX
        tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support"
        depends on PCI
+       select SATA_HOST
        help
          This option enables support for ICH5/6/7/8 Serial ATA
          and support for PATA on the Intel ESB/ICH/PIIX3/PIIX4 series
@@ -368,6 +416,7 @@ config SATA_DWC
        tristate "DesignWare Cores SATA support"
        depends on DMADEVICES
        select GENERIC_PHY
+       select SATA_HOST
        help
          This option enables support for the on-chip SATA controller of the
          AppliedMicro processor 460EX.
@@ -398,6 +447,7 @@ config SATA_DWC_VDEBUG
 config SATA_HIGHBANK
        tristate "Calxeda Highbank SATA support"
        depends on ARCH_HIGHBANK || COMPILE_TEST
+       select SATA_HOST
        help
          This option enables support for the Calxeda Highbank SoC's
          onboard SATA.
@@ -409,6 +459,7 @@ config SATA_MV
        depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \
                   ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
        select GENERIC_PHY
+       select SATA_HOST
        help
          This option enables support for the Marvell Serial ATA family.
          Currently supports 88SX[56]0[48][01] PCI(-X) chips,
@@ -419,6 +470,7 @@ config SATA_MV
 config SATA_NV
        tristate "NVIDIA SATA support"
        depends on PCI
+       select SATA_HOST
        help
          This option enables support for NVIDIA Serial ATA.
 
@@ -427,6 +479,7 @@ config SATA_NV
 config SATA_PROMISE
        tristate "Promise SATA TX2/TX4 support"
        depends on PCI
+       select SATA_HOST
        help
          This option enables support for Promise Serial ATA TX2/TX4.
 
@@ -435,6 +488,7 @@ config SATA_PROMISE
 config SATA_RCAR
        tristate "Renesas R-Car SATA support"
        depends on ARCH_RENESAS || COMPILE_TEST
+       select SATA_HOST
        help
          This option enables support for Renesas R-Car Serial ATA.
 
@@ -443,6 +497,7 @@ config SATA_RCAR
 config SATA_SIL
        tristate "Silicon Image SATA support"
        depends on PCI
+       select SATA_HOST
        help
          This option enables support for Silicon Image Serial ATA.
 
@@ -452,6 +507,7 @@ config SATA_SIS
        tristate "SiS 964/965/966/180 SATA support"
        depends on PCI
        select PATA_SIS
+       select SATA_HOST
        help
          This option enables support for SiS Serial ATA on
          SiS 964/965/966/180 and Parallel ATA on SiS 180.
@@ -462,6 +518,7 @@ config SATA_SIS
 config SATA_SVW
        tristate "ServerWorks Frodo / Apple K2 SATA support"
        depends on PCI
+       select SATA_HOST
        help
          This option enables support for Broadcom/Serverworks/Apple K2
          SATA controllers.
@@ -471,6 +528,7 @@ config SATA_SVW
 config SATA_ULI
        tristate "ULi Electronics SATA support"
        depends on PCI
+       select SATA_HOST
        help
          This option enables support for ULi Electronics SATA.
 
@@ -479,6 +537,7 @@ config SATA_ULI
 config SATA_VIA
        tristate "VIA SATA support"
        depends on PCI
+       select SATA_HOST
        help
          This option enables support for VIA Serial ATA.
 
@@ -487,6 +546,7 @@ config SATA_VIA
 config SATA_VITESSE
        tristate "VITESSE VSC-7174 / INTEL 31244 SATA support"
        depends on PCI
+       select SATA_HOST
        help
          This option enables support for Vitesse VSC7174 and Intel 31244 Serial ATA.
 
@@ -497,6 +557,7 @@ comment "PATA SFF controllers with BMDMA"
 config PATA_ALI
        tristate "ALi PATA support"
        depends on PCI
+       select PATA_TIMINGS
        help
          This option enables support for the ALi ATA interfaces
          found on the many ALi chipsets.
@@ -506,6 +567,7 @@ config PATA_ALI
 config PATA_AMD
        tristate "AMD/NVidia PATA support"
        depends on PCI
+       select PATA_TIMINGS
        help
          This option enables support for the AMD and NVidia PATA
          interfaces found on the chipsets for Athlon/Athlon64.
@@ -540,6 +602,7 @@ config PATA_ATIIXP
 config PATA_ATP867X
        tristate "ARTOP/Acard ATP867X PATA support"
        depends on PCI
+       select PATA_TIMINGS
        help
          This option enables support for ARTOP/Acard ATP867X PATA
          controllers.
@@ -549,6 +612,7 @@ config PATA_ATP867X
 config PATA_BK3710
        tristate "Palmchip BK3710 PATA support"
        depends on ARCH_DAVINCI
+       select PATA_TIMINGS
        help
          This option enables support for the integrated IDE controller on
          the TI DaVinci SoC.
@@ -558,6 +622,7 @@ config PATA_BK3710
 config PATA_CMD64X
        tristate "CMD64x PATA support"
        depends on PCI
+       select PATA_TIMINGS
        help
          This option enables support for the CMD64x series chips
          except for the CMD640.
@@ -603,6 +668,7 @@ config PATA_CS5536
 config PATA_CYPRESS
        tristate "Cypress CY82C693 PATA support (Very Experimental)"
        depends on PCI
+       select PATA_TIMINGS
        help
          This option enables support for the Cypress/Contaq CY82C693
          chipset found in some Alpha systems
@@ -621,6 +687,7 @@ config PATA_EFAR
 config PATA_EP93XX
        tristate "Cirrus Logic EP93xx PATA support"
        depends on ARCH_EP93XX
+       select PATA_TIMINGS
        help
          This option enables support for the PATA controller in
          the Cirrus Logic EP9312 and EP9315 ARM CPU.
@@ -685,6 +752,7 @@ config PATA_HPT3X3_DMA
 config PATA_ICSIDE
        tristate "Acorn ICS PATA support"
        depends on ARM && ARCH_ACORN
+       select PATA_TIMINGS
        help
          On Acorn systems, say Y here if you wish to use the ICS PATA
          interface card.  This is not required for ICS partition support.
@@ -693,6 +761,7 @@ config PATA_ICSIDE
 config PATA_IMX
        tristate "PATA support for Freescale iMX"
        depends on ARCH_MXC
+       select PATA_TIMINGS
        help
          This option enables support for the PATA host available on Freescale
           iMX SoCs.
@@ -778,6 +847,7 @@ config PATA_NINJA32
 config PATA_NS87415
        tristate "Nat Semi NS87415 PATA support"
        depends on PCI
+       select PATA_TIMINGS
        help
          This option enables support for the National Semiconductor
          NS87415 PCI-IDE controller.
@@ -902,6 +972,7 @@ config PATA_TRIFLEX
 config PATA_VIA
        tristate "VIA PATA support"
        depends on PCI
+       select PATA_TIMINGS
        help
          This option enables support for the VIA PATA interfaces
          found on the many VIA chipsets.
@@ -935,6 +1006,7 @@ comment "PIO-only SFF controllers"
 config PATA_CMD640_PCI
        tristate "CMD640 PCI PATA support (Experimental)"
        depends on PCI
+       select PATA_TIMINGS
        help
          This option enables support for the CMD640 PCI IDE
          interface chip. Only the primary channel is currently
@@ -1005,6 +1077,7 @@ config PATA_MPIIX
 config PATA_NS87410
        tristate "Nat Semi NS87410 PATA support"
        depends on PCI
+       select PATA_TIMINGS
        help
          This option enables support for the National Semiconductor
          NS87410 PCI-IDE controller.
@@ -1085,6 +1158,7 @@ config PATA_RZ1000
 config PATA_SAMSUNG_CF
        tristate "Samsung SoC PATA support"
        depends on SAMSUNG_DEV_IDE
+       select PATA_TIMINGS
        help
          This option enables basic support for Samsung's S3C/S5P board
          PATA controllers via the new ATA layer
@@ -1104,6 +1178,7 @@ comment "Generic fallback / legacy drivers"
 config PATA_ACPI
        tristate "ACPI firmware driver for PATA"
        depends on ATA_ACPI && ATA_BMDMA && PCI
+       select PATA_TIMINGS
        help
          This option enables an ACPI method driver which drives
          motherboard PATA controller interfaces through the ACPI
@@ -1113,6 +1188,7 @@ config PATA_ACPI
 config ATA_GENERIC
        tristate "Generic ATA support"
        depends on PCI && ATA_BMDMA
+       select SATA_HOST
        help
          This option enables support for generic BIOS configured
          ATA controllers via the new ATA layer
@@ -1122,6 +1198,7 @@ config ATA_GENERIC
 config PATA_LEGACY
        tristate "Legacy ISA PATA support (Experimental)"
        depends on (ISA || PCI)
+       select PATA_TIMINGS
        help
          This option enables support for ISA/VLB/PCI bus legacy PATA
          ports and allows them to be accessed via the new ATA layer.
index d8cc2e0..b8aebfb 100644
@@ -123,7 +123,9 @@ obj-$(CONFIG_PATA_LEGACY)   += pata_legacy.o
 
 libata-y       := libata-core.o libata-scsi.o libata-eh.o \
        libata-transport.o libata-trace.o
+libata-$(CONFIG_SATA_HOST)     += libata-sata.o
 libata-$(CONFIG_ATA_SFF)       += libata-sff.o
 libata-$(CONFIG_SATA_PMP)      += libata-pmp.o
 libata-$(CONFIG_ATA_ACPI)      += libata-acpi.o
 libata-$(CONFIG_SATA_ZPODD)    += libata-zpodd.o
+libata-$(CONFIG_PATA_TIMINGS)  += libata-pata-timings.o
index 11ea1af..ad0185c 100644
@@ -40,6 +40,7 @@
 enum {
        AHCI_PCI_BAR_STA2X11    = 0,
        AHCI_PCI_BAR_CAVIUM     = 0,
+       AHCI_PCI_BAR_LOONGSON   = 0,
        AHCI_PCI_BAR_ENMOTUS    = 2,
        AHCI_PCI_BAR_CAVIUM_GEN5        = 4,
        AHCI_PCI_BAR_STANDARD   = 5,
@@ -245,6 +246,7 @@ static const struct ata_port_info ahci_port_info[] = {
 
 static const struct pci_device_id ahci_pci_tbl[] = {
        /* Intel */
+       { PCI_VDEVICE(INTEL, 0x06d6), board_ahci }, /* Comet Lake PCH-H RAID */
        { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
        { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
        { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
@@ -401,6 +403,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
        { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
        { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
+       { PCI_VDEVICE(INTEL, 0x06d7), board_ahci }, /* Comet Lake-H RAID */
+       { PCI_VDEVICE(INTEL, 0xa386), board_ahci }, /* Comet Lake PCH-V RAID */
        { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_mobile }, /* Bay Trail AHCI */
        { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
        { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
@@ -589,6 +593,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        /* Enmotus */
        { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
 
+       /* Loongson */
+       { PCI_VDEVICE(LOONGSON, 0x7a08), board_ahci },
+
        /* Generic, PCI class code for AHCI */
        { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
          PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
@@ -1680,6 +1687,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                        ahci_pci_bar = AHCI_PCI_BAR_CAVIUM;
                if (pdev->device == 0xa084)
                        ahci_pci_bar = AHCI_PCI_BAR_CAVIUM_GEN5;
+       } else if (pdev->vendor == PCI_VENDOR_ID_LOONGSON) {
+               if (pdev->device == 0x7a08)
+                       ahci_pci_bar = AHCI_PCI_BAR_LOONGSON;
        }
 
        /* acquire resources */
index 42c8728..beca5f9 100644
@@ -2,10 +2,6 @@
 /*
  *  libata-core.c - helper library for ATA
  *
- *  Maintained by:  Tejun Heo <tj@kernel.org>
- *                 Please ALWAYS copy linux-ide@vger.kernel.org
- *                 on emails.
- *
  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
  *  Copyright 2003-2004 Jeff Garzik
  *
  *     http://www.compactflash.org (CF)
  *     http://www.qic.org (QIC157 - Tape and DSC)
  *     http://www.ce-ata.org (CE-ATA: not supported)
+ *
+ * libata is essentially a library of internal helper functions for
+ * low-level ATA host controller drivers.  As such, the API/ABI is
+ * likely to change as new drivers are added and updated.
+ * Do not depend on ABI/API stability.
  */
 
 #include <linux/kernel.h>
@@ -56,6 +57,7 @@
 #include <linux/leds.h>
 #include <linux/pm_runtime.h>
 #include <linux/platform_device.h>
+#include <asm/setup.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/libata.h>
 #include "libata.h"
 #include "libata-transport.h"
 
-/* debounce timing parameters in msecs { interval, duration, timeout } */
-const unsigned long sata_deb_timing_normal[]           = {   5,  100, 2000 };
-const unsigned long sata_deb_timing_hotplug[]          = {  25,  500, 2000 };
-const unsigned long sata_deb_timing_long[]             = { 100, 2000, 5000 };
-
 const struct ata_port_operations ata_base_port_ops = {
        .prereset               = ata_std_prereset,
        .postreset              = ata_std_postreset,
@@ -82,6 +79,7 @@ const struct ata_port_operations sata_port_ops = {
        .qc_defer               = ata_std_qc_defer,
        .hardreset              = sata_std_hardreset,
 };
+EXPORT_SYMBOL_GPL(sata_port_ops);
 
 static unsigned int ata_dev_init_params(struct ata_device *dev,
                                        u16 heads, u16 sectors);
@@ -91,14 +89,15 @@ static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
 
 atomic_t ata_print_id = ATOMIC_INIT(0);
 
+#ifdef CONFIG_ATA_FORCE
 struct ata_force_param {
        const char      *name;
-       unsigned int    cbl;
-       int             spd_limit;
+       u8              cbl;
+       u8              spd_limit;
        unsigned long   xfer_mask;
        unsigned int    horkage_on;
        unsigned int    horkage_off;
-       unsigned int    lflags;
+       u16             lflags;
 };
 
 struct ata_force_ent {
@@ -110,10 +109,11 @@ struct ata_force_ent {
 static struct ata_force_ent *ata_force_tbl;
 static int ata_force_tbl_size;
 
-static char ata_force_param_buf[PAGE_SIZE] __initdata;
+static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;
 /* param_buf is thrown away after initialization, disallow read */
 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
+#endif
 
 static int atapi_enabled = 1;
 module_param(atapi_enabled, int, 0444);
@@ -224,6 +224,7 @@ struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
 
        return NULL;
 }
+EXPORT_SYMBOL_GPL(ata_link_next);
 
 /**
  *     ata_dev_next - device iteration helper
@@ -277,6 +278,7 @@ struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
                goto next;
        return dev;
 }
+EXPORT_SYMBOL_GPL(ata_dev_next);
 
 /**
  *     ata_dev_phys_link - find physical link for a device
@@ -303,6 +305,7 @@ struct ata_link *ata_dev_phys_link(struct ata_device *dev)
        return ap->slave_link;
 }
 
+#ifdef CONFIG_ATA_FORCE
 /**
  *     ata_force_cbl - force cable type according to libata.force
  *     @ap: ATA port of interest
@@ -483,6 +486,11 @@ static void ata_force_horkage(struct ata_device *dev)
                               fe->param.name);
        }
 }
+#else
+static inline void ata_force_link_limits(struct ata_link *link) { }
+static inline void ata_force_xfermask(struct ata_device *dev) { }
+static inline void ata_force_horkage(struct ata_device *dev) { }
+#endif
 
 /**
  *     atapi_cmd_type - Determine ATAPI command type from SCSI opcode
@@ -521,79 +529,7 @@ int atapi_cmd_type(u8 opcode)
                return ATAPI_MISC;
        }
 }
-
-/**
- *     ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
- *     @tf: Taskfile to convert
- *     @pmp: Port multiplier port
- *     @is_cmd: This FIS is for command
- *     @fis: Buffer into which data will output
- *
- *     Converts a standard ATA taskfile to a Serial ATA
- *     FIS structure (Register - Host to Device).
- *
- *     LOCKING:
- *     Inherited from caller.
- */
-void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
-{
-       fis[0] = 0x27;                  /* Register - Host to Device FIS */
-       fis[1] = pmp & 0xf;             /* Port multiplier number*/
-       if (is_cmd)
-               fis[1] |= (1 << 7);     /* bit 7 indicates Command FIS */
-
-       fis[2] = tf->command;
-       fis[3] = tf->feature;
-
-       fis[4] = tf->lbal;
-       fis[5] = tf->lbam;
-       fis[6] = tf->lbah;
-       fis[7] = tf->device;
-
-       fis[8] = tf->hob_lbal;
-       fis[9] = tf->hob_lbam;
-       fis[10] = tf->hob_lbah;
-       fis[11] = tf->hob_feature;
-
-       fis[12] = tf->nsect;
-       fis[13] = tf->hob_nsect;
-       fis[14] = 0;
-       fis[15] = tf->ctl;
-
-       fis[16] = tf->auxiliary & 0xff;
-       fis[17] = (tf->auxiliary >> 8) & 0xff;
-       fis[18] = (tf->auxiliary >> 16) & 0xff;
-       fis[19] = (tf->auxiliary >> 24) & 0xff;
-}
-
-/**
- *     ata_tf_from_fis - Convert SATA FIS to ATA taskfile
- *     @fis: Buffer from which data will be input
- *     @tf: Taskfile to output
- *
- *     Converts a serial ATA FIS structure to a standard ATA taskfile.
- *
- *     LOCKING:
- *     Inherited from caller.
- */
-
-void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
-{
-       tf->command     = fis[2];       /* status */
-       tf->feature     = fis[3];       /* error */
-
-       tf->lbal        = fis[4];
-       tf->lbam        = fis[5];
-       tf->lbah        = fis[6];
-       tf->device      = fis[7];
-
-       tf->hob_lbal    = fis[8];
-       tf->hob_lbam    = fis[9];
-       tf->hob_lbah    = fis[10];
-
-       tf->nsect       = fis[12];
-       tf->hob_nsect   = fis[13];
-}
+EXPORT_SYMBOL_GPL(atapi_cmd_type);
 
 static const u8 ata_rw_cmds[] = {
        /* pio multi */
@@ -868,6 +804,7 @@ unsigned long ata_pack_xfermask(unsigned long pio_mask,
                ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
                ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
 }
+EXPORT_SYMBOL_GPL(ata_pack_xfermask);
 
 /**
  *     ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
@@ -923,6 +860,7 @@ u8 ata_xfer_mask2mode(unsigned long xfer_mask)
                        return ent->base + highbit - ent->shift;
        return 0xff;
 }
+EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
 
 /**
  *     ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
@@ -946,6 +884,7 @@ unsigned long ata_xfer_mode2mask(u8 xfer_mode)
                                & ~((1 << ent->shift) - 1);
        return 0;
 }
+EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
 
 /**
  *     ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
@@ -968,6 +907,7 @@ int ata_xfer_mode2shift(unsigned long xfer_mode)
                        return ent->shift;
        return -1;
 }
+EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
 
 /**
  *     ata_mode_string - convert xfer_mask to string
@@ -1014,6 +954,7 @@ const char *ata_mode_string(unsigned long xfer_mask)
                return xfer_mode_str[highbit];
        return "<n/a>";
 }
+EXPORT_SYMBOL_GPL(ata_mode_string);
 
 const char *sata_spd_string(unsigned int spd)
 {
@@ -1094,6 +1035,7 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
        DPRINTK("unknown device\n");
        return ATA_DEV_UNKNOWN;
 }
+EXPORT_SYMBOL_GPL(ata_dev_classify);
 
 /**
  *     ata_id_string - Convert IDENTIFY DEVICE page into string
@@ -1130,6 +1072,7 @@ void ata_id_string(const u16 *id, unsigned char *s,
                len -= 2;
        }
 }
+EXPORT_SYMBOL_GPL(ata_id_string);
 
 /**
  *     ata_id_c_string - Convert IDENTIFY DEVICE page into C string
@@ -1157,6 +1100,7 @@ void ata_id_c_string(const u16 *id, unsigned char *s,
                p--;
        *p = '\0';
 }
+EXPORT_SYMBOL_GPL(ata_id_c_string);
 
 static u64 ata_id_n_sectors(const u16 *id)
 {
@@ -1514,6 +1458,7 @@ unsigned long ata_id_xfermask(const u16 *id)
 
        return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
 }
+EXPORT_SYMBOL_GPL(ata_id_xfermask);
 
 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
 {
@@ -1771,6 +1716,7 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev)
                return 1;
        return 0;
 }
+EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
 
 /**
  *     ata_pio_mask_no_iordy   -       Return the non IORDY mask
@@ -1811,6 +1757,7 @@ unsigned int ata_do_dev_read_id(struct ata_device *dev,
        return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
                                     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
 }
+EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
 
 /**
  *     ata_dev_read_id - Read ID data from the specified device
@@ -2265,6 +2212,8 @@ static int ata_dev_config_ncq(struct ata_device *dev,
                desc[0] = '\0';
                return 0;
        }
+       if (!IS_ENABLED(CONFIG_SATA_HOST))
+               return 0;
        if (dev->horkage & ATA_HORKAGE_NONCQ) {
                snprintf(desc, desc_sz, "NCQ (not used)");
                return 0;
@@ -2783,6 +2732,7 @@ int ata_cable_40wire(struct ata_port *ap)
 {
        return ATA_CBL_PATA40;
 }
+EXPORT_SYMBOL_GPL(ata_cable_40wire);
 
 /**
  *     ata_cable_80wire        -       return 80 wire cable type
@@ -2796,6 +2746,7 @@ int ata_cable_80wire(struct ata_port *ap)
 {
        return ATA_CBL_PATA80;
 }
+EXPORT_SYMBOL_GPL(ata_cable_80wire);
 
 /**
  *     ata_cable_unknown       -       return unknown PATA cable.
@@ -2808,6 +2759,7 @@ int ata_cable_unknown(struct ata_port *ap)
 {
        return ATA_CBL_PATA_UNK;
 }
+EXPORT_SYMBOL_GPL(ata_cable_unknown);
 
 /**
  *     ata_cable_ignore        -       return ignored PATA cable.
@@ -2820,6 +2772,7 @@ int ata_cable_ignore(struct ata_port *ap)
 {
        return ATA_CBL_PATA_IGN;
 }
+EXPORT_SYMBOL_GPL(ata_cable_ignore);
 
 /**
  *     ata_cable_sata  -       return SATA cable type
@@ -2832,6 +2785,7 @@ int ata_cable_sata(struct ata_port *ap)
 {
        return ATA_CBL_SATA;
 }
+EXPORT_SYMBOL_GPL(ata_cable_sata);
 
 /**
  *     ata_bus_probe - Reset and probe ATA bus
@@ -3014,6 +2968,7 @@ struct ata_device *ata_dev_pair(struct ata_device *adev)
                return NULL;
        return pair;
 }
+EXPORT_SYMBOL_GPL(ata_dev_pair);
 
 /**
  *     sata_down_spd_limit - adjust SATA spd limit downward
@@ -3095,252 +3050,7 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
        return 0;
 }
 
-static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
-{
-       struct ata_link *host_link = &link->ap->link;
-       u32 limit, target, spd;
-
-       limit = link->sata_spd_limit;
-
-       /* Don't configure downstream link faster than upstream link.
-        * It doesn't speed up anything and some PMPs choke on such
-        * configuration.
-        */
-       if (!ata_is_host_link(link) && host_link->sata_spd)
-               limit &= (1 << host_link->sata_spd) - 1;
-
-       if (limit == UINT_MAX)
-               target = 0;
-       else
-               target = fls(limit);
-
-       spd = (*scontrol >> 4) & 0xf;
-       *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
-
-       return spd != target;
-}
-
-/**
- *     sata_set_spd_needed - is SATA spd configuration needed
- *     @link: Link in question
- *
- *     Test whether the spd limit in SControl matches
- *     @link->sata_spd_limit.  This function is used to determine
- *     whether hardreset is necessary to apply SATA spd
- *     configuration.
- *
- *     LOCKING:
- *     Inherited from caller.
- *
- *     RETURNS:
- *     1 if SATA spd configuration is needed, 0 otherwise.
- */
-static int sata_set_spd_needed(struct ata_link *link)
-{
-       u32 scontrol;
-
-       if (sata_scr_read(link, SCR_CONTROL, &scontrol))
-               return 1;
-
-       return __sata_set_spd_needed(link, &scontrol);
-}
-
-/**
- *     sata_set_spd - set SATA spd according to spd limit
- *     @link: Link to set SATA spd for
- *
- *     Set SATA spd of @link according to sata_spd_limit.
- *
- *     LOCKING:
- *     Inherited from caller.
- *
- *     RETURNS:
- *     0 if spd doesn't need to be changed, 1 if spd has been
- *     changed.  Negative errno if SCR registers are inaccessible.
- */
-int sata_set_spd(struct ata_link *link)
-{
-       u32 scontrol;
-       int rc;
-
-       if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
-               return rc;
-
-       if (!__sata_set_spd_needed(link, &scontrol))
-               return 0;
-
-       if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
-               return rc;
-
-       return 1;
-}
-
-/*
- * This mode timing computation functionality is ported over from
- * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
- */
-/*
- * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
- * These were taken from ATA/ATAPI-6 standard, rev 0a, except
- * for UDMA6, which is currently supported only by Maxtor drives.
- *
- * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
- */
-
-static const struct ata_timing ata_timing[] = {
-/*     { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
-       { XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
-       { XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
-       { XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
-       { XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
-       { XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
-       { XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
-       { XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },
-
-       { XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
-       { XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
-       { XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },
-
-       { XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
-       { XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
-       { XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
-       { XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
-       { XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },
-
-/*     { XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
-       { XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
-       { XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
-       { XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
-       { XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
-       { XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
-       { XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
-       { XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },
-
-       { 0xFF }
-};
-
-#define ENOUGH(v, unit)                (((v)-1)/(unit)+1)
-#define EZ(v, unit)            ((v)?ENOUGH(((v) * 1000), unit):0)
-
-static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
-{
-       q->setup        = EZ(t->setup,       T);
-       q->act8b        = EZ(t->act8b,       T);
-       q->rec8b        = EZ(t->rec8b,       T);
-       q->cyc8b        = EZ(t->cyc8b,       T);
-       q->active       = EZ(t->active,      T);
-       q->recover      = EZ(t->recover,     T);
-       q->dmack_hold   = EZ(t->dmack_hold,  T);
-       q->cycle        = EZ(t->cycle,       T);
-       q->udma         = EZ(t->udma,       UT);
-}
-
-void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
-                     struct ata_timing *m, unsigned int what)
-{
-       if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
-       if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
-       if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
-       if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
-       if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
-       if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
-       if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
-       if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
-       if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
-}
-
-const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
-{
-       const struct ata_timing *t = ata_timing;
-
-       while (xfer_mode > t->mode)
-               t++;
-
-       if (xfer_mode == t->mode)
-               return t;
-
-       WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
-                       __func__, xfer_mode);
-
-       return NULL;
-}
-
-int ata_timing_compute(struct ata_device *adev, unsigned short speed,
-                      struct ata_timing *t, int T, int UT)
-{
-       const u16 *id = adev->id;
-       const struct ata_timing *s;
-       struct ata_timing p;
-
-       /*
-        * Find the mode.
-        */
-
-       if (!(s = ata_timing_find_mode(speed)))
-               return -EINVAL;
-
-       memcpy(t, s, sizeof(*s));
-
-       /*
-        * If the drive is an EIDE drive, it can tell us it needs extended
-        * PIO/MW_DMA cycle timing.
-        */
-
-       if (id[ATA_ID_FIELD_VALID] & 2) {       /* EIDE drive */
-               memset(&p, 0, sizeof(p));
-
-               if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
-                       if (speed <= XFER_PIO_2)
-                               p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
-                       else if ((speed <= XFER_PIO_4) ||
-                                (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
-                               p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
-               } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
-                       p.cycle = id[ATA_ID_EIDE_DMA_MIN];
-
-               ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
-       }
-
-       /*
-        * Convert the timing to bus clock counts.
-        */
-
-       ata_timing_quantize(t, t, T, UT);
-
-       /*
-        * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
-        * S.M.A.R.T * and some other commands. We have to ensure that the
-        * DMA cycle timing is slower/equal than the fastest PIO timing.
-        */
-
-       if (speed > XFER_PIO_6) {
-               ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
-               ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
-       }
-
-       /*
-        * Lengthen active & recovery time so that cycle time is correct.
-        */
-
-       if (t->act8b + t->rec8b < t->cyc8b) {
-               t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
-               t->rec8b = t->cyc8b - t->act8b;
-       }
-
-       if (t->active + t->recover < t->cycle) {
-               t->active += (t->cycle - (t->active + t->recover)) / 2;
-               t->recover = t->cycle - t->active;
-       }
-
-       /* In a few cases quantisation may produce enough errors to
-          leave t->cycle too low for the sum of active and recovery
-          if so we must correct this */
-       if (t->active + t->recover > t->cycle)
-               t->cycle = t->active + t->recover;
-
-       return 0;
-}
-
+#ifdef CONFIG_ATA_ACPI
 /**
  *     ata_timing_cycle2mode - find xfer mode for the specified cycle duration
  *     @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
@@ -3391,6 +3101,7 @@ u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
 
        return last_mode;
 }
+#endif
 
 /**
  *     ata_down_xfermask_limit - adjust dev xfer masks downward
@@ -3662,6 +3373,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
                *r_failed_dev = dev;
        return rc;
 }
+EXPORT_SYMBOL_GPL(ata_do_set_mode);
 
 /**
  *     ata_wait_ready - wait for link to become ready
@@ -3771,216 +3483,7 @@ int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
 
        return ata_wait_ready(link, deadline, check_ready);
 }
-
-/**
- *     sata_link_debounce - debounce SATA phy status
- *     @link: ATA link to debounce SATA phy status for
- *     @params: timing parameters { interval, duration, timeout } in msec
- *     @deadline: deadline jiffies for the operation
- *
- *     Make sure SStatus of @link reaches stable state, determined by
- *     holding the same value where DET is not 1 for @duration polled
- *     every @interval, before @timeout.  Timeout constraints the
- *     beginning of the stable state.  Because DET gets stuck at 1 on
- *     some controllers after hot unplugging, this functions waits
- *     until timeout then returns 0 if DET is stable at 1.
- *
- *     @timeout is further limited by @deadline.  The sooner of the
- *     two is used.
- *
- *     LOCKING:
- *     Kernel thread context (may sleep)
- *
- *     RETURNS:
- *     0 on success, -errno on failure.
- */
-int sata_link_debounce(struct ata_link *link, const unsigned long *params,
-                      unsigned long deadline)
-{
-       unsigned long interval = params[0];
-       unsigned long duration = params[1];
-       unsigned long last_jiffies, t;
-       u32 last, cur;
-       int rc;
-
-       t = ata_deadline(jiffies, params[2]);
-       if (time_before(t, deadline))
-               deadline = t;
-
-       if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
-               return rc;
-       cur &= 0xf;
-
-       last = cur;
-       last_jiffies = jiffies;
-
-       while (1) {
-               ata_msleep(link->ap, interval);
-               if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
-                       return rc;
-               cur &= 0xf;
-
-               /* DET stable? */
-               if (cur == last) {
-                       if (cur == 1 && time_before(jiffies, deadline))
-                               continue;
-                       if (time_after(jiffies,
-                                      ata_deadline(last_jiffies, duration)))
-                               return 0;
-                       continue;
-               }
-
-               /* unstable, start over */
-               last = cur;
-               last_jiffies = jiffies;
-
-               /* Check deadline.  If debouncing failed, return
-                * -EPIPE to tell upper layer to lower link speed.
-                */
-               if (time_after(jiffies, deadline))
-                       return -EPIPE;
-       }
-}
-
-/**
- *     sata_link_resume - resume SATA link
- *     @link: ATA link to resume SATA
- *     @params: timing parameters { interval, duration, timeout } in msec
- *     @deadline: deadline jiffies for the operation
- *
- *     Resume SATA phy @link and debounce it.
- *
- *     LOCKING:
- *     Kernel thread context (may sleep)
- *
- *     RETURNS:
- *     0 on success, -errno on failure.
- */
-int sata_link_resume(struct ata_link *link, const unsigned long *params,
-                    unsigned long deadline)
-{
-       int tries = ATA_LINK_RESUME_TRIES;
-       u32 scontrol, serror;
-       int rc;
-
-       if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
-               return rc;
-
-       /*
-        * Writes to SControl sometimes get ignored under certain
-        * controllers (ata_piix SIDPR).  Make sure DET actually is
-        * cleared.
-        */
-       do {
-               scontrol = (scontrol & 0x0f0) | 0x300;
-               if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
-                       return rc;
-               /*
-                * Some PHYs react badly if SStatus is pounded
-                * immediately after resuming.  Delay 200ms before
-                * debouncing.
-                */
-               if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
-                       ata_msleep(link->ap, 200);
-
-               /* is SControl restored correctly? */
-               if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
-                       return rc;
-       } while ((scontrol & 0xf0f) != 0x300 && --tries);
-
-       if ((scontrol & 0xf0f) != 0x300) {
-               ata_link_warn(link, "failed to resume link (SControl %X)\n",
-                            scontrol);
-               return 0;
-       }
-
-       if (tries < ATA_LINK_RESUME_TRIES)
-               ata_link_warn(link, "link resume succeeded after %d retries\n",
-                             ATA_LINK_RESUME_TRIES - tries);
-
-       if ((rc = sata_link_debounce(link, params, deadline)))
-               return rc;
-
-       /* clear SError, some PHYs require this even for SRST to work */
-       if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
-               rc = sata_scr_write(link, SCR_ERROR, serror);
-
-       return rc != -EINVAL ? rc : 0;
-}
-
-/**
- *     sata_link_scr_lpm - manipulate SControl IPM and SPM fields
- *     @link: ATA link to manipulate SControl for
- *     @policy: LPM policy to configure
- *     @spm_wakeup: initiate LPM transition to active state
- *
- *     Manipulate the IPM field of the SControl register of @link
- *     according to @policy.  If @policy is ATA_LPM_MAX_POWER and
- *     @spm_wakeup is %true, the SPM field is manipulated to wake up
- *     the link.  This function also clears PHYRDY_CHG before
- *     returning.
- *
- *     LOCKING:
- *     EH context.
- *
- *     RETURNS:
- *     0 on success, -errno otherwise.
- */
-int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
-                     bool spm_wakeup)
-{
-       struct ata_eh_context *ehc = &link->eh_context;
-       bool woken_up = false;
-       u32 scontrol;
-       int rc;
-
-       rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
-       if (rc)
-               return rc;
-
-       switch (policy) {
-       case ATA_LPM_MAX_POWER:
-               /* disable all LPM transitions */
-               scontrol |= (0x7 << 8);
-               /* initiate transition to active state */
-               if (spm_wakeup) {
-                       scontrol |= (0x4 << 12);
-                       woken_up = true;
-               }
-               break;
-       case ATA_LPM_MED_POWER:
-               /* allow LPM to PARTIAL */
-               scontrol &= ~(0x1 << 8);
-               scontrol |= (0x6 << 8);
-               break;
-       case ATA_LPM_MED_POWER_WITH_DIPM:
-       case ATA_LPM_MIN_POWER_WITH_PARTIAL:
-       case ATA_LPM_MIN_POWER:
-               if (ata_link_nr_enabled(link) > 0)
-                       /* no restrictions on LPM transitions */
-                       scontrol &= ~(0x7 << 8);
-               else {
-                       /* empty port, power off */
-                       scontrol &= ~0xf;
-                       scontrol |= (0x1 << 2);
-               }
-               break;
-       default:
-               WARN_ON(1);
-       }
-
-       rc = sata_scr_write(link, SCR_CONTROL, scontrol);
-       if (rc)
-               return rc;
-
-       /* give the link time to transit out of LPM state */
-       if (woken_up)
-               msleep(10);
-
-       /* clear PHYRDY_CHG from SError */
-       ehc->i.serror &= ~SERR_PHYRDY_CHG;
-       return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
-}
+EXPORT_SYMBOL_GPL(ata_wait_after_reset);
 
 /**
  *     ata_std_prereset - prepare for reset
@@ -4026,118 +3529,7 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline)
 
        return 0;
 }
-
-/**
- *     sata_link_hardreset - reset link via SATA phy reset
- *     @link: link to reset
- *     @timing: timing parameters { interval, duration, timeout } in msec
- *     @deadline: deadline jiffies for the operation
- *     @online: optional out parameter indicating link onlineness
- *     @check_ready: optional callback to check link readiness
- *
- *     SATA phy-reset @link using DET bits of SControl register.
- *     After hardreset, link readiness is waited upon using
- *     ata_wait_ready() if @check_ready is specified.  LLDs are
- *     allowed to not specify @check_ready and wait itself after this
- *     function returns.  Device classification is LLD's
- *     responsibility.
- *
- *     *@online is set to one iff reset succeeded and @link is online
- *     after reset.
- *
- *     LOCKING:
- *     Kernel thread context (may sleep)
- *
- *     RETURNS:
- *     0 on success, -errno otherwise.
- */
-int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
-                       unsigned long deadline,
-                       bool *online, int (*check_ready)(struct ata_link *))
-{
-       u32 scontrol;
-       int rc;
-
-       DPRINTK("ENTER\n");
-
-       if (online)
-               *online = false;
-
-       if (sata_set_spd_needed(link)) {
-               /* SATA spec says nothing about how to reconfigure
-                * spd.  To be on the safe side, turn off phy during
-                * reconfiguration.  This works for at least ICH7 AHCI
-                * and Sil3124.
-                */
-               if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
-                       goto out;
-
-               scontrol = (scontrol & 0x0f0) | 0x304;
-
-               if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
-                       goto out;
-
-               sata_set_spd(link);
-       }
-
-       /* issue phy wake/reset */
-       if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
-               goto out;
-
-       scontrol = (scontrol & 0x0f0) | 0x301;
-
-       if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
-               goto out;
-
-       /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
-        * 10.4.2 says at least 1 ms.
-        */
-       ata_msleep(link->ap, 1);
-
-       /* bring link back */
-       rc = sata_link_resume(link, timing, deadline);
-       if (rc)
-               goto out;
-       /* if link is offline nothing more to do */
-       if (ata_phys_link_offline(link))
-               goto out;
-
-       /* Link is online.  From this point, -ENODEV too is an error. */
-       if (online)
-               *online = true;
-
-       if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
-               /* If PMP is supported, we have to do follow-up SRST.
-                * Some PMPs don't send D2H Reg FIS after hardreset if
-                * the first port is empty.  Wait only for
-                * ATA_TMOUT_PMP_SRST_WAIT.
-                */
-               if (check_ready) {
-                       unsigned long pmp_deadline;
-
-                       pmp_deadline = ata_deadline(jiffies,
-                                                   ATA_TMOUT_PMP_SRST_WAIT);
-                       if (time_after(pmp_deadline, deadline))
-                               pmp_deadline = deadline;
-                       ata_wait_ready(link, pmp_deadline, check_ready);
-               }
-               rc = -EAGAIN;
-               goto out;
-       }
-
-       rc = 0;
-       if (check_ready)
-               rc = ata_wait_ready(link, deadline, check_ready);
- out:
-       if (rc && rc != -EAGAIN) {
-               /* online is set iff link is online && reset succeeded */
-               if (online)
-                       *online = false;
-               ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
-       }
-       DPRINTK("EXIT, rc=%d\n", rc);
-       return rc;
-}
+EXPORT_SYMBOL_GPL(ata_std_prereset);
 
 /**
  *     sata_std_hardreset - COMRESET w/o waiting or classification
@@ -4164,6 +3556,7 @@ int sata_std_hardreset(struct ata_link *link, unsigned int *class,
        rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
        return online ? -EAGAIN : rc;
 }
+EXPORT_SYMBOL_GPL(sata_std_hardreset);
 
 /**
  *     ata_std_postreset - standard postreset callback
@@ -4192,6 +3585,7 @@ void ata_std_postreset(struct ata_link *link, unsigned int *classes)
 
        DPRINTK("EXIT\n");
 }
+EXPORT_SYMBOL_GPL(ata_std_postreset);
 
 /**
  *     ata_dev_same_device - Determine whether new ID matches configured device
@@ -4979,11 +4373,13 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc)
 
        return ATA_DEFER_LINK;
 }
+EXPORT_SYMBOL_GPL(ata_std_qc_defer);
 
 enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
 {
        return AC_ERR_OK;
 }
+EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
 
 /**
  *     ata_sg_init - Associate command with scatter-gather table.
@@ -5327,6 +4723,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
                __ata_qc_complete(qc);
        }
 }
+EXPORT_SYMBOL_GPL(ata_qc_complete);
 
 /**
  *     ata_qc_get_active - get bitmask of active qcs
@@ -5353,64 +4750,6 @@ u64 ata_qc_get_active(struct ata_port *ap)
 EXPORT_SYMBOL_GPL(ata_qc_get_active);
 
 /**
- *     ata_qc_complete_multiple - Complete multiple qcs successfully
- *     @ap: port in question
- *     @qc_active: new qc_active mask
- *
- *     Complete in-flight commands.  This functions is meant to be
- *     called from low-level driver's interrupt routine to complete
- *     requests normally.  ap->qc_active and @qc_active is compared
- *     and commands are completed accordingly.
- *
- *     Always use this function when completing multiple NCQ commands
- *     from IRQ handlers instead of calling ata_qc_complete()
- *     multiple times to keep IRQ expect status properly in sync.
- *
- *     LOCKING:
- *     spin_lock_irqsave(host lock)
- *
- *     RETURNS:
- *     Number of completed commands on success, -errno otherwise.
- */
-int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
-{
-       u64 done_mask, ap_qc_active = ap->qc_active;
-       int nr_done = 0;
-
-       /*
-        * If the internal tag is set on ap->qc_active, then we care about
-        * bit0 on the passed in qc_active mask. Move that bit up to match
-        * the internal tag.
-        */
-       if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
-               qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
-               qc_active ^= qc_active & 0x01;
-       }
-
-       done_mask = ap_qc_active ^ qc_active;
-
-       if (unlikely(done_mask & qc_active)) {
-               ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
-                            ap->qc_active, qc_active);
-               return -EINVAL;
-       }
-
-       while (done_mask) {
-               struct ata_queued_cmd *qc;
-               unsigned int tag = __ffs64(done_mask);
-
-               qc = ata_qc_from_tag(ap, tag);
-               if (qc) {
-                       ata_qc_complete(qc);
-                       nr_done++;
-               }
-               done_mask &= ~(1ULL << tag);
-       }
-
-       return nr_done;
-}
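
As a reference for the done_mask arithmetic in the removed ata_qc_complete_multiple() above, here is a minimal user-space sketch of the same bit manipulation. Names are hypothetical, __builtin_ctzll stands in for __ffs64, printf stands in for ata_qc_complete(), and the internal-tag bit shuffle is omitted; this is an illustration, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Complete every tag that was active before but is no longer active now. */
static int complete_done_tags(uint64_t ap_qc_active, uint64_t qc_active)
{
        uint64_t done_mask = ap_qc_active ^ qc_active;
        int nr_done = 0;

        if (done_mask & qc_active)      /* a tag became active "out of nowhere" */
                return -1;              /* illegal qc_active transition */

        while (done_mask) {
                unsigned int tag = __builtin_ctzll(done_mask);  /* lowest set bit */

                printf("completing tag %u\n", tag);     /* ata_qc_complete(qc) in the kernel */
                nr_done++;
                done_mask &= ~(1ULL << tag);
        }
        return nr_done;
}

int main(void)
{
        /* tags 0, 1 and 3 were in flight; the controller reports only tag 0 still active */
        return complete_done_tags(0xBULL, 0x1ULL) == 2 ? 0 : 1;
}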
-
-/**
  *     ata_qc_issue - issue taskfile to device
  *     @qc: command to issue to device
  *
@@ -5486,111 +4825,6 @@ err:
 }
 
 /**
- *     sata_scr_valid - test whether SCRs are accessible
- *     @link: ATA link to test SCR accessibility for
- *
- *     Test whether SCRs are accessible for @link.
- *
- *     LOCKING:
- *     None.
- *
- *     RETURNS:
- *     1 if SCRs are accessible, 0 otherwise.
- */
-int sata_scr_valid(struct ata_link *link)
-{
-       struct ata_port *ap = link->ap;
-
-       return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
-}
-
-/**
- *     sata_scr_read - read SCR register of the specified port
- *     @link: ATA link to read SCR for
- *     @reg: SCR to read
- *     @val: Place to store read value
- *
- *     Read SCR register @reg of @link into *@val.  This function is
- *     guaranteed to succeed if @link is ap->link, the cable type of
- *     the port is SATA and the port implements ->scr_read.
- *
- *     LOCKING:
- *     None if @link is ap->link.  Kernel thread context otherwise.
- *
- *     RETURNS:
- *     0 on success, negative errno on failure.
- */
-int sata_scr_read(struct ata_link *link, int reg, u32 *val)
-{
-       if (ata_is_host_link(link)) {
-               if (sata_scr_valid(link))
-                       return link->ap->ops->scr_read(link, reg, val);
-               return -EOPNOTSUPP;
-       }
-
-       return sata_pmp_scr_read(link, reg, val);
-}
-
-/**
- *     sata_scr_write - write SCR register of the specified port
- *     @link: ATA link to write SCR for
- *     @reg: SCR to write
- *     @val: value to write
- *
- *     Write @val to SCR register @reg of @link.  This function is
- *     guaranteed to succeed if @link is ap->link, the cable type of
- *     the port is SATA and the port implements ->scr_read.
- *
- *     LOCKING:
- *     None if @link is ap->link.  Kernel thread context otherwise.
- *
- *     RETURNS:
- *     0 on success, negative errno on failure.
- */
-int sata_scr_write(struct ata_link *link, int reg, u32 val)
-{
-       if (ata_is_host_link(link)) {
-               if (sata_scr_valid(link))
-                       return link->ap->ops->scr_write(link, reg, val);
-               return -EOPNOTSUPP;
-       }
-
-       return sata_pmp_scr_write(link, reg, val);
-}
-
-/**
- *     sata_scr_write_flush - write SCR register of the specified port and flush
- *     @link: ATA link to write SCR for
- *     @reg: SCR to write
- *     @val: value to write
- *
- *     This function is identical to sata_scr_write() except that this
- *     function performs flush after writing to the register.
- *
- *     LOCKING:
- *     None if @link is ap->link.  Kernel thread context otherwise.
- *
- *     RETURNS:
- *     0 on success, negative errno on failure.
- */
-int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
-{
-       if (ata_is_host_link(link)) {
-               int rc;
-
-               if (sata_scr_valid(link)) {
-                       rc = link->ap->ops->scr_write(link, reg, val);
-                       if (rc == 0)
-                               rc = link->ap->ops->scr_read(link, reg, &val);
-                       return rc;
-               }
-               return -EOPNOTSUPP;
-       }
-
-       return sata_pmp_scr_write(link, reg, val);
-}
-
-/**
  *     ata_phys_link_online - test whether the given link is online
  *     @link: ATA link to test
  *
@@ -5663,6 +4897,7 @@ bool ata_link_online(struct ata_link *link)
        return ata_phys_link_online(link) ||
                (slave && ata_phys_link_online(slave));
 }
+EXPORT_SYMBOL_GPL(ata_link_online);
 
 /**
  *     ata_link_offline - test whether the given link is offline
@@ -5689,6 +4924,7 @@ bool ata_link_offline(struct ata_link *link)
        return ata_phys_link_offline(link) &&
                (!slave || ata_phys_link_offline(slave));
 }
+EXPORT_SYMBOL_GPL(ata_link_offline);
 
 #ifdef CONFIG_PM
 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
@@ -5875,6 +5111,7 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
        host->dev->power.power_state = mesg;
        return 0;
 }
+EXPORT_SYMBOL_GPL(ata_host_suspend);
 
 /**
  *     ata_host_resume - resume host
@@ -5886,6 +5123,7 @@ void ata_host_resume(struct ata_host *host)
 {
        host->dev->power.power_state = PMSG_ON;
 }
+EXPORT_SYMBOL_GPL(ata_host_resume);
 #endif
 
 const struct device_type ata_port_type = {
@@ -6105,6 +5343,7 @@ void ata_host_put(struct ata_host *host)
 {
        kref_put(&host->kref, ata_host_release);
 }
+EXPORT_SYMBOL_GPL(ata_host_put);
 
 /**
  *     ata_host_alloc - allocate and init basic ATA host resources
@@ -6178,6 +5417,7 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
        kfree(host);
        return NULL;
 }
+EXPORT_SYMBOL_GPL(ata_host_alloc);
 
 /**
  *     ata_host_alloc_pinfo - alloc host and init with port_info array
@@ -6226,68 +5466,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
 
        return host;
 }
-
-/**
- *     ata_slave_link_init - initialize slave link
- *     @ap: port to initialize slave link for
- *
- *     Create and initialize slave link for @ap.  This enables slave
- *     link handling on the port.
- *
- *     In libata, a port contains links and a link contains devices.
- *     There is single host link but if a PMP is attached to it,
- *     there can be multiple fan-out links.  On SATA, there's usually
- *     a single device connected to a link but PATA and SATA
- *     controllers emulating TF based interface can have two - master
- *     and slave.
- *
- *     However, there are a few controllers which don't fit into this
- *     abstraction too well - SATA controllers which emulate TF
- *     interface with both master and slave devices but also have
- *     separate SCR register sets for each device.  These controllers
- *     need separate links for physical link handling
- *     (e.g. onlineness, link speed) but should be treated like a
- *     traditional M/S controller for everything else (e.g. command
- *     issue, softreset).
- *
- *     slave_link is libata's way of handling this class of
- *     controllers without impacting core layer too much.  For
- *     anything other than physical link handling, the default host
- *     link is used for both master and slave.  For physical link
- *     handling, separate @ap->slave_link is used.  All dirty details
- *     are implemented inside libata core layer.  From LLD's POV, the
- *     only difference is that prereset, hardreset and postreset are
- *     called once more for the slave link, so the reset sequence
- *     looks like the following.
- *
- *     prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
- *     softreset(M) -> postreset(M) -> postreset(S)
- *
- *     Note that softreset is called only for the master.  Softreset
- *     resets both M/S by definition, so SRST on master should handle
- *     both (the standard method will work just fine).
- *
- *     LOCKING:
- *     Should be called before host is registered.
- *
- *     RETURNS:
- *     0 on success, -errno on failure.
- */
-int ata_slave_link_init(struct ata_port *ap)
-{
-       struct ata_link *link;
-
-       WARN_ON(ap->slave_link);
-       WARN_ON(ap->flags & ATA_FLAG_PMP);
-
-       link = kzalloc(sizeof(*link), GFP_KERNEL);
-       if (!link)
-               return -ENOMEM;
-
-       ata_link_init(ap, link, 1);
-       ap->slave_link = link;
-       return 0;
-}
+EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
 
 static void ata_host_stop(struct device *gendev, void *res)
 {
@@ -6436,6 +5615,7 @@ int ata_host_start(struct ata_host *host)
        devres_free(start_dr);
        return rc;
 }
+EXPORT_SYMBOL_GPL(ata_host_start);
 
 /**
  *     ata_sas_host_init - Initialize a host struct for sas (ipr, libsas)
@@ -6454,6 +5634,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
        host->ops = ops;
        kref_init(&host->kref);
 }
+EXPORT_SYMBOL_GPL(ata_host_init);
 
 void __ata_port_probe(struct ata_port *ap)
 {
@@ -6609,6 +5790,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
        return rc;
 
 }
+EXPORT_SYMBOL_GPL(ata_host_register);
 
 /**
  *     ata_host_activate - start host, request IRQ and register it
@@ -6671,6 +5853,7 @@ int ata_host_activate(struct ata_host *host, int irq,
 
        return rc;
 }
+EXPORT_SYMBOL_GPL(ata_host_activate);
 
 /**
  *     ata_port_detach - Detach ATA port in preparation of device removal
@@ -6746,6 +5929,7 @@ void ata_host_detach(struct ata_host *host)
        /* the host is dead now, dissociate ACPI */
        ata_acpi_dissociate(host);
 }
+EXPORT_SYMBOL_GPL(ata_host_detach);
 
 #ifdef CONFIG_PCI
 
@@ -6766,6 +5950,7 @@ void ata_pci_remove_one(struct pci_dev *pdev)
 
        ata_host_detach(host);
 }
+EXPORT_SYMBOL_GPL(ata_pci_remove_one);
 
 void ata_pci_shutdown_one(struct pci_dev *pdev)
 {
@@ -6786,6 +5971,7 @@ void ata_pci_shutdown_one(struct pci_dev *pdev)
                        ap->ops->port_stop(ap);
        }
 }
+EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
 
 /* move to PCI subsystem */
 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
@@ -6820,6 +6006,7 @@ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
 
        return (tmp == bits->val) ? 1 : 0;
 }
+EXPORT_SYMBOL_GPL(pci_test_config_bits);
 
 #ifdef CONFIG_PM
 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
@@ -6830,6 +6017,7 @@ void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
        if (mesg.event & PM_EVENT_SLEEP)
                pci_set_power_state(pdev, PCI_D3hot);
 }
+EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
 
 int ata_pci_device_do_resume(struct pci_dev *pdev)
 {
@@ -6848,6 +6036,7 @@ int ata_pci_device_do_resume(struct pci_dev *pdev)
        pci_set_master(pdev);
        return 0;
 }
+EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
 
 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 {
@@ -6862,6 +6051,7 @@ int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
 
 int ata_pci_device_resume(struct pci_dev *pdev)
 {
@@ -6873,8 +6063,8 @@ int ata_pci_device_resume(struct pci_dev *pdev)
                ata_host_resume(host);
        return rc;
 }
+EXPORT_SYMBOL_GPL(ata_pci_device_resume);
 #endif /* CONFIG_PM */
-
 #endif /* CONFIG_PCI */
 
 /**
@@ -6896,7 +6086,9 @@ int ata_platform_remove_one(struct platform_device *pdev)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(ata_platform_remove_one);
 
+#ifdef CONFIG_ATA_FORCE
 static int __init ata_parse_force_one(char **cur,
                                      struct ata_force_ent *force_ent,
                                      const char **reason)
@@ -7076,6 +6268,15 @@ static void __init ata_parse_force_param(void)
        ata_force_tbl_size = idx;
 }
 
+static void ata_free_force_param(void)
+{
+       kfree(ata_force_tbl);
+}
+#else
+static inline void ata_parse_force_param(void) { }
+static inline void ata_free_force_param(void) { }
+#endif
+
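The new CONFIG_ATA_FORCE block above uses the usual kernel idiom of pairing the real implementation with empty static inline stubs, so that callers such as ata_init() and ata_exit() need no conditional compilation. A minimal sketch of the idiom, with a hypothetical CONFIG_FOO option and foo_* names:

/* Sketch only: the #ifdef-stub idiom, not part of this patch. */
#ifdef CONFIG_FOO
void foo_setup(void);                       /* real implementation built elsewhere */
void foo_teardown(void);
#else
static inline void foo_setup(void) { }      /* compiles away when CONFIG_FOO=n */
static inline void foo_teardown(void) { }
#endif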
 static int __init ata_init(void)
 {
        int rc;
@@ -7084,7 +6285,7 @@ static int __init ata_init(void)
 
        rc = ata_sff_init();
        if (rc) {
-               kfree(ata_force_tbl);
+               ata_free_force_param();
                return rc;
        }
 
@@ -7108,7 +6309,7 @@ static void __exit ata_exit(void)
        ata_release_transport(ata_scsi_transport_template);
        libata_transport_exit();
        ata_sff_exit();
-       kfree(ata_force_tbl);
+       ata_free_force_param();
 }
 
 subsys_initcall(ata_init);
@@ -7120,6 +6321,7 @@ int ata_ratelimit(void)
 {
        return __ratelimit(&ratelimit);
 }
+EXPORT_SYMBOL_GPL(ata_ratelimit);
 
 /**
  *     ata_msleep - ATA EH owner aware msleep
@@ -7152,6 +6354,7 @@ void ata_msleep(struct ata_port *ap, unsigned int msecs)
        if (owns_eh)
                ata_eh_acquire(ap);
 }
+EXPORT_SYMBOL_GPL(ata_msleep);
 
 /**
  *     ata_wait_register - wait until register value changes
@@ -7198,38 +6401,7 @@ u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
 
        return tmp;
 }
-
-/**
- *     sata_lpm_ignore_phy_events - test if PHY event should be ignored
- *     @link: Link receiving the event
- *
- *     Test whether the received PHY event has to be ignored or not.
- *
- *     LOCKING:
- *     None:
- *
- *     RETURNS:
- *     True if the event has to be ignored.
- */
-bool sata_lpm_ignore_phy_events(struct ata_link *link)
-{
-       unsigned long lpm_timeout = link->last_lpm_change +
-                                   msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
-
-       /* if LPM is enabled, PHYRDY doesn't mean anything */
-       if (link->lpm_policy > ATA_LPM_MAX_POWER)
-               return true;
-
-       /* ignore the first PHY event after the LPM policy changed
-        * as it is might be spurious
-        */
-       if ((link->flags & ATA_LFLAG_CHANGED) &&
-           time_before(jiffies, lpm_timeout))
-               return true;
-
-       return false;
-}
-EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
+EXPORT_SYMBOL_GPL(ata_wait_register);
 
 /*
  * Dummy port_ops
@@ -7251,10 +6423,12 @@ struct ata_port_operations ata_dummy_port_ops = {
        .sched_eh               = ata_std_sched_eh,
        .end_eh                 = ata_std_end_eh,
 };
+EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
 
 const struct ata_port_info ata_dummy_port_info = {
        .port_ops               = &ata_dummy_port_ops,
 };
+EXPORT_SYMBOL_GPL(ata_dummy_port_info);
 
 /*
  * Utility print functions
@@ -7322,127 +6496,3 @@ void ata_print_version(const struct device *dev, const char *version)
        dev_printk(KERN_DEBUG, dev, "version %s\n", version);
 }
 EXPORT_SYMBOL(ata_print_version);
-
-/*
- * libata is essentially a library of internal helper functions for
- * low-level ATA host controller drivers.  As such, the API/ABI is
- * likely to change as new drivers are added and updated.
- * Do not depend on ABI/API stability.
- */
-EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
-EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
-EXPORT_SYMBOL_GPL(sata_deb_timing_long);
-EXPORT_SYMBOL_GPL(ata_base_port_ops);
-EXPORT_SYMBOL_GPL(sata_port_ops);
-EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
-EXPORT_SYMBOL_GPL(ata_dummy_port_info);
-EXPORT_SYMBOL_GPL(ata_link_next);
-EXPORT_SYMBOL_GPL(ata_dev_next);
-EXPORT_SYMBOL_GPL(ata_std_bios_param);
-EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
-EXPORT_SYMBOL_GPL(ata_host_init);
-EXPORT_SYMBOL_GPL(ata_host_alloc);
-EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
-EXPORT_SYMBOL_GPL(ata_slave_link_init);
-EXPORT_SYMBOL_GPL(ata_host_start);
-EXPORT_SYMBOL_GPL(ata_host_register);
-EXPORT_SYMBOL_GPL(ata_host_activate);
-EXPORT_SYMBOL_GPL(ata_host_detach);
-EXPORT_SYMBOL_GPL(ata_sg_init);
-EXPORT_SYMBOL_GPL(ata_qc_complete);
-EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
-EXPORT_SYMBOL_GPL(atapi_cmd_type);
-EXPORT_SYMBOL_GPL(ata_tf_to_fis);
-EXPORT_SYMBOL_GPL(ata_tf_from_fis);
-EXPORT_SYMBOL_GPL(ata_pack_xfermask);
-EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
-EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
-EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
-EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
-EXPORT_SYMBOL_GPL(ata_mode_string);
-EXPORT_SYMBOL_GPL(ata_id_xfermask);
-EXPORT_SYMBOL_GPL(ata_do_set_mode);
-EXPORT_SYMBOL_GPL(ata_std_qc_defer);
-EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
-EXPORT_SYMBOL_GPL(ata_dev_disable);
-EXPORT_SYMBOL_GPL(sata_set_spd);
-EXPORT_SYMBOL_GPL(ata_wait_after_reset);
-EXPORT_SYMBOL_GPL(sata_link_debounce);
-EXPORT_SYMBOL_GPL(sata_link_resume);
-EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
-EXPORT_SYMBOL_GPL(ata_std_prereset);
-EXPORT_SYMBOL_GPL(sata_link_hardreset);
-EXPORT_SYMBOL_GPL(sata_std_hardreset);
-EXPORT_SYMBOL_GPL(ata_std_postreset);
-EXPORT_SYMBOL_GPL(ata_dev_classify);
-EXPORT_SYMBOL_GPL(ata_dev_pair);
-EXPORT_SYMBOL_GPL(ata_ratelimit);
-EXPORT_SYMBOL_GPL(ata_msleep);
-EXPORT_SYMBOL_GPL(ata_wait_register);
-EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
-EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
-EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
-EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
-EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
-EXPORT_SYMBOL_GPL(sata_scr_valid);
-EXPORT_SYMBOL_GPL(sata_scr_read);
-EXPORT_SYMBOL_GPL(sata_scr_write);
-EXPORT_SYMBOL_GPL(sata_scr_write_flush);
-EXPORT_SYMBOL_GPL(ata_link_online);
-EXPORT_SYMBOL_GPL(ata_link_offline);
-#ifdef CONFIG_PM
-EXPORT_SYMBOL_GPL(ata_host_suspend);
-EXPORT_SYMBOL_GPL(ata_host_resume);
-#endif /* CONFIG_PM */
-EXPORT_SYMBOL_GPL(ata_id_string);
-EXPORT_SYMBOL_GPL(ata_id_c_string);
-EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
-EXPORT_SYMBOL_GPL(ata_scsi_simulate);
-
-EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
-EXPORT_SYMBOL_GPL(ata_timing_find_mode);
-EXPORT_SYMBOL_GPL(ata_timing_compute);
-EXPORT_SYMBOL_GPL(ata_timing_merge);
-EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
-
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL_GPL(pci_test_config_bits);
-EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
-EXPORT_SYMBOL_GPL(ata_pci_remove_one);
-#ifdef CONFIG_PM
-EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
-EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
-EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
-EXPORT_SYMBOL_GPL(ata_pci_device_resume);
-#endif /* CONFIG_PM */
-#endif /* CONFIG_PCI */
-
-EXPORT_SYMBOL_GPL(ata_platform_remove_one);
-
-EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
-EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
-EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
-EXPORT_SYMBOL_GPL(ata_port_desc);
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
-#endif /* CONFIG_PCI */
-EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
-EXPORT_SYMBOL_GPL(ata_link_abort);
-EXPORT_SYMBOL_GPL(ata_port_abort);
-EXPORT_SYMBOL_GPL(ata_port_freeze);
-EXPORT_SYMBOL_GPL(sata_async_notification);
-EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
-EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
-EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
-EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
-EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
-EXPORT_SYMBOL_GPL(ata_do_eh);
-EXPORT_SYMBOL_GPL(ata_std_error_handler);
-
-EXPORT_SYMBOL_GPL(ata_cable_40wire);
-EXPORT_SYMBOL_GPL(ata_cable_80wire);
-EXPORT_SYMBOL_GPL(ata_cable_unknown);
-EXPORT_SYMBOL_GPL(ata_cable_ignore);
-EXPORT_SYMBOL_GPL(ata_cable_sata);
-EXPORT_SYMBOL_GPL(ata_host_get);
-EXPORT_SYMBOL_GPL(ata_host_put);
index 3bfd9da..474c6c3 100644
@@ -2,10 +2,6 @@
 /*
  *  libata-eh.c - libata error handling
  *
- *  Maintained by:  Tejun Heo <tj@kernel.org>
- *                 Please ALWAYS copy linux-ide@vger.kernel.org
- *                 on emails.
- *
  *  Copyright 2006 Tejun Heo <htejun@gmail.com>
  *
  *  libata documentation is available via 'make {ps|pdf}docs',
@@ -184,6 +180,7 @@ void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
        __ata_ehi_pushv_desc(ehi, fmt, args);
        va_end(args);
 }
+EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
 
 /**
  *     ata_ehi_push_desc - push error description with separator
@@ -207,6 +204,7 @@ void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
        __ata_ehi_pushv_desc(ehi, fmt, args);
        va_end(args);
 }
+EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
 
 /**
  *     ata_ehi_clear_desc - clean error description
@@ -222,6 +220,7 @@ void ata_ehi_clear_desc(struct ata_eh_info *ehi)
        ehi->desc[0] = '\0';
        ehi->desc_len = 0;
 }
+EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
 
 /**
  *     ata_port_desc - append port description
@@ -249,9 +248,9 @@ void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
        __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
        va_end(args);
 }
+EXPORT_SYMBOL_GPL(ata_port_desc);
 
 #ifdef CONFIG_PCI
-
 /**
  *     ata_port_pbar_desc - append PCI BAR description
  *     @ap: target ATA port
@@ -288,7 +287,7 @@ void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
                ata_port_desc(ap, "%s 0x%llx", name,
                                start + (unsigned long long)offset);
 }
-
+EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
 #endif /* CONFIG_PCI */
 
 static int ata_lookup_timeout_table(u8 cmd)
@@ -973,6 +972,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
        /* see: ata_std_sched_eh, unless you know better */
        ap->ops->sched_eh(ap);
 }
+EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
 
 static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
 {
@@ -1015,6 +1015,7 @@ int ata_link_abort(struct ata_link *link)
 {
        return ata_do_link_abort(link->ap, link);
 }
+EXPORT_SYMBOL_GPL(ata_link_abort);
 
 /**
  *     ata_port_abort - abort all qc's on the port
@@ -1032,6 +1033,7 @@ int ata_port_abort(struct ata_port *ap)
 {
        return ata_do_link_abort(ap, NULL);
 }
+EXPORT_SYMBOL_GPL(ata_port_abort);
 
 /**
  *     __ata_port_freeze - freeze port
@@ -1088,79 +1090,7 @@ int ata_port_freeze(struct ata_port *ap)
 
        return nr_aborted;
 }
-
-/**
- *     sata_async_notification - SATA async notification handler
- *     @ap: ATA port where async notification is received
- *
- *     Handler to be called when async notification via SDB FIS is
- *     received.  This function schedules EH if necessary.
- *
- *     LOCKING:
- *     spin_lock_irqsave(host lock)
- *
- *     RETURNS:
- *     1 if EH is scheduled, 0 otherwise.
- */
-int sata_async_notification(struct ata_port *ap)
-{
-       u32 sntf;
-       int rc;
-
-       if (!(ap->flags & ATA_FLAG_AN))
-               return 0;
-
-       rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
-       if (rc == 0)
-               sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
-
-       if (!sata_pmp_attached(ap) || rc) {
-               /* PMP is not attached or SNTF is not available */
-               if (!sata_pmp_attached(ap)) {
-                       /* PMP is not attached.  Check whether ATAPI
-                        * AN is configured.  If so, notify media
-                        * change.
-                        */
-                       struct ata_device *dev = ap->link.device;
-
-                       if ((dev->class == ATA_DEV_ATAPI) &&
-                           (dev->flags & ATA_DFLAG_AN))
-                               ata_scsi_media_change_notify(dev);
-                       return 0;
-               } else {
-                       /* PMP is attached but SNTF is not available.
-                        * ATAPI async media change notification is
-                        * not used.  The PMP must be reporting PHY
-                        * status change, schedule EH.
-                        */
-                       ata_port_schedule_eh(ap);
-                       return 1;
-               }
-       } else {
-               /* PMP is attached and SNTF is available */
-               struct ata_link *link;
-
-               /* check and notify ATAPI AN */
-               ata_for_each_link(link, ap, EDGE) {
-                       if (!(sntf & (1 << link->pmp)))
-                               continue;
-
-                       if ((link->device->class == ATA_DEV_ATAPI) &&
-                           (link->device->flags & ATA_DFLAG_AN))
-                               ata_scsi_media_change_notify(link->device);
-               }
-
-               /* If PMP is reporting that PHY status of some
-                * downstream ports has changed, schedule EH.
-                */
-               if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
-                       ata_port_schedule_eh(ap);
-                       return 1;
-               }
-
-               return 0;
-       }
-}
+EXPORT_SYMBOL_GPL(ata_port_freeze);
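
The removed sata_async_notification() above treats the value read from SCR_NOTIFICATION as a per-PMP-port bitmask, with bit N belonging to the fan-out link whose link->pmp is N. A minimal sketch of that test (kernel context and <linux/types.h> assumed; the helper name is illustrative):

/* Sketch only: did this fan-out link raise an async notification? */
static bool link_has_notification(u32 sntf, unsigned int pmp)
{
        return sntf & (1U << pmp);
}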
 
 /**
  *     ata_eh_freeze_port - EH helper to freeze port
@@ -1182,6 +1112,7 @@ void ata_eh_freeze_port(struct ata_port *ap)
        __ata_port_freeze(ap);
        spin_unlock_irqrestore(ap->lock, flags);
 }
+EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
 
 /**
  *     ata_port_thaw_port - EH helper to thaw port
@@ -1289,6 +1220,7 @@ void ata_dev_disable(struct ata_device *dev)
         */
        ata_ering_clear(&dev->ering);
 }
+EXPORT_SYMBOL_GPL(ata_dev_disable);
 
 /**
  *     ata_eh_detach_dev - detach ATA device
@@ -1420,62 +1352,6 @@ static const char *ata_err_string(unsigned int err_mask)
 }
 
 /**
- *     ata_eh_read_log_10h - Read log page 10h for NCQ error details
- *     @dev: Device to read log page 10h from
- *     @tag: Resulting tag of the failed command
- *     @tf: Resulting taskfile registers of the failed command
- *
- *     Read log page 10h to obtain NCQ error details and clear error
- *     condition.
- *
- *     LOCKING:
- *     Kernel thread context (may sleep).
- *
- *     RETURNS:
- *     0 on success, -errno otherwise.
- */
-static int ata_eh_read_log_10h(struct ata_device *dev,
-                              int *tag, struct ata_taskfile *tf)
-{
-       u8 *buf = dev->link->ap->sector_buf;
-       unsigned int err_mask;
-       u8 csum;
-       int i;
-
-       err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
-       if (err_mask)
-               return -EIO;
-
-       csum = 0;
-       for (i = 0; i < ATA_SECT_SIZE; i++)
-               csum += buf[i];
-       if (csum)
-               ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
-                            csum);
-
-       if (buf[0] & 0x80)
-               return -ENOENT;
-
-       *tag = buf[0] & 0x1f;
-
-       tf->command = buf[2];
-       tf->feature = buf[3];
-       tf->lbal = buf[4];
-       tf->lbam = buf[5];
-       tf->lbah = buf[6];
-       tf->device = buf[7];
-       tf->hob_lbal = buf[8];
-       tf->hob_lbam = buf[9];
-       tf->hob_lbah = buf[10];
-       tf->nsect = buf[12];
-       tf->hob_nsect = buf[13];
-       if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id))
-               tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
-
-       return 0;
-}
-
-/**
  *     atapi_eh_tur - perform ATAPI TEST_UNIT_READY
  *     @dev: target ATAPI device
  *     @r_sense_key: out parameter for sense_key
@@ -1659,80 +1535,6 @@ static void ata_eh_analyze_serror(struct ata_link *link)
 }
 
 /**
- *     ata_eh_analyze_ncq_error - analyze NCQ error
- *     @link: ATA link to analyze NCQ error for
- *
- *     Read log page 10h, determine the offending qc and acquire
- *     error status TF.  For NCQ device errors, all LLDDs have to do
- *     is setting AC_ERR_DEV in ehi->err_mask.  This function takes
- *     care of the rest.
- *
- *     LOCKING:
- *     Kernel thread context (may sleep).
- */
-void ata_eh_analyze_ncq_error(struct ata_link *link)
-{
-       struct ata_port *ap = link->ap;
-       struct ata_eh_context *ehc = &link->eh_context;
-       struct ata_device *dev = link->device;
-       struct ata_queued_cmd *qc;
-       struct ata_taskfile tf;
-       int tag, rc;
-
-       /* if frozen, we can't do much */
-       if (ap->pflags & ATA_PFLAG_FROZEN)
-               return;
-
-       /* is it NCQ device error? */
-       if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
-               return;
-
-       /* has LLDD analyzed already? */
-       ata_qc_for_each_raw(ap, qc, tag) {
-               if (!(qc->flags & ATA_QCFLAG_FAILED))
-                       continue;
-
-               if (qc->err_mask)
-                       return;
-       }
-
-       /* okay, this error is ours */
-       memset(&tf, 0, sizeof(tf));
-       rc = ata_eh_read_log_10h(dev, &tag, &tf);
-       if (rc) {
-               ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
-                            rc);
-               return;
-       }
-
-       if (!(link->sactive & (1 << tag))) {
-               ata_link_err(link, "log page 10h reported inactive tag %d\n",
-                            tag);
-               return;
-       }
-
-       /* we've got the perpetrator, condemn it */
-       qc = __ata_qc_from_tag(ap, tag);
-       memcpy(&qc->result_tf, &tf, sizeof(tf));
-       qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
-       qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
-       if (dev->class == ATA_DEV_ZAC &&
-           ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) {
-               char sense_key, asc, ascq;
-
-               sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
-               asc = (qc->result_tf.auxiliary >> 8) & 0xff;
-               ascq = qc->result_tf.auxiliary & 0xff;
-               ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc, ascq);
-               ata_scsi_set_sense_information(dev, qc->scsicmd,
-                                              &qc->result_tf);
-               qc->flags |= ATA_QCFLAG_SENSE_VALID;
-       }
-
-       ehc->i.err_mask &= ~AC_ERR_DEV;
-}
-
-/**
  *     ata_eh_analyze_tf - analyze taskfile of a failed qc
  *     @qc: qc to analyze
  *     @tf: Taskfile registers to analyze
@@ -3436,7 +3238,8 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
        int rc;
 
        /* if the link or host doesn't do LPM, noop */
-       if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
+       if (!IS_ENABLED(CONFIG_SATA_HOST) ||
+           (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
                return 0;
 
        /*
@@ -4052,6 +3855,7 @@ void ata_std_error_handler(struct ata_port *ap)
 
        ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
 }
+EXPORT_SYMBOL_GPL(ata_std_error_handler);
 
 #ifdef CONFIG_PM
 /**
diff --git a/drivers/ata/libata-pata-timings.c b/drivers/ata/libata-pata-timings.c
new file mode 100644
index 0000000..af34122
--- /dev/null
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *  Helper library for PATA timings
+ *
+ *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
+ *  Copyright 2003-2004 Jeff Garzik
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/libata.h>
+
+/*
+ * This mode timing computation functionality is ported over from
+ * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
+ */
+/*
+ * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
+ * These were taken from ATA/ATAPI-6 standard, rev 0a, except
+ * for UDMA6, which is currently supported only by Maxtor drives.
+ *
+ * For PIO 5/6 and MWDMA 3/4, see the CFA specification 3.0.
+ */
+
+static const struct ata_timing ata_timing[] = {
+/*     { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
+       { XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
+       { XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
+       { XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
+       { XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
+       { XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
+       { XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
+       { XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },
+
+       { XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
+       { XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
+       { XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },
+
+       { XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
+       { XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
+       { XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
+       { XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
+       { XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },
+
+/*     { XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
+       { XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
+       { XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
+       { XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
+       { XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
+       { XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
+       { XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
+       { XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },
+
+       { 0xFF }
+};
+
+#define ENOUGH(v, unit)                (((v)-1)/(unit)+1)
+#define EZ(v, unit)            ((v)?ENOUGH(((v) * 1000), unit):0)
+
+static void ata_timing_quantize(const struct ata_timing *t,
+                               struct ata_timing *q, int T, int UT)
+{
+       q->setup        = EZ(t->setup,       T);
+       q->act8b        = EZ(t->act8b,       T);
+       q->rec8b        = EZ(t->rec8b,       T);
+       q->cyc8b        = EZ(t->cyc8b,       T);
+       q->active       = EZ(t->active,      T);
+       q->recover      = EZ(t->recover,     T);
+       q->dmack_hold   = EZ(t->dmack_hold,  T);
+       q->cycle        = EZ(t->cycle,       T);
+       q->udma         = EZ(t->udma,       UT);
+}
+
+void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
+                     struct ata_timing *m, unsigned int what)
+{
+       if (what & ATA_TIMING_SETUP)
+               m->setup = max(a->setup, b->setup);
+       if (what & ATA_TIMING_ACT8B)
+               m->act8b = max(a->act8b, b->act8b);
+       if (what & ATA_TIMING_REC8B)
+               m->rec8b = max(a->rec8b, b->rec8b);
+       if (what & ATA_TIMING_CYC8B)
+               m->cyc8b = max(a->cyc8b, b->cyc8b);
+       if (what & ATA_TIMING_ACTIVE)
+               m->active = max(a->active, b->active);
+       if (what & ATA_TIMING_RECOVER)
+               m->recover = max(a->recover, b->recover);
+       if (what & ATA_TIMING_DMACK_HOLD)
+               m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
+       if (what & ATA_TIMING_CYCLE)
+               m->cycle = max(a->cycle, b->cycle);
+       if (what & ATA_TIMING_UDMA)
+               m->udma = max(a->udma, b->udma);
+}
+EXPORT_SYMBOL_GPL(ata_timing_merge);
+
+const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
+{
+       const struct ata_timing *t = ata_timing;
+
+       while (xfer_mode > t->mode)
+               t++;
+
+       if (xfer_mode == t->mode)
+               return t;
+
+       WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
+                       __func__, xfer_mode);
+
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(ata_timing_find_mode);
+
+int ata_timing_compute(struct ata_device *adev, unsigned short speed,
+                      struct ata_timing *t, int T, int UT)
+{
+       const u16 *id = adev->id;
+       const struct ata_timing *s;
+       struct ata_timing p;
+
+       /*
+        * Find the mode.
+        */
+       s = ata_timing_find_mode(speed);
+       if (!s)
+               return -EINVAL;
+
+       memcpy(t, s, sizeof(*s));
+
+       /*
+        * If the drive is an EIDE drive, it can tell us it needs extended
+        * PIO/MW_DMA cycle timing.
+        */
+
+       if (id[ATA_ID_FIELD_VALID] & 2) {       /* EIDE drive */
+               memset(&p, 0, sizeof(p));
+
+               if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
+                       if (speed <= XFER_PIO_2)
+                               p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
+                       else if ((speed <= XFER_PIO_4) ||
+                                (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
+                               p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
+               } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
+                       p.cycle = id[ATA_ID_EIDE_DMA_MIN];
+
+               ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
+       }
+
+       /*
+        * Convert the timing to bus clock counts.
+        */
+
+       ata_timing_quantize(t, t, T, UT);
+
+       /*
+        * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
+        * S.M.A.R.T. and some other commands. We have to ensure that the
+        * DMA cycle timing is slower than or equal to the fastest PIO timing.
+        */
+
+       if (speed > XFER_PIO_6) {
+               ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
+               ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
+       }
+
+       /*
+        * Lengthen active & recovery time so that cycle time is correct.
+        */
+
+       if (t->act8b + t->rec8b < t->cyc8b) {
+               t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
+               t->rec8b = t->cyc8b - t->act8b;
+       }
+
+       if (t->active + t->recover < t->cycle) {
+               t->active += (t->cycle - (t->active + t->recover)) / 2;
+               t->recover = t->cycle - t->active;
+       }
+
+       /*
+        * In a few cases quantisation may produce enough error to
+        * leave t->cycle too low for the sum of active and recovery;
+        * if so, we must correct this.
+        */
+       if (t->active + t->recover > t->cycle)
+               t->cycle = t->active + t->recover;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ata_timing_compute);
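
A worked example of the ENOUGH()/EZ() quantization used by the new libata-pata-timings.c above, assuming (as in typical PATA drivers) that T is the bus clock period in picoseconds, roughly 30000 for a 33 MHz clock. The value of T is an assumption for illustration; the macros themselves are copied from the code above.

#include <stdio.h>

/* Same rounding-up quantization as the ENOUGH()/EZ() macros above. */
#define ENOUGH(v, unit)         (((v) - 1) / (unit) + 1)
#define EZ(v, unit)             ((v) ? ENOUGH(((v) * 1000), (unit)) : 0)

int main(void)
{
        int T = 30000;  /* assumed: bus clock period in picoseconds (~33 MHz PATA clock) */

        /* PIO-4 entries from the timing table above: active 70 ns, 8-bit cycle 120 ns */
        printf("active = %d clocks\n", EZ(70, T));      /* ceil(70000/30000)  -> 3 */
        printf("cyc8b  = %d clocks\n", EZ(120, T));     /* ceil(120000/30000) -> 4 */
        return 0;
}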
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
new file mode 100644
index 0000000..c16423e
--- /dev/null
@@ -0,0 +1,1483 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *  SATA specific part of ATA helper library
+ *
+ *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
+ *  Copyright 2003-2004 Jeff Garzik
+ *  Copyright 2006 Tejun Heo <htejun@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <linux/libata.h>
+
+#include "libata.h"
+#include "libata-transport.h"
+
+/* debounce timing parameters in msecs { interval, duration, timeout } */
+const unsigned long sata_deb_timing_normal[]           = {   5,  100, 2000 };
+EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
+const unsigned long sata_deb_timing_hotplug[]          = {  25,  500, 2000 };
+EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
+const unsigned long sata_deb_timing_long[]             = { 100, 2000, 5000 };
+EXPORT_SYMBOL_GPL(sata_deb_timing_long);
+
+/**
+ *     sata_scr_valid - test whether SCRs are accessible
+ *     @link: ATA link to test SCR accessibility for
+ *
+ *     Test whether SCRs are accessible for @link.
+ *
+ *     LOCKING:
+ *     None.
+ *
+ *     RETURNS:
+ *     1 if SCRs are accessible, 0 otherwise.
+ */
+int sata_scr_valid(struct ata_link *link)
+{
+       struct ata_port *ap = link->ap;
+
+       return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
+}
+EXPORT_SYMBOL_GPL(sata_scr_valid);
+
+/**
+ *     sata_scr_read - read SCR register of the specified port
+ *     @link: ATA link to read SCR for
+ *     @reg: SCR to read
+ *     @val: Place to store read value
+ *
+ *     Read SCR register @reg of @link into *@val.  This function is
+ *     guaranteed to succeed if @link is ap->link, the cable type of
+ *     the port is SATA and the port implements ->scr_read.
+ *
+ *     LOCKING:
+ *     None if @link is ap->link.  Kernel thread context otherwise.
+ *
+ *     RETURNS:
+ *     0 on success, negative errno on failure.
+ */
+int sata_scr_read(struct ata_link *link, int reg, u32 *val)
+{
+       if (ata_is_host_link(link)) {
+               if (sata_scr_valid(link))
+                       return link->ap->ops->scr_read(link, reg, val);
+               return -EOPNOTSUPP;
+       }
+
+       return sata_pmp_scr_read(link, reg, val);
+}
+EXPORT_SYMBOL_GPL(sata_scr_read);
+
+/**
+ *     sata_scr_write - write SCR register of the specified port
+ *     @link: ATA link to write SCR for
+ *     @reg: SCR to write
+ *     @val: value to write
+ *
+ *     Write @val to SCR register @reg of @link.  This function is
+ *     guaranteed to succeed if @link is ap->link, the cable type of
+ *     the port is SATA and the port implements ->scr_read.
+ *
+ *     LOCKING:
+ *     None if @link is ap->link.  Kernel thread context otherwise.
+ *
+ *     RETURNS:
+ *     0 on success, negative errno on failure.
+ */
+int sata_scr_write(struct ata_link *link, int reg, u32 val)
+{
+       if (ata_is_host_link(link)) {
+               if (sata_scr_valid(link))
+                       return link->ap->ops->scr_write(link, reg, val);
+               return -EOPNOTSUPP;
+       }
+
+       return sata_pmp_scr_write(link, reg, val);
+}
+EXPORT_SYMBOL_GPL(sata_scr_write);
+
+/**
+ *     sata_scr_write_flush - write SCR register of the specified port and flush
+ *     @link: ATA link to write SCR for
+ *     @reg: SCR to write
+ *     @val: value to write
+ *
+ *     This function is identical to sata_scr_write() except that this
+ *     function performs flush after writing to the register.
+ *
+ *     LOCKING:
+ *     None if @link is ap->link.  Kernel thread context otherwise.
+ *
+ *     RETURNS:
+ *     0 on success, negative errno on failure.
+ */
+int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
+{
+       if (ata_is_host_link(link)) {
+               int rc;
+
+               if (sata_scr_valid(link)) {
+                       rc = link->ap->ops->scr_write(link, reg, val);
+                       if (rc == 0)
+                               rc = link->ap->ops->scr_read(link, reg, &val);
+                       return rc;
+               }
+               return -EOPNOTSUPP;
+       }
+
+       return sata_pmp_scr_write(link, reg, val);
+}
+EXPORT_SYMBOL_GPL(sata_scr_write_flush);
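
A minimal sketch of how these SCR accessors are typically used, reading SStatus and checking the DET field the same way sata_link_debounce() below masks it with 0xf. Kernel context and <linux/libata.h> are assumed; the helper name is illustrative.

/* Sketch only: is a device present with PHY communication established (DET == 3)? */
static int link_det_established(struct ata_link *link)
{
        u32 sstatus;
        int rc = sata_scr_read(link, SCR_STATUS, &sstatus);

        if (rc)
                return rc;      /* SCRs not accessible on this link */
        return (sstatus & 0xf) == 0x3;
}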
+
+/**
+ *     ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
+ *     @tf: Taskfile to convert
+ *     @pmp: Port multiplier port
+ *     @is_cmd: This FIS is for command
+ *     @fis: Buffer into which data will be output
+ *
+ *     Converts a standard ATA taskfile to a Serial ATA
+ *     FIS structure (Register - Host to Device).
+ *
+ *     LOCKING:
+ *     Inherited from caller.
+ */
+void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
+{
+       fis[0] = 0x27;                  /* Register - Host to Device FIS */
+       fis[1] = pmp & 0xf;             /* Port multiplier number*/
+       if (is_cmd)
+       fis[1] = pmp & 0xf;             /* Port multiplier number */
+
+       fis[2] = tf->command;
+       fis[3] = tf->feature;
+
+       fis[4] = tf->lbal;
+       fis[5] = tf->lbam;
+       fis[6] = tf->lbah;
+       fis[7] = tf->device;
+
+       fis[8] = tf->hob_lbal;
+       fis[9] = tf->hob_lbam;
+       fis[10] = tf->hob_lbah;
+       fis[11] = tf->hob_feature;
+
+       fis[12] = tf->nsect;
+       fis[13] = tf->hob_nsect;
+       fis[14] = 0;
+       fis[15] = tf->ctl;
+
+       fis[16] = tf->auxiliary & 0xff;
+       fis[17] = (tf->auxiliary >> 8) & 0xff;
+       fis[18] = (tf->auxiliary >> 16) & 0xff;
+       fis[19] = (tf->auxiliary >> 24) & 0xff;
+}
+EXPORT_SYMBOL_GPL(ata_tf_to_fis);
+
+/**
+ *     ata_tf_from_fis - Convert SATA FIS to ATA taskfile
+ *     @fis: Buffer from which data will be input
+ *     @tf: Taskfile to output
+ *
+ *     Converts a serial ATA FIS structure to a standard ATA taskfile.
+ *
+ *     LOCKING:
+ *     Inherited from caller.
+ */
+
+void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
+{
+       tf->command     = fis[2];       /* status */
+       tf->feature     = fis[3];       /* error */
+
+       tf->lbal        = fis[4];
+       tf->lbam        = fis[5];
+       tf->lbah        = fis[6];
+       tf->device      = fis[7];
+
+       tf->hob_lbal    = fis[8];
+       tf->hob_lbam    = fis[9];
+       tf->hob_lbah    = fis[10];
+
+       tf->nsect       = fis[12];
+       tf->hob_nsect   = fis[13];
+}
+EXPORT_SYMBOL_GPL(ata_tf_from_fis);
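
For context, ata_tf_to_fis() is what an AHCI-style LLD calls when building the 20-byte Register - Host to Device FIS in its command table (the auxiliary bytes written above occupy fis[16..19]). A minimal usage sketch; the function and field names called here exist in libata, while fill_cmd_fis and cmd_tbl are illustrative.

/* Sketch only: filling a command FIS the way an AHCI-style LLD might. */
static void fill_cmd_fis(struct ata_queued_cmd *qc, u8 *cmd_tbl)
{
        /* cmd_tbl must have room for a 20-byte H2D Register FIS */
        ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
}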
+
+/**
+ *     sata_link_debounce - debounce SATA phy status
+ *     @link: ATA link to debounce SATA phy status for
+ *     @params: timing parameters { interval, duration, timeout } in msec
+ *     @deadline: deadline jiffies for the operation
+ *
+ *     Make sure SStatus of @link reaches stable state, determined by
+ *     holding the same value where DET is not 1 for @duration polled
+ *     every @interval, before @timeout.  Timeout constrains the
+ *     beginning of the stable state.  Because DET gets stuck at 1 on
+ *     some controllers after hot unplugging, this function waits
+ *     until the timeout and then returns 0 if DET is stable at 1.
+ *
+ *     @timeout is further limited by @deadline.  The sooner of the
+ *     two is used.
+ *
+ *     LOCKING:
+ *     Kernel thread context (may sleep)
+ *
+ *     RETURNS:
+ *     0 on success, -errno on failure.
+ */
+int sata_link_debounce(struct ata_link *link, const unsigned long *params,
+                      unsigned long deadline)
+{
+       unsigned long interval = params[0];
+       unsigned long duration = params[1];
+       unsigned long last_jiffies, t;
+       u32 last, cur;
+       int rc;
+
+       t = ata_deadline(jiffies, params[2]);
+       if (time_before(t, deadline))
+               deadline = t;
+
+       if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
+               return rc;
+       cur &= 0xf;
+
+       last = cur;
+       last_jiffies = jiffies;
+
+       while (1) {
+               ata_msleep(link->ap, interval);
+               if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
+                       return rc;
+               cur &= 0xf;
+
+               /* DET stable? */
+               if (cur == last) {
+                       if (cur == 1 && time_before(jiffies, deadline))
+                               continue;
+                       if (time_after(jiffies,
+                                      ata_deadline(last_jiffies, duration)))
+                               return 0;
+                       continue;
+               }
+
+               /* unstable, start over */
+               last = cur;
+               last_jiffies = jiffies;
+
+               /* Check deadline.  If debouncing failed, return
+                * -EPIPE to tell upper layer to lower link speed.
+                */
+               if (time_after(jiffies, deadline))
+                       return -EPIPE;
+       }
+}
+EXPORT_SYMBOL_GPL(sata_link_debounce);
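
The debounce timing tables declared at the top of this new file are meant to be passed as @params here. A minimal sketch, assuming kernel context; the 5000 ms value and the helper name are illustrative, while ata_deadline() and sata_deb_timing_normal are real libata symbols.

/* Sketch only: debounce a link with the "normal" timing table, bounded by ~5 s. */
static int debounce_link_example(struct ata_link *link)
{
        unsigned long deadline = ata_deadline(jiffies, 5000);

        return sata_link_debounce(link, sata_deb_timing_normal, deadline);
}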
+
+/**
+ *     sata_link_resume - resume SATA link
+ *     @link: ATA link to resume SATA
+ *     @params: timing parameters { interval, duration, timeout } in msec
+ *     @deadline: deadline jiffies for the operation
+ *
+ *     Resume SATA phy @link and debounce it.
+ *
+ *     LOCKING:
+ *     Kernel thread context (may sleep)
+ *
+ *     RETURNS:
+ *     0 on success, -errno on failure.
+ */
+int sata_link_resume(struct ata_link *link, const unsigned long *params,
+                    unsigned long deadline)
+{
+       int tries = ATA_LINK_RESUME_TRIES;
+       u32 scontrol, serror;
+       int rc;
+
+       if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+               return rc;
+
+       /*
+        * Writes to SControl sometimes get ignored under certain
+        * controllers (ata_piix SIDPR).  Make sure DET actually is
+        * cleared.
+        */
+       do {
+               scontrol = (scontrol & 0x0f0) | 0x300;
+               if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
+                       return rc;
+               /*
+                * Some PHYs react badly if SStatus is pounded
+                * immediately after resuming.  Delay 200ms before
+                * debouncing.
+                */
+               if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
+                       ata_msleep(link->ap, 200);
+
+               /* is SControl restored correctly? */
+               if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+                       return rc;
+       } while ((scontrol & 0xf0f) != 0x300 && --tries);
+
+       if ((scontrol & 0xf0f) != 0x300) {
+               ata_link_warn(link, "failed to resume link (SControl %X)\n",
+                            scontrol);
+               return 0;
+       }
+
+       if (tries < ATA_LINK_RESUME_TRIES)
+               ata_link_warn(link, "link resume succeeded after %d retries\n",
+                             ATA_LINK_RESUME_TRIES - tries);
+
+       if ((rc = sata_link_debounce(link, params, deadline)))
+               return rc;
+
+       /* clear SError, some PHYs require this even for SRST to work */
+       if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
+               rc = sata_scr_write(link, SCR_ERROR, serror);
+
+       return rc != -EINVAL ? rc : 0;
+}
+EXPORT_SYMBOL_GPL(sata_link_resume);
+
+/**
+ *     sata_link_scr_lpm - manipulate SControl IPM and SPM fields
+ *     @link: ATA link to manipulate SControl for
+ *     @policy: LPM policy to configure
+ *     @spm_wakeup: initiate LPM transition to active state
+ *
+ *     Manipulate the IPM field of the SControl register of @link
+ *     according to @policy.  If @policy is ATA_LPM_MAX_POWER and
+ *     @spm_wakeup is %true, the SPM field is manipulated to wake up
+ *     the link.  This function also clears PHYRDY_CHG before
+ *     returning.
+ *
+ *     LOCKING:
+ *     EH context.
+ *
+ *     RETURNS:
+ *     0 on success, -errno otherwise.
+ */
+int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+                     bool spm_wakeup)
+{
+       struct ata_eh_context *ehc = &link->eh_context;
+       bool woken_up = false;
+       u32 scontrol;
+       int rc;
+
+       rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
+       if (rc)
+               return rc;
+
+       switch (policy) {
+       case ATA_LPM_MAX_POWER:
+               /* disable all LPM transitions */
+               scontrol |= (0x7 << 8);
+               /* initiate transition to active state */
+               if (spm_wakeup) {
+                       scontrol |= (0x4 << 12);
+                       woken_up = true;
+               }
+               break;
+       case ATA_LPM_MED_POWER:
+               /* allow LPM to PARTIAL */
+               scontrol &= ~(0x1 << 8);
+               scontrol |= (0x6 << 8);
+               break;
+       case ATA_LPM_MED_POWER_WITH_DIPM:
+       case ATA_LPM_MIN_POWER_WITH_PARTIAL:
+       case ATA_LPM_MIN_POWER:
+               if (ata_link_nr_enabled(link) > 0)
+                       /* no restrictions on LPM transitions */
+                       scontrol &= ~(0x7 << 8);
+               else {
+                       /* empty port, power off */
+                       scontrol &= ~0xf;
+                       scontrol |= (0x1 << 2);
+               }
+               break;
+       default:
+               WARN_ON(1);
+       }
+
+       rc = sata_scr_write(link, SCR_CONTROL, scontrol);
+       if (rc)
+               return rc;
+
+       /* give the link time to transit out of LPM state */
+       if (woken_up)
+               msleep(10);
+
+       /* clear PHYRDY_CHG from SError */
+       ehc->i.serror &= ~SERR_PHYRDY_CHG;
+       return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
+}
+EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
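
For reference, all of the bit-twiddling above lands in the low 16 bits of
SControl: DET in bits 3:0, SPD in bits 7:4, IPM in bits 11:8 and SPM in
bits 15:12.  A minimal sketch of a debug helper that decodes those fields
(the helper name is hypothetical; sata_scr_read() and ata_link_info() are
the real accessors):

        static void foo_show_scontrol(struct ata_link *link)
        {
                u32 scontrol;

                if (sata_scr_read(link, SCR_CONTROL, &scontrol))
                        return;

                /* DET: detection/PHY control, SPD: speed limit,
                 * IPM: which LPM transitions are disallowed, SPM: wakeup */
                ata_link_info(link, "DET=%x SPD=%x IPM=%x SPM=%x\n",
                              scontrol & 0xf,
                              (scontrol >> 4) & 0xf,
                              (scontrol >> 8) & 0xf,
                              (scontrol >> 12) & 0xf);
        }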
+
+static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
+{
+       struct ata_link *host_link = &link->ap->link;
+       u32 limit, target, spd;
+
+       limit = link->sata_spd_limit;
+
+       /* Don't configure downstream link faster than upstream link.
+        * It doesn't speed up anything and some PMPs choke on such
+        * configuration.
+        */
+       if (!ata_is_host_link(link) && host_link->sata_spd)
+               limit &= (1 << host_link->sata_spd) - 1;
+
+       if (limit == UINT_MAX)
+               target = 0;
+       else
+               target = fls(limit);
+
+       spd = (*scontrol >> 4) & 0xf;
+       *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
+
+       return spd != target;
+}
+
+/**
+ *     sata_set_spd_needed - is SATA spd configuration needed
+ *     @link: Link in question
+ *
+ *     Test whether the spd limit in SControl matches
+ *     @link->sata_spd_limit.  This function is used to determine
+ *     whether hardreset is necessary to apply SATA spd
+ *     configuration.
+ *
+ *     LOCKING:
+ *     Inherited from caller.
+ *
+ *     RETURNS:
+ *     1 if SATA spd configuration is needed, 0 otherwise.
+ */
+static int sata_set_spd_needed(struct ata_link *link)
+{
+       u32 scontrol;
+
+       if (sata_scr_read(link, SCR_CONTROL, &scontrol))
+               return 1;
+
+       return __sata_set_spd_needed(link, &scontrol);
+}
+
+/**
+ *     sata_set_spd - set SATA spd according to spd limit
+ *     @link: Link to set SATA spd for
+ *
+ *     Set SATA spd of @link according to sata_spd_limit.
+ *
+ *     LOCKING:
+ *     Inherited from caller.
+ *
+ *     RETURNS:
+ *     0 if spd doesn't need to be changed, 1 if spd has been
+ *     changed.  Negative errno if SCR registers are inaccessible.
+ */
+int sata_set_spd(struct ata_link *link)
+{
+       u32 scontrol;
+       int rc;
+
+       if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+               return rc;
+
+       if (!__sata_set_spd_needed(link, &scontrol))
+               return 0;
+
+       if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
+               return rc;
+
+       return 1;
+}
+EXPORT_SYMBOL_GPL(sata_set_spd);
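
A short worked example of the mapping used above, restated as a
hypothetical helper: sata_spd_limit is a bitmask of allowed generations
(bit 0 = 1.5 Gbps, bit 1 = 3.0 Gbps, bit 2 = 6.0 Gbps) and fls() picks the
highest allowed one for the 4-bit SPD field of SControl.

        static u32 foo_spd_limit_to_scontrol(u32 scontrol, u32 spd_limit)
        {
                /* e.g. spd_limit == 0x3 -> fls(0x3) == 2 -> SPD = 2, capping
                 * the PHY at 3.0 Gbps; UINT_MAX means "no limit" -> SPD = 0 */
                u32 target = (spd_limit == UINT_MAX) ? 0 : fls(spd_limit);

                return (scontrol & ~0xf0) | ((target & 0xf) << 4);
        }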
+
+/**
+ *     sata_link_hardreset - reset link via SATA phy reset
+ *     @link: link to reset
+ *     @timing: timing parameters { interval, duration, timeout } in msec
+ *     @deadline: deadline jiffies for the operation
+ *     @online: optional out parameter indicating link onlineness
+ *     @check_ready: optional callback to check link readiness
+ *
+ *     SATA phy-reset @link using DET bits of SControl register.
+ *     After hardreset, link readiness is waited upon using
+ *     ata_wait_ready() if @check_ready is specified.  LLDs are
+ *     allowed to not specify @check_ready and wait themselves after
+ *     this function returns.  Device classification is the LLD's
+ *     responsibility.
+ *
+ *     *@online is set to one iff reset succeeded and @link is online
+ *     after reset.
+ *
+ *     LOCKING:
+ *     Kernel thread context (may sleep)
+ *
+ *     RETURNS:
+ *     0 on success, -errno otherwise.
+ */
+int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
+                       unsigned long deadline,
+                       bool *online, int (*check_ready)(struct ata_link *))
+{
+       u32 scontrol;
+       int rc;
+
+       DPRINTK("ENTER\n");
+
+       if (online)
+               *online = false;
+
+       if (sata_set_spd_needed(link)) {
+               /* SATA spec says nothing about how to reconfigure
+                * spd.  To be on the safe side, turn off phy during
+                * reconfiguration.  This works for at least ICH7 AHCI
+                * and Sil3124.
+                */
+               if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+                       goto out;
+
+               scontrol = (scontrol & 0x0f0) | 0x304;
+
+               if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
+                       goto out;
+
+               sata_set_spd(link);
+       }
+
+       /* issue phy wake/reset */
+       if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+               goto out;
+
+       scontrol = (scontrol & 0x0f0) | 0x301;
+
+       if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
+               goto out;
+
+       /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
+        * 10.4.2 says at least 1 ms.
+        */
+       ata_msleep(link->ap, 1);
+
+       /* bring link back */
+       rc = sata_link_resume(link, timing, deadline);
+       if (rc)
+               goto out;
+       /* if link is offline nothing more to do */
+       if (ata_phys_link_offline(link))
+               goto out;
+
+       /* Link is online.  From this point, -ENODEV too is an error. */
+       if (online)
+               *online = true;
+
+       if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
+               /* If PMP is supported, we have to do follow-up SRST.
+                * Some PMPs don't send D2H Reg FIS after hardreset if
+                * the first port is empty.  Wait only for
+                * ATA_TMOUT_PMP_SRST_WAIT.
+                */
+               if (check_ready) {
+                       unsigned long pmp_deadline;
+
+                       pmp_deadline = ata_deadline(jiffies,
+                                                   ATA_TMOUT_PMP_SRST_WAIT);
+                       if (time_after(pmp_deadline, deadline))
+                               pmp_deadline = deadline;
+                       ata_wait_ready(link, pmp_deadline, check_ready);
+               }
+               rc = -EAGAIN;
+               goto out;
+       }
+
+       rc = 0;
+       if (check_ready)
+               rc = ata_wait_ready(link, deadline, check_ready);
+ out:
+       if (rc && rc != -EAGAIN) {
+               /* online is set iff link is online && reset succeeded */
+               if (online)
+                       *online = false;
+               ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
+       }
+       DPRINTK("EXIT, rc=%d\n", rc);
+       return rc;
+}
+EXPORT_SYMBOL_GPL(sata_link_hardreset);
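
As a usage sketch, an LLD ->hardreset hook built on this helper can stay
tiny; the shape below mirrors sata_std_hardreset(), passing the debounce
timing chosen by EH and returning -EAGAIN when the link came up so that
follow-up classification runs (foo_hardreset is a hypothetical name):

        static int foo_hardreset(struct ata_link *link, unsigned int *class,
                                 unsigned long deadline)
        {
                const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
                bool online;
                int rc;

                /* COMRESET the link; classification is left to the caller/EH */
                rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
                return online ? -EAGAIN : rc;
        }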
+
+/**
+ *     ata_qc_complete_multiple - Complete multiple qcs successfully
+ *     @ap: port in question
+ *     @qc_active: new qc_active mask
+ *
+ *     Complete in-flight commands.  This function is meant to be
+ *     called from the low-level driver's interrupt routine to complete
+ *     requests normally.  ap->qc_active and @qc_active are compared
+ *     and commands are completed accordingly.
+ *
+ *     Always use this function when completing multiple NCQ commands
+ *     from IRQ handlers instead of calling ata_qc_complete()
+ *     multiple times to keep IRQ expect status properly in sync.
+ *
+ *     LOCKING:
+ *     spin_lock_irqsave(host lock)
+ *
+ *     RETURNS:
+ *     Number of completed commands on success, -errno otherwise.
+ */
+int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
+{
+       u64 done_mask, ap_qc_active = ap->qc_active;
+       int nr_done = 0;
+
+       /*
+        * If the internal tag is set on ap->qc_active, then we care about
+        * bit0 on the passed in qc_active mask. Move that bit up to match
+        * the internal tag.
+        */
+       if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
+               qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
+               qc_active ^= qc_active & 0x01;
+       }
+
+       done_mask = ap_qc_active ^ qc_active;
+
+       if (unlikely(done_mask & qc_active)) {
+               ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
+                            ap->qc_active, qc_active);
+               return -EINVAL;
+       }
+
+       while (done_mask) {
+               struct ata_queued_cmd *qc;
+               unsigned int tag = __ffs64(done_mask);
+
+               qc = ata_qc_from_tag(ap, tag);
+               if (qc) {
+                       ata_qc_complete(qc);
+                       nr_done++;
+               }
+               done_mask &= ~(1ULL << tag);
+       }
+
+       return nr_done;
+}
+EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
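
A hypothetical interrupt-path sketch (the register offset and names are
invented for illustration): the driver reads back which tags the hardware
still considers active and passes that mask straight in; the helper derives
the done mask from ap->qc_active by itself.

        #define FOO_PORT_ACTIVE_TAGS    0x34    /* hypothetical per-port register */

        static void foo_port_intr(struct ata_port *ap, void __iomem *port_mmio)
        {
                /* one bit per tag still in flight in the controller */
                u64 qc_active = readl(port_mmio + FOO_PORT_ACTIVE_TAGS);

                if (ata_qc_complete_multiple(ap, qc_active) < 0)
                        ata_port_freeze(ap);    /* illegal transition, let EH recover */
        }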
+
+/**
+ *     ata_slave_link_init - initialize slave link
+ *     @ap: port to initialize slave link for
+ *
+ *     Create and initialize slave link for @ap.  This enables slave
+ *     link handling on the port.
+ *
+ *     In libata, a port contains links and a link contains devices.
+ *     There is a single host link but if a PMP is attached to it,
+ *     there can be multiple fan-out links.  On SATA, there's usually
+ *     a single device connected to a link but PATA and SATA
+ *     controllers emulating a TF based interface can have two - master
+ *     and slave.
+ *
+ *     However, there are a few controllers which don't fit into this
+ *     abstraction too well - SATA controllers which emulate TF
+ *     interface with both master and slave devices but also have
+ *     separate SCR register sets for each device.  These controllers
+ *     need separate links for physical link handling
+ *     (e.g. onlineness, link speed) but should be treated like a
+ *     traditional M/S controller for everything else (e.g. command
+ *     issue, softreset).
+ *
+ *     slave_link is libata's way of handling this class of
+ *     controllers without impacting core layer too much.  For
+ *     anything other than physical link handling, the default host
+ *     link is used for both master and slave.  For physical link
+ *     handling, separate @ap->slave_link is used.  All dirty details
+ *     are implemented inside libata core layer.  From LLD's POV, the
+ *     only difference is that prereset, hardreset and postreset are
+ *     called once more for the slave link, so the reset sequence
+ *     looks like the following.
+ *
+ *     prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
+ *     softreset(M) -> postreset(M) -> postreset(S)
+ *
+ *     Note that softreset is called only for the master.  Softreset
+ *     resets both M/S by definition, so SRST on master should handle
+ *     both (the standard method will work just fine).
+ *
+ *     LOCKING:
+ *     Should be called before host is registered.
+ *
+ *     RETURNS:
+ *     0 on success, -errno on failure.
+ */
+int ata_slave_link_init(struct ata_port *ap)
+{
+       struct ata_link *link;
+
+       WARN_ON(ap->slave_link);
+       WARN_ON(ap->flags & ATA_FLAG_PMP);
+
+       link = kzalloc(sizeof(*link), GFP_KERNEL);
+       if (!link)
+               return -ENOMEM;
+
+       ata_link_init(ap, link, 1);
+       ap->slave_link = link;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ata_slave_link_init);
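
A usage sketch for a controller with per-device SCR blocks (hypothetical
probe fragment): the slave link is created for each port before the host is
registered.

        static int foo_register_host(struct ata_host *host,
                                     struct scsi_host_template *sht)
        {
                int i, rc;

                for (i = 0; i < host->n_ports; i++) {
                        rc = ata_slave_link_init(host->ports[i]);
                        if (rc)
                                return rc;
                }

                return ata_host_register(host, sht);
        }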
+
+/**
+ *     sata_lpm_ignore_phy_events - test if PHY event should be ignored
+ *     @link: Link receiving the event
+ *
+ *     Test whether the received PHY event has to be ignored or not.
+ *
+ *     LOCKING:
+ *     None.
+ *
+ *     RETURNS:
+ *     True if the event has to be ignored.
+ */
+bool sata_lpm_ignore_phy_events(struct ata_link *link)
+{
+       unsigned long lpm_timeout = link->last_lpm_change +
+                                   msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
+
+       /* if LPM is enabled, PHYRDY doesn't mean anything */
+       if (link->lpm_policy > ATA_LPM_MAX_POWER)
+               return true;
+
+       /* ignore the first PHY event after the LPM policy changed
+        * as it might be spurious
+        */
+       if ((link->flags & ATA_LFLAG_CHANGED) &&
+           time_before(jiffies, lpm_timeout))
+               return true;
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
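
A sketch of the intended call site (FOO_IRQ_PHYRDY and the helper are
hypothetical): in the port interrupt handler, a PHYRDY-change indication is
swallowed, and the matching SError bit cleared, whenever this predicate says
the event should be ignored.

        #define FOO_IRQ_PHYRDY  (1 << 22)       /* hypothetical interrupt bit */

        static void foo_filter_phyrdy(struct ata_port *ap, u32 *irq_stat)
        {
                if (!(*irq_stat & FOO_IRQ_PHYRDY))
                        return;

                if (sata_lpm_ignore_phy_events(&ap->link)) {
                        /* spurious while LPM is in effect; drop it */
                        *irq_stat &= ~FOO_IRQ_PHYRDY;
                        sata_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
                }
        }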
+
+static const char *ata_lpm_policy_names[] = {
+       [ATA_LPM_UNKNOWN]               = "max_performance",
+       [ATA_LPM_MAX_POWER]             = "max_performance",
+       [ATA_LPM_MED_POWER]             = "medium_power",
+       [ATA_LPM_MED_POWER_WITH_DIPM]   = "med_power_with_dipm",
+       [ATA_LPM_MIN_POWER_WITH_PARTIAL] = "min_power_with_partial",
+       [ATA_LPM_MIN_POWER]             = "min_power",
+};
+
+static ssize_t ata_scsi_lpm_store(struct device *device,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(device);
+       struct ata_port *ap = ata_shost_to_port(shost);
+       struct ata_link *link;
+       struct ata_device *dev;
+       enum ata_lpm_policy policy;
+       unsigned long flags;
+
+       /* UNKNOWN is internal state, iterate from MAX_POWER */
+       for (policy = ATA_LPM_MAX_POWER;
+            policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) {
+               const char *name = ata_lpm_policy_names[policy];
+
+               if (strncmp(name, buf, strlen(name)) == 0)
+                       break;
+       }
+       if (policy == ARRAY_SIZE(ata_lpm_policy_names))
+               return -EINVAL;
+
+       spin_lock_irqsave(ap->lock, flags);
+
+       ata_for_each_link(link, ap, EDGE) {
+               ata_for_each_dev(dev, link, ENABLED) {
+                       if (dev->horkage & ATA_HORKAGE_NOLPM) {
+                               count = -EOPNOTSUPP;
+                               goto out_unlock;
+                       }
+               }
+       }
+
+       ap->target_lpm_policy = policy;
+       ata_port_schedule_eh(ap);
+out_unlock:
+       spin_unlock_irqrestore(ap->lock, flags);
+       return count;
+}
+
+static ssize_t ata_scsi_lpm_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ata_port *ap = ata_shost_to_port(shost);
+
+       if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
+               return -EINVAL;
+
+       return snprintf(buf, PAGE_SIZE, "%s\n",
+                       ata_lpm_policy_names[ap->target_lpm_policy]);
+}
+DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
+           ata_scsi_lpm_show, ata_scsi_lpm_store);
+EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
+
+static ssize_t ata_ncq_prio_enable_show(struct device *device,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(device);
+       struct ata_port *ap;
+       struct ata_device *dev;
+       bool ncq_prio_enable;
+       int rc = 0;
+
+       ap = ata_shost_to_port(sdev->host);
+
+       spin_lock_irq(ap->lock);
+       dev = ata_scsi_find_dev(ap, sdev);
+       if (!dev) {
+               rc = -ENODEV;
+               goto unlock;
+       }
+
+       ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE;
+
+unlock:
+       spin_unlock_irq(ap->lock);
+
+       return rc ? rc : snprintf(buf, 20, "%u\n", ncq_prio_enable);
+}
+
+static ssize_t ata_ncq_prio_enable_store(struct device *device,
+                                        struct device_attribute *attr,
+                                        const char *buf, size_t len)
+{
+       struct scsi_device *sdev = to_scsi_device(device);
+       struct ata_port *ap;
+       struct ata_device *dev;
+       long int input;
+       int rc;
+
+       rc = kstrtol(buf, 10, &input);
+       if (rc)
+               return rc;
+       if ((input < 0) || (input > 1))
+               return -EINVAL;
+
+       ap = ata_shost_to_port(sdev->host);
+       dev = ata_scsi_find_dev(ap, sdev);
+       if (unlikely(!dev))
+               return  -ENODEV;
+
+       spin_lock_irq(ap->lock);
+       if (input)
+               dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLE;
+       else
+               dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
+
+       dev->link->eh_info.action |= ATA_EH_REVALIDATE;
+       dev->link->eh_info.flags |= ATA_EHI_QUIET;
+       ata_port_schedule_eh(ap);
+       spin_unlock_irq(ap->lock);
+
+       ata_port_wait_eh(ap);
+
+       if (input) {
+               spin_lock_irq(ap->lock);
+               if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
+                       dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
+                       rc = -EIO;
+               }
+               spin_unlock_irq(ap->lock);
+       }
+
+       return rc ? rc : len;
+}
+
+DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
+           ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
+EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
+
+struct device_attribute *ata_ncq_sdev_attrs[] = {
+       &dev_attr_unload_heads,
+       &dev_attr_ncq_prio_enable,
+       NULL
+};
+EXPORT_SYMBOL_GPL(ata_ncq_sdev_attrs);
+
+static ssize_t
+ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
+                         const char *buf, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ata_port *ap = ata_shost_to_port(shost);
+       if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM))
+               return ap->ops->em_store(ap, buf, count);
+       return -EINVAL;
+}
+
+static ssize_t
+ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ata_port *ap = ata_shost_to_port(shost);
+
+       if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM))
+               return ap->ops->em_show(ap, buf);
+       return -EINVAL;
+}
+DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR,
+               ata_scsi_em_message_show, ata_scsi_em_message_store);
+EXPORT_SYMBOL_GPL(dev_attr_em_message);
+
+static ssize_t
+ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
+                             char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ata_port *ap = ata_shost_to_port(shost);
+
+       return snprintf(buf, 23, "%d\n", ap->em_message_type);
+}
+DEVICE_ATTR(em_message_type, S_IRUGO,
+                 ata_scsi_em_message_type_show, NULL);
+EXPORT_SYMBOL_GPL(dev_attr_em_message_type);
+
+static ssize_t
+ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct ata_port *ap = ata_shost_to_port(sdev->host);
+       struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
+
+       if (atadev && ap->ops->sw_activity_show &&
+           (ap->flags & ATA_FLAG_SW_ACTIVITY))
+               return ap->ops->sw_activity_show(atadev, buf);
+       return -EINVAL;
+}
+
+static ssize_t
+ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
+       const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct ata_port *ap = ata_shost_to_port(sdev->host);
+       struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
+       enum sw_activity val;
+       int rc;
+
+       if (atadev && ap->ops->sw_activity_store &&
+           (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
+               val = simple_strtoul(buf, NULL, 0);
+               switch (val) {
+               case OFF: case BLINK_ON: case BLINK_OFF:
+                       rc = ap->ops->sw_activity_store(atadev, val);
+                       if (!rc)
+                               return count;
+                       else
+                               return rc;
+               }
+       }
+       return -EINVAL;
+}
+DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
+                       ata_scsi_activity_store);
+EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
+
+/**
+ *     __ata_change_queue_depth - helper for ata_scsi_change_queue_depth
+ *     @ap: ATA port of the device to change the queue depth for
+ *     @sdev: SCSI device to configure queue depth for
+ *     @queue_depth: new queue depth
+ *
+ *     libsas and libata have different approaches for associating a sdev to
+ *     its ata_port.
+ *
+ */
+int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
+                            int queue_depth)
+{
+       struct ata_device *dev;
+       unsigned long flags;
+
+       if (queue_depth < 1 || queue_depth == sdev->queue_depth)
+               return sdev->queue_depth;
+
+       dev = ata_scsi_find_dev(ap, sdev);
+       if (!dev || !ata_dev_enabled(dev))
+               return sdev->queue_depth;
+
+       /* NCQ enabled? */
+       spin_lock_irqsave(ap->lock, flags);
+       dev->flags &= ~ATA_DFLAG_NCQ_OFF;
+       if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
+               dev->flags |= ATA_DFLAG_NCQ_OFF;
+               queue_depth = 1;
+       }
+       spin_unlock_irqrestore(ap->lock, flags);
+
+       /* limit and apply queue depth */
+       queue_depth = min(queue_depth, sdev->host->can_queue);
+       queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
+       queue_depth = min(queue_depth, ATA_MAX_QUEUE);
+
+       if (sdev->queue_depth == queue_depth)
+               return -EINVAL;
+
+       return scsi_change_queue_depth(sdev, queue_depth);
+}
+EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
+
+/**
+ *     ata_scsi_change_queue_depth - SCSI callback for queue depth config
+ *     @sdev: SCSI device to configure queue depth for
+ *     @queue_depth: new queue depth
+ *
+ *     This is libata standard hostt->change_queue_depth callback.
+ *     SCSI will call into this callback when user tries to set queue
+ *     depth via sysfs.
+ *
+ *     LOCKING:
+ *     SCSI layer (we don't care)
+ *
+ *     RETURNS:
+ *     Newly configured queue depth.
+ */
+int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
+{
+       struct ata_port *ap = ata_shost_to_port(sdev->host);
+
+       return __ata_change_queue_depth(ap, sdev, queue_depth);
+}
+EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
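
For context, drivers rarely call this directly; it is reached through the
SCSI host template.  A minimal hypothetical template for an NCQ-capable
driver leans on the stock ATA_NCQ_SHT() macro, which points
->change_queue_depth at ata_scsi_change_queue_depth:

        static struct scsi_host_template foo_sht = {
                ATA_NCQ_SHT("foo"),
                .can_queue      = ATA_MAX_QUEUE,
                .sg_tablesize   = LIBATA_MAX_PRD,
                .dma_boundary   = ATA_DMA_BOUNDARY,
        };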
+
+/**
+ *     ata_sas_port_alloc - Allocate port for a SAS attached SATA device
+ *     @host: ATA host container for all SAS ports
+ *     @port_info: Information from low-level host driver
+ *     @shost: SCSI host that the scsi device is attached to
+ *
+ *     LOCKING:
+ *     PCI/etc. bus probe sem.
+ *
+ *     RETURNS:
+ *     ata_port pointer on success / NULL on failure.
+ */
+
+struct ata_port *ata_sas_port_alloc(struct ata_host *host,
+                                   struct ata_port_info *port_info,
+                                   struct Scsi_Host *shost)
+{
+       struct ata_port *ap;
+
+       ap = ata_port_alloc(host);
+       if (!ap)
+               return NULL;
+
+       ap->port_no = 0;
+       ap->lock = &host->lock;
+       ap->pio_mask = port_info->pio_mask;
+       ap->mwdma_mask = port_info->mwdma_mask;
+       ap->udma_mask = port_info->udma_mask;
+       ap->flags |= port_info->flags;
+       ap->ops = port_info->port_ops;
+       ap->cbl = ATA_CBL_SATA;
+
+       return ap;
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
+
+/**
+ *     ata_sas_port_start - Set port up for dma.
+ *     @ap: Port to initialize
+ *
+ *     Called just after data structures for each port are
+ *     initialized.
+ *
+ *     May be used as the port_start() entry in ata_port_operations.
+ *
+ *     LOCKING:
+ *     Inherited from caller.
+ */
+int ata_sas_port_start(struct ata_port *ap)
+{
+       /*
+        * the port is marked as frozen at allocation time, but if we don't
+        * have new eh, we won't thaw it
+        */
+       if (!ap->ops->error_handler)
+               ap->pflags &= ~ATA_PFLAG_FROZEN;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_start);
+
+/**
+ *     ata_sas_port_stop - Undo ata_sas_port_start()
+ *     @ap: Port to shut down
+ *
+ *     May be used as the port_stop() entry in ata_port_operations.
+ *
+ *     LOCKING:
+ *     Inherited from caller.
+ */
+
+void ata_sas_port_stop(struct ata_port *ap)
+{
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_stop);
+
+/**
+ * ata_sas_async_probe - simply schedule probing and return
+ * @ap: Port to probe
+ *
+ * For batch scheduling of probes for SAS attached ATA devices; assumes
+ * the port has already been through ata_sas_port_init()
+ */
+void ata_sas_async_probe(struct ata_port *ap)
+{
+       __ata_port_probe(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_async_probe);
+
+int ata_sas_sync_probe(struct ata_port *ap)
+{
+       return ata_port_probe(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_sync_probe);
+
+
+/**
+ *     ata_sas_port_init - Initialize a SATA device
+ *     @ap: SATA port to initialize
+ *
+ *     LOCKING:
+ *     PCI/etc. bus probe sem.
+ *
+ *     RETURNS:
+ *     Zero on success, non-zero on error.
+ */
+
+int ata_sas_port_init(struct ata_port *ap)
+{
+       int rc = ap->ops->port_start(ap);
+
+       if (rc)
+               return rc;
+       ap->print_id = atomic_inc_return(&ata_print_id);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_init);
+
+int ata_sas_tport_add(struct device *parent, struct ata_port *ap)
+{
+       return ata_tport_add(parent, ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_tport_add);
+
+void ata_sas_tport_delete(struct ata_port *ap)
+{
+       ata_tport_delete(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
+
+/**
+ *     ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
+ *     @ap: SATA port to destroy
+ *
+ */
+
+void ata_sas_port_destroy(struct ata_port *ap)
+{
+       if (ap->ops->port_stop)
+               ap->ops->port_stop(ap);
+       kfree(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
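
Taken together, a libsas-style user drives these helpers roughly as below.
This is a sketch only: the names are hypothetical, error handling is
trimmed, and the authoritative ordering is whatever libsas actually does.

        static int foo_attach_sata_dev(struct ata_host *host,
                                       struct ata_port_info *pi,
                                       struct Scsi_Host *shost,
                                       struct device *parent)
        {
                struct ata_port *ap;
                int rc;

                ap = ata_sas_port_alloc(host, pi, shost);
                if (!ap)
                        return -ENOMEM;

                rc = ata_sas_port_init(ap);
                if (rc)
                        goto err_destroy;

                rc = ata_sas_tport_add(parent, ap);
                if (rc)
                        goto err_destroy;

                ata_sas_async_probe(ap);        /* or ata_sas_sync_probe(ap) */
                return 0;

        err_destroy:
                ata_sas_port_destroy(ap);
                return rc;
        }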
+
+/**
+ *     ata_sas_slave_configure - Default slave_config routine for libata devices
+ *     @sdev: SCSI device to configure
+ *     @ap: ATA port to which SCSI device is attached
+ *
+ *     RETURNS:
+ *     Zero.
+ */
+
+int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
+{
+       ata_scsi_sdev_config(sdev);
+       ata_scsi_dev_config(sdev, ap->link.device);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
+
+/**
+ *     ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
+ *     @cmd: SCSI command to be sent
+ *     @ap:    ATA port to which the command is being sent
+ *
+ *     RETURNS:
+ *     Return value from __ata_scsi_queuecmd() if @cmd can be queued,
+ *     0 otherwise.
+ */
+
+int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
+{
+       int rc = 0;
+
+       ata_scsi_dump_cdb(ap, cmd);
+
+       if (likely(ata_dev_enabled(ap->link.device)))
+               rc = __ata_scsi_queuecmd(cmd, ap->link.device);
+       else {
+               cmd->result = (DID_BAD_TARGET << 16);
+               cmd->scsi_done(cmd);
+       }
+       return rc;
+}
+EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
+
+int ata_sas_allocate_tag(struct ata_port *ap)
+{
+       unsigned int max_queue = ap->host->n_tags;
+       unsigned int i, tag;
+
+       for (i = 0, tag = ap->sas_last_tag + 1; i < max_queue; i++, tag++) {
+               tag = tag < max_queue ? tag : 0;
+
+               /* the last tag is reserved for internal command. */
+               if (ata_tag_internal(tag))
+                       continue;
+
+               if (!test_and_set_bit(tag, &ap->sas_tag_allocated)) {
+                       ap->sas_last_tag = tag;
+                       return tag;
+               }
+       }
+       return -1;
+}
+
+void ata_sas_free_tag(unsigned int tag, struct ata_port *ap)
+{
+       clear_bit(tag, &ap->sas_tag_allocated);
+}
+
+/**
+ *     sata_async_notification - SATA async notification handler
+ *     @ap: ATA port where async notification is received
+ *
+ *     Handler to be called when async notification via SDB FIS is
+ *     received.  This function schedules EH if necessary.
+ *
+ *     LOCKING:
+ *     spin_lock_irqsave(host lock)
+ *
+ *     RETURNS:
+ *     1 if EH is scheduled, 0 otherwise.
+ */
+int sata_async_notification(struct ata_port *ap)
+{
+       u32 sntf;
+       int rc;
+
+       if (!(ap->flags & ATA_FLAG_AN))
+               return 0;
+
+       rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
+       if (rc == 0)
+               sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
+
+       if (!sata_pmp_attached(ap) || rc) {
+               /* PMP is not attached or SNTF is not available */
+               if (!sata_pmp_attached(ap)) {
+                       /* PMP is not attached.  Check whether ATAPI
+                        * AN is configured.  If so, notify media
+                        * change.
+                        */
+                       struct ata_device *dev = ap->link.device;
+
+                       if ((dev->class == ATA_DEV_ATAPI) &&
+                           (dev->flags & ATA_DFLAG_AN))
+                               ata_scsi_media_change_notify(dev);
+                       return 0;
+               } else {
+                       /* PMP is attached but SNTF is not available.
+                        * ATAPI async media change notification is
+                        * not used.  The PMP must be reporting PHY
+                        * status change, schedule EH.
+                        */
+                       ata_port_schedule_eh(ap);
+                       return 1;
+               }
+       } else {
+               /* PMP is attached and SNTF is available */
+               struct ata_link *link;
+
+               /* check and notify ATAPI AN */
+               ata_for_each_link(link, ap, EDGE) {
+                       if (!(sntf & (1 << link->pmp)))
+                               continue;
+
+                       if ((link->device->class == ATA_DEV_ATAPI) &&
+                           (link->device->flags & ATA_DFLAG_AN))
+                               ata_scsi_media_change_notify(link->device);
+               }
+
+               /* If PMP is reporting that PHY status of some
+                * downstream ports has changed, schedule EH.
+                */
+               if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
+                       ata_port_schedule_eh(ap);
+                       return 1;
+               }
+
+               return 0;
+       }
+}
+EXPORT_SYMBOL_GPL(sata_async_notification);
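
A sketch of when an LLD invokes this (the surrounding helper is
hypothetical): on receipt of a Set Device Bits FIS whose Notification bit is
set, the interrupt handler simply forwards the event and lets this function
decide whether EH is needed.

        static void foo_sdb_fis_intr(struct ata_port *ap, const __le32 *sdb_fis)
        {
                u32 dw0 = le32_to_cpu(sdb_fis[0]);

                /* bit 15 of the first dword of an SDB FIS is the 'N' bit */
                if (dw0 & (1 << 15))
                        sata_async_notification(ap);
        }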
+
+/**
+ *     ata_eh_read_log_10h - Read log page 10h for NCQ error details
+ *     @dev: Device to read log page 10h from
+ *     @tag: Resulting tag of the failed command
+ *     @tf: Resulting taskfile registers of the failed command
+ *
+ *     Read log page 10h to obtain NCQ error details and clear error
+ *     condition.
+ *
+ *     LOCKING:
+ *     Kernel thread context (may sleep).
+ *
+ *     RETURNS:
+ *     0 on success, -errno otherwise.
+ */
+static int ata_eh_read_log_10h(struct ata_device *dev,
+                              int *tag, struct ata_taskfile *tf)
+{
+       u8 *buf = dev->link->ap->sector_buf;
+       unsigned int err_mask;
+       u8 csum;
+       int i;
+
+       err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
+       if (err_mask)
+               return -EIO;
+
+       csum = 0;
+       for (i = 0; i < ATA_SECT_SIZE; i++)
+               csum += buf[i];
+       if (csum)
+               ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
+                            csum);
+
+       if (buf[0] & 0x80)
+               return -ENOENT;
+
+       *tag = buf[0] & 0x1f;
+
+       tf->command = buf[2];
+       tf->feature = buf[3];
+       tf->lbal = buf[4];
+       tf->lbam = buf[5];
+       tf->lbah = buf[6];
+       tf->device = buf[7];
+       tf->hob_lbal = buf[8];
+       tf->hob_lbam = buf[9];
+       tf->hob_lbah = buf[10];
+       tf->nsect = buf[12];
+       tf->hob_nsect = buf[13];
+       if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id))
+               tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
+
+       return 0;
+}
+
+/**
+ *     ata_eh_analyze_ncq_error - analyze NCQ error
+ *     @link: ATA link to analyze NCQ error for
+ *
+ *     Read log page 10h, determine the offending qc and acquire
+ *     error status TF.  For NCQ device errors, all an LLDD has to do
+ *     is set AC_ERR_DEV in ehi->err_mask.  This function takes
+ *     care of the rest.
+ *
+ *     LOCKING:
+ *     Kernel thread context (may sleep).
+ */
+void ata_eh_analyze_ncq_error(struct ata_link *link)
+{
+       struct ata_port *ap = link->ap;
+       struct ata_eh_context *ehc = &link->eh_context;
+       struct ata_device *dev = link->device;
+       struct ata_queued_cmd *qc;
+       struct ata_taskfile tf;
+       int tag, rc;
+
+       /* if frozen, we can't do much */
+       if (ap->pflags & ATA_PFLAG_FROZEN)
+               return;
+
+       /* is it NCQ device error? */
+       if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
+               return;
+
+       /* has LLDD analyzed already? */
+       ata_qc_for_each_raw(ap, qc, tag) {
+               if (!(qc->flags & ATA_QCFLAG_FAILED))
+                       continue;
+
+               if (qc->err_mask)
+                       return;
+       }
+
+       /* okay, this error is ours */
+       memset(&tf, 0, sizeof(tf));
+       rc = ata_eh_read_log_10h(dev, &tag, &tf);
+       if (rc) {
+               ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
+                            rc);
+               return;
+       }
+
+       if (!(link->sactive & (1 << tag))) {
+               ata_link_err(link, "log page 10h reported inactive tag %d\n",
+                            tag);
+               return;
+       }
+
+       /* we've got the perpetrator, condemn it */
+       qc = __ata_qc_from_tag(ap, tag);
+       memcpy(&qc->result_tf, &tf, sizeof(tf));
+       qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+       qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
+       if (dev->class == ATA_DEV_ZAC &&
+           ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) {
+               char sense_key, asc, ascq;
+
+               sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
+               asc = (qc->result_tf.auxiliary >> 8) & 0xff;
+               ascq = qc->result_tf.auxiliary & 0xff;
+               ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc, ascq);
+               ata_scsi_set_sense_information(dev, qc->scsicmd,
+                                              &qc->result_tf);
+               qc->flags |= ATA_QCFLAG_SENSE_VALID;
+       }
+
+       ehc->i.err_mask &= ~AC_ERR_DEV;
+}
+EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
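
From the LLD side the contract is small; a hypothetical device-error
interrupt fragment only needs to flag AC_ERR_DEV and kick EH, after which
the autopsy path ends up calling ata_eh_analyze_ncq_error():

        static void foo_dev_err_intr(struct ata_port *ap)
        {
                struct ata_eh_info *ehi = &ap->link.eh_info;

                ata_ehi_clear_desc(ehi);
                ata_ehi_push_desc(ehi, "device error");
                ehi->err_mask |= AC_ERR_DEV;

                /* abort in-flight commands and schedule EH */
                ata_port_abort(ap);
        }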
index eb2eb59..36e588d 100644
@@ -2,10 +2,6 @@
 /*
  *  libata-scsi.c - helper library for ATA
  *
- *  Maintained by:  Tejun Heo <tj@kernel.org>
- *                 Please ALWAYS copy linux-ide@vger.kernel.org
- *                 on emails.
- *
  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
  *  Copyright 2003-2004 Jeff Garzik
  *
 #include <linux/suspend.h>
 #include <asm/unaligned.h>
 #include <linux/ioprio.h>
+#include <linux/of.h>
 
 #include "libata.h"
 #include "libata-transport.h"
 
-#define ATA_SCSI_RBUF_SIZE     4096
+#define ATA_SCSI_RBUF_SIZE     576
 
 static DEFINE_SPINLOCK(ata_scsi_rbuf_lock);
 static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE];
@@ -49,8 +46,6 @@ typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);
 
 static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
                                        const struct scsi_device *scsidev);
-static struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
-                                           const struct scsi_device *scsidev);
 
 #define RW_RECOVERY_MPAGE 0x1
 #define RW_RECOVERY_MPAGE_LEN 12
@@ -90,71 +85,6 @@ static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
        0, 30   /* extended self test time, see 05-359r1 */
 };
 
-static const char *ata_lpm_policy_names[] = {
-       [ATA_LPM_UNKNOWN]               = "max_performance",
-       [ATA_LPM_MAX_POWER]             = "max_performance",
-       [ATA_LPM_MED_POWER]             = "medium_power",
-       [ATA_LPM_MED_POWER_WITH_DIPM]   = "med_power_with_dipm",
-       [ATA_LPM_MIN_POWER_WITH_PARTIAL] = "min_power_with_partial",
-       [ATA_LPM_MIN_POWER]             = "min_power",
-};
-
-static ssize_t ata_scsi_lpm_store(struct device *device,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t count)
-{
-       struct Scsi_Host *shost = class_to_shost(device);
-       struct ata_port *ap = ata_shost_to_port(shost);
-       struct ata_link *link;
-       struct ata_device *dev;
-       enum ata_lpm_policy policy;
-       unsigned long flags;
-
-       /* UNKNOWN is internal state, iterate from MAX_POWER */
-       for (policy = ATA_LPM_MAX_POWER;
-            policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) {
-               const char *name = ata_lpm_policy_names[policy];
-
-               if (strncmp(name, buf, strlen(name)) == 0)
-                       break;
-       }
-       if (policy == ARRAY_SIZE(ata_lpm_policy_names))
-               return -EINVAL;
-
-       spin_lock_irqsave(ap->lock, flags);
-
-       ata_for_each_link(link, ap, EDGE) {
-               ata_for_each_dev(dev, &ap->link, ENABLED) {
-                       if (dev->horkage & ATA_HORKAGE_NOLPM) {
-                               count = -EOPNOTSUPP;
-                               goto out_unlock;
-                       }
-               }
-       }
-
-       ap->target_lpm_policy = policy;
-       ata_port_schedule_eh(ap);
-out_unlock:
-       spin_unlock_irqrestore(ap->lock, flags);
-       return count;
-}
-
-static ssize_t ata_scsi_lpm_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       struct Scsi_Host *shost = class_to_shost(dev);
-       struct ata_port *ap = ata_shost_to_port(shost);
-
-       if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
-               return -EINVAL;
-
-       return snprintf(buf, PAGE_SIZE, "%s\n",
-                       ata_lpm_policy_names[ap->target_lpm_policy]);
-}
-DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
-           ata_scsi_lpm_show, ata_scsi_lpm_store);
-EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
-
 static ssize_t ata_scsi_park_show(struct device *device,
                                  struct device_attribute *attr, char *buf)
 {
@@ -258,83 +188,6 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
            ata_scsi_park_show, ata_scsi_park_store);
 EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
 
-static ssize_t ata_ncq_prio_enable_show(struct device *device,
-                                       struct device_attribute *attr,
-                                       char *buf)
-{
-       struct scsi_device *sdev = to_scsi_device(device);
-       struct ata_port *ap;
-       struct ata_device *dev;
-       bool ncq_prio_enable;
-       int rc = 0;
-
-       ap = ata_shost_to_port(sdev->host);
-
-       spin_lock_irq(ap->lock);
-       dev = ata_scsi_find_dev(ap, sdev);
-       if (!dev) {
-               rc = -ENODEV;
-               goto unlock;
-       }
-
-       ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE;
-
-unlock:
-       spin_unlock_irq(ap->lock);
-
-       return rc ? rc : snprintf(buf, 20, "%u\n", ncq_prio_enable);
-}
-
-static ssize_t ata_ncq_prio_enable_store(struct device *device,
-                                        struct device_attribute *attr,
-                                        const char *buf, size_t len)
-{
-       struct scsi_device *sdev = to_scsi_device(device);
-       struct ata_port *ap;
-       struct ata_device *dev;
-       long int input;
-       int rc;
-
-       rc = kstrtol(buf, 10, &input);
-       if (rc)
-               return rc;
-       if ((input < 0) || (input > 1))
-               return -EINVAL;
-
-       ap = ata_shost_to_port(sdev->host);
-       dev = ata_scsi_find_dev(ap, sdev);
-       if (unlikely(!dev))
-               return  -ENODEV;
-
-       spin_lock_irq(ap->lock);
-       if (input)
-               dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLE;
-       else
-               dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
-
-       dev->link->eh_info.action |= ATA_EH_REVALIDATE;
-       dev->link->eh_info.flags |= ATA_EHI_QUIET;
-       ata_port_schedule_eh(ap);
-       spin_unlock_irq(ap->lock);
-
-       ata_port_wait_eh(ap);
-
-       if (input) {
-               spin_lock_irq(ap->lock);
-               if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
-                       dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
-                       rc = -EIO;
-               }
-               spin_unlock_irq(ap->lock);
-       }
-
-       return rc ? rc : len;
-}
-
-DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
-           ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
-EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
-
 void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
                        u8 sk, u8 asc, u8 ascq)
 {
@@ -383,90 +236,8 @@ static void ata_scsi_set_invalid_parameter(struct ata_device *dev,
                                     field, 0xff, 0);
 }
 
-static ssize_t
-ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
-                         const char *buf, size_t count)
-{
-       struct Scsi_Host *shost = class_to_shost(dev);
-       struct ata_port *ap = ata_shost_to_port(shost);
-       if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM))
-               return ap->ops->em_store(ap, buf, count);
-       return -EINVAL;
-}
-
-static ssize_t
-ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr,
-                        char *buf)
-{
-       struct Scsi_Host *shost = class_to_shost(dev);
-       struct ata_port *ap = ata_shost_to_port(shost);
-
-       if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM))
-               return ap->ops->em_show(ap, buf);
-       return -EINVAL;
-}
-DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR,
-               ata_scsi_em_message_show, ata_scsi_em_message_store);
-EXPORT_SYMBOL_GPL(dev_attr_em_message);
-
-static ssize_t
-ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
-                             char *buf)
-{
-       struct Scsi_Host *shost = class_to_shost(dev);
-       struct ata_port *ap = ata_shost_to_port(shost);
-
-       return snprintf(buf, 23, "%d\n", ap->em_message_type);
-}
-DEVICE_ATTR(em_message_type, S_IRUGO,
-                 ata_scsi_em_message_type_show, NULL);
-EXPORT_SYMBOL_GPL(dev_attr_em_message_type);
-
-static ssize_t
-ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
-               char *buf)
-{
-       struct scsi_device *sdev = to_scsi_device(dev);
-       struct ata_port *ap = ata_shost_to_port(sdev->host);
-       struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
-
-       if (atadev && ap->ops->sw_activity_show &&
-           (ap->flags & ATA_FLAG_SW_ACTIVITY))
-               return ap->ops->sw_activity_show(atadev, buf);
-       return -EINVAL;
-}
-
-static ssize_t
-ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
-       const char *buf, size_t count)
-{
-       struct scsi_device *sdev = to_scsi_device(dev);
-       struct ata_port *ap = ata_shost_to_port(sdev->host);
-       struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
-       enum sw_activity val;
-       int rc;
-
-       if (atadev && ap->ops->sw_activity_store &&
-           (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
-               val = simple_strtoul(buf, NULL, 0);
-               switch (val) {
-               case OFF: case BLINK_ON: case BLINK_OFF:
-                       rc = ap->ops->sw_activity_store(atadev, val);
-                       if (!rc)
-                               return count;
-                       else
-                               return rc;
-               }
-       }
-       return -EINVAL;
-}
-DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
-                       ata_scsi_activity_store);
-EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
-
 struct device_attribute *ata_common_sdev_attrs[] = {
        &dev_attr_unload_heads,
-       &dev_attr_ncq_prio_enable,
        NULL
 };
 EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
@@ -499,6 +270,7 @@ int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(ata_std_bios_param);
 
 /**
  *     ata_scsi_unlock_native_capacity - unlock native capacity
@@ -528,6 +300,7 @@ void ata_scsi_unlock_native_capacity(struct scsi_device *sdev)
        spin_unlock_irqrestore(ap->lock, flags);
        ata_port_wait_eh(ap);
 }
+EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
 
 /**
  *     ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
@@ -1215,7 +988,7 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
        scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block);
 }
 
-static void ata_scsi_sdev_config(struct scsi_device *sdev)
+void ata_scsi_sdev_config(struct scsi_device *sdev)
 {
        sdev->use_10_for_rw = 1;
        sdev->use_10_for_ms = 1;
@@ -1255,8 +1028,7 @@ static int atapi_drain_needed(struct request *rq)
        return atapi_cmd_type(scsi_req(rq)->cmd[0]) == ATAPI_MISC;
 }
 
-static int ata_scsi_dev_config(struct scsi_device *sdev,
-                              struct ata_device *dev)
+int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
 {
        struct request_queue *q = sdev->request_queue;
 
@@ -1344,6 +1116,7 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
 
        return rc;
 }
+EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
 
 /**
  *     ata_scsi_slave_destroy - SCSI device is about to be destroyed
@@ -1383,71 +1156,7 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
        q->dma_drain_buffer = NULL;
        q->dma_drain_size = 0;
 }
-
-/**
- *     __ata_change_queue_depth - helper for ata_scsi_change_queue_depth
- *     @ap: ATA port to which the device change the queue depth
- *     @sdev: SCSI device to configure queue depth for
- *     @queue_depth: new queue depth
- *
- *     libsas and libata have different approaches for associating a sdev to
- *     its ata_port.
- *
- */
-int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
-                            int queue_depth)
-{
-       struct ata_device *dev;
-       unsigned long flags;
-
-       if (queue_depth < 1 || queue_depth == sdev->queue_depth)
-               return sdev->queue_depth;
-
-       dev = ata_scsi_find_dev(ap, sdev);
-       if (!dev || !ata_dev_enabled(dev))
-               return sdev->queue_depth;
-
-       /* NCQ enabled? */
-       spin_lock_irqsave(ap->lock, flags);
-       dev->flags &= ~ATA_DFLAG_NCQ_OFF;
-       if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
-               dev->flags |= ATA_DFLAG_NCQ_OFF;
-               queue_depth = 1;
-       }
-       spin_unlock_irqrestore(ap->lock, flags);
-
-       /* limit and apply queue depth */
-       queue_depth = min(queue_depth, sdev->host->can_queue);
-       queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
-       queue_depth = min(queue_depth, ATA_MAX_QUEUE);
-
-       if (sdev->queue_depth == queue_depth)
-               return -EINVAL;
-
-       return scsi_change_queue_depth(sdev, queue_depth);
-}
-
-/**
- *     ata_scsi_change_queue_depth - SCSI callback for queue depth config
- *     @sdev: SCSI device to configure queue depth for
- *     @queue_depth: new queue depth
- *
- *     This is libata standard hostt->change_queue_depth callback.
- *     SCSI will call into this callback when user tries to set queue
- *     depth via sysfs.
- *
- *     LOCKING:
- *     SCSI layer (we don't care)
- *
- *     RETURNS:
- *     Newly configured queue depth.
- */
-int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
-{
-       struct ata_port *ap = ata_shost_to_port(sdev->host);
-
-       return __ata_change_queue_depth(ap, sdev, queue_depth);
-}
+EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
 
 /**
  *     ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
@@ -2354,10 +2063,6 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
  */
 static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
 {
-       struct ata_taskfile tf;
-
-       memset(&tf, 0, sizeof(tf));
-
        rbuf[1] = 0x89;                 /* our page code */
        rbuf[2] = (0x238 >> 8);         /* page size fixed at 238h */
        rbuf[3] = (0x238 & 0xff);
@@ -2366,14 +2071,14 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
        memcpy(&rbuf[16], "libata          ", 16);
        memcpy(&rbuf[32], DRV_VERSION, 4);
 
-       /* we don't store the ATA device signature, so we fake it */
-
-       tf.command = ATA_DRDY;          /* really, this is Status reg */
-       tf.lbal = 0x1;
-       tf.nsect = 0x1;
-
-       ata_tf_to_fis(&tf, 0, 1, &rbuf[36]);    /* TODO: PMP? */
        rbuf[36] = 0x34;                /* force D2H Reg FIS (34h) */
+       rbuf[37] = (1 << 7);            /* bit 7 indicates Command FIS */
+                                       /* TODO: PMP? */
+
+       /* we don't store the ATA device signature, so we fake it */
+       rbuf[38] = ATA_DRDY;            /* really, this is Status reg */
+       rbuf[40] = 0x1;
+       rbuf[48] = 0x1;
 
        rbuf[56] = ATA_CMD_ID_ATA;
 
@@ -3089,7 +2794,7 @@ static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
  *     RETURNS:
  *     Associated ATA device, or %NULL if not found.
  */
-static struct ata_device *
+struct ata_device *
 ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
 {
        struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
@@ -4299,8 +4004,7 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
  *     Prints the contents of a SCSI command via printk().
  */
 
-static inline void ata_scsi_dump_cdb(struct ata_port *ap,
-                                    struct scsi_cmnd *cmd)
+void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd)
 {
 #ifdef ATA_VERBOSE_DEBUG
        struct scsi_device *scsidev = cmd->device;
@@ -4312,8 +4016,7 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
 #endif
 }
 
-static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
-                                     struct ata_device *dev)
+int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
 {
        u8 scsi_op = scmd->cmnd[0];
        ata_xlat_func_t xlat_func;
@@ -4407,6 +4110,7 @@ int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 
        return rc;
 }
+EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
 
 /**
  *     ata_scsi_simulate - simulate SCSI command on ATA device
@@ -4562,26 +4266,51 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
                 */
                shost->max_host_blocked = 1;
 
-               rc = scsi_add_host_with_dma(ap->scsi_host,
-                                               &ap->tdev, ap->host->dev);
+               rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev);
                if (rc)
-                       goto err_add;
+                       goto err_alloc;
        }
 
        return 0;
 
- err_add:
-       scsi_host_put(host->ports[i]->scsi_host);
  err_alloc:
        while (--i >= 0) {
                struct Scsi_Host *shost = host->ports[i]->scsi_host;
 
+               /* scsi_host_put() is in ata_devres_release() */
                scsi_remove_host(shost);
-               scsi_host_put(shost);
        }
        return rc;
 }
 
+#ifdef CONFIG_OF
+static void ata_scsi_assign_ofnode(struct ata_device *dev, struct ata_port *ap)
+{
+       struct scsi_device *sdev = dev->sdev;
+       struct device *d = ap->host->dev;
+       struct device_node *np = d->of_node;
+       struct device_node *child;
+
+       for_each_available_child_of_node(np, child) {
+               int ret;
+               u32 val;
+
+               ret = of_property_read_u32(child, "reg", &val);
+               if (ret)
+                       continue;
+               if (val == dev->devno) {
+                       dev_dbg(d, "found matching device node\n");
+                       sdev->sdev_gendev.of_node = child;
+                       return;
+               }
+       }
+}
+#else
+static void ata_scsi_assign_ofnode(struct ata_device *dev, struct ata_port *ap)
+{
+}
+#endif
+
 void ata_scsi_scan_host(struct ata_port *ap, int sync)
 {
        int tries = 5;
@@ -4607,6 +4336,7 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
                                                 NULL);
                        if (!IS_ERR(sdev)) {
                                dev->sdev = sdev;
+                               ata_scsi_assign_ofnode(dev, ap);
                                scsi_device_put(sdev);
                        } else {
                                dev->sdev = NULL;
@@ -4929,214 +4659,3 @@ void ata_scsi_dev_rescan(struct work_struct *work)
        spin_unlock_irqrestore(ap->lock, flags);
        mutex_unlock(&ap->scsi_scan_mutex);
 }
-
-/**
- *     ata_sas_port_alloc - Allocate port for a SAS attached SATA device
- *     @host: ATA host container for all SAS ports
- *     @port_info: Information from low-level host driver
- *     @shost: SCSI host that the scsi device is attached to
- *
- *     LOCKING:
- *     PCI/etc. bus probe sem.
- *
- *     RETURNS:
- *     ata_port pointer on success / NULL on failure.
- */
-
-struct ata_port *ata_sas_port_alloc(struct ata_host *host,
-                                   struct ata_port_info *port_info,
-                                   struct Scsi_Host *shost)
-{
-       struct ata_port *ap;
-
-       ap = ata_port_alloc(host);
-       if (!ap)
-               return NULL;
-
-       ap->port_no = 0;
-       ap->lock = &host->lock;
-       ap->pio_mask = port_info->pio_mask;
-       ap->mwdma_mask = port_info->mwdma_mask;
-       ap->udma_mask = port_info->udma_mask;
-       ap->flags |= port_info->flags;
-       ap->ops = port_info->port_ops;
-       ap->cbl = ATA_CBL_SATA;
-
-       return ap;
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
-
-/**
- *     ata_sas_port_start - Set port up for dma.
- *     @ap: Port to initialize
- *
- *     Called just after data structures for each port are
- *     initialized.
- *
- *     May be used as the port_start() entry in ata_port_operations.
- *
- *     LOCKING:
- *     Inherited from caller.
- */
-int ata_sas_port_start(struct ata_port *ap)
-{
-       /*
-        * the port is marked as frozen at allocation time, but if we don't
-        * have new eh, we won't thaw it
-        */
-       if (!ap->ops->error_handler)
-               ap->pflags &= ~ATA_PFLAG_FROZEN;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_start);
-
-/**
- *     ata_port_stop - Undo ata_sas_port_start()
- *     @ap: Port to shut down
- *
- *     May be used as the port_stop() entry in ata_port_operations.
- *
- *     LOCKING:
- *     Inherited from caller.
- */
-
-void ata_sas_port_stop(struct ata_port *ap)
-{
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_stop);
-
-/**
- * ata_sas_async_probe - simply schedule probing and return
- * @ap: Port to probe
- *
- * For batch scheduling of probe for sas attached ata devices, assumes
- * the port has already been through ata_sas_port_init()
- */
-void ata_sas_async_probe(struct ata_port *ap)
-{
-       __ata_port_probe(ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_async_probe);
-
-int ata_sas_sync_probe(struct ata_port *ap)
-{
-       return ata_port_probe(ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_sync_probe);
-
-
-/**
- *     ata_sas_port_init - Initialize a SATA device
- *     @ap: SATA port to initialize
- *
- *     LOCKING:
- *     PCI/etc. bus probe sem.
- *
- *     RETURNS:
- *     Zero on success, non-zero on error.
- */
-
-int ata_sas_port_init(struct ata_port *ap)
-{
-       int rc = ap->ops->port_start(ap);
-
-       if (rc)
-               return rc;
-       ap->print_id = atomic_inc_return(&ata_print_id);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_init);
-
-int ata_sas_tport_add(struct device *parent, struct ata_port *ap)
-{
-       return ata_tport_add(parent, ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_tport_add);
-
-void ata_sas_tport_delete(struct ata_port *ap)
-{
-       ata_tport_delete(ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
-
-/**
- *     ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
- *     @ap: SATA port to destroy
- *
- */
-
-void ata_sas_port_destroy(struct ata_port *ap)
-{
-       if (ap->ops->port_stop)
-               ap->ops->port_stop(ap);
-       kfree(ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
-
-/**
- *     ata_sas_slave_configure - Default slave_config routine for libata devices
- *     @sdev: SCSI device to configure
- *     @ap: ATA port to which SCSI device is attached
- *
- *     RETURNS:
- *     Zero.
- */
-
-int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
-{
-       ata_scsi_sdev_config(sdev);
-       ata_scsi_dev_config(sdev, ap->link.device);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
-
-/**
- *     ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
- *     @cmd: SCSI command to be sent
- *     @ap:    ATA port to which the command is being sent
- *
- *     RETURNS:
- *     Return value from __ata_scsi_queuecmd() if @cmd can be queued,
- *     0 otherwise.
- */
-
-int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
-{
-       int rc = 0;
-
-       ata_scsi_dump_cdb(ap, cmd);
-
-       if (likely(ata_dev_enabled(ap->link.device)))
-               rc = __ata_scsi_queuecmd(cmd, ap->link.device);
-       else {
-               cmd->result = (DID_BAD_TARGET << 16);
-               cmd->scsi_done(cmd);
-       }
-       return rc;
-}
-EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
-
-int ata_sas_allocate_tag(struct ata_port *ap)
-{
-       unsigned int max_queue = ap->host->n_tags;
-       unsigned int i, tag;
-
-       for (i = 0, tag = ap->sas_last_tag + 1; i < max_queue; i++, tag++) {
-               tag = tag < max_queue ? tag : 0;
-
-               /* the last tag is reserved for internal command. */
-               if (ata_tag_internal(tag))
-                       continue;
-
-               if (!test_and_set_bit(tag, &ap->sas_tag_allocated)) {
-                       ap->sas_last_tag = tag;
-                       return tag;
-               }
-       }
-       return -1;
-}
-
-void ata_sas_free_tag(unsigned int tag, struct ata_port *ap)
-{
-       clear_bit(tag, &ap->sas_tag_allocated);
-}
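The tag helpers removed here (and re-homed behind CONFIG_SATA_HOST in the libata.h hunk further down) implement a round-robin search over a tag bitmap, skipping the tag reserved for internal commands. A minimal userspace sketch of the same technique, with invented names (tag_pool, MAX_TAGS) and plain bit operations instead of the kernel's atomic test_and_set_bit():

#define MAX_TAGS      32
#define INTERNAL_TAG  (MAX_TAGS - 1)    /* reserved, never handed out */

struct tag_pool {
	unsigned long bitmap;   /* bit i set => tag i in use */
	unsigned int last;      /* where the previous search stopped */
};

/* Round-robin: start just after the last tag handed out, wrap once. */
static int tag_alloc(struct tag_pool *p)
{
	unsigned int i, tag;

	for (i = 0, tag = p->last + 1; i < MAX_TAGS; i++, tag++) {
		tag = tag < MAX_TAGS ? tag : 0;         /* wrap around */
		if (tag == INTERNAL_TAG)
			continue;                       /* keep it reserved */
		if (!(p->bitmap & (1UL << tag))) {      /* not atomic, unlike the kernel */
			p->bitmap |= 1UL << tag;
			p->last = tag;
			return tag;
		}
	}
	return -1;                                      /* pool exhausted */
}

static void tag_free(struct tag_pool *p, unsigned int tag)
{
	p->bitmap &= ~(1UL << tag);
}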
index 038db94..ae7189d 100644
@@ -2,10 +2,6 @@
 /*
  *  libata-sff.c - helper library for PCI IDE BMDMA
  *
- *  Maintained by:  Tejun Heo <tj@kernel.org>
- *                 Please ALWAYS copy linux-ide@vger.kernel.org
- *                 on emails.
- *
  *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
  *  Copyright 2003-2006 Jeff Garzik
  *
index 12a505b..6a40e3c 100644
@@ -208,7 +208,7 @@ show_ata_port_##name(struct device *dev,                            \
 {                                                                      \
        struct ata_port *ap = transport_class_to_port(dev);             \
                                                                        \
-       return snprintf(buf, 20, format_string, cast ap->field);        \
+       return scnprintf(buf, 20, format_string, cast ap->field);       \
 }
 
 #define ata_port_simple_attr(field, name, format_string, type)         \
@@ -479,7 +479,7 @@ show_ata_dev_##field(struct device *dev,                            \
 {                                                                      \
        struct ata_device *ata_dev = transport_class_to_dev(dev);       \
                                                                        \
-       return snprintf(buf, 20, format_string, cast ata_dev->field);   \
+       return scnprintf(buf, 20, format_string, cast ata_dev->field);  \
 }
 
 #define ata_dev_simple_attr(field, format_string, type)        \
@@ -533,7 +533,7 @@ show_ata_dev_id(struct device *dev,
        if (ata_dev->class == ATA_DEV_PMP)
                return 0;
        for(i=0;i<ATA_ID_WORDS;i++)  {
-               written += snprintf(buf+written, 20, "%04x%c",
+               written += scnprintf(buf+written, 20, "%04x%c",
                                    ata_dev->id[i],
                                    ((i+1) & 7) ? ' ' : '\n');
        }
@@ -552,7 +552,7 @@ show_ata_dev_gscr(struct device *dev,
        if (ata_dev->class != ATA_DEV_PMP)
                return 0;
        for(i=0;i<SATA_PMP_GSCR_DWORDS;i++)  {
-               written += snprintf(buf+written, 20, "%08x%c",
+               written += scnprintf(buf+written, 20, "%08x%c",
                                    ata_dev->gscr[i],
                                    ((i+1) & 3) ? ' ' : '\n');
        }
@@ -581,7 +581,7 @@ show_ata_dev_trim(struct device *dev,
        else
                mode = "unqueued";
 
-       return snprintf(buf, 20, "%s\n", mode);
+       return scnprintf(buf, 20, "%s\n", mode);
 }
 
 static DEVICE_ATTR(trim, S_IRUGO, show_ata_dev_trim, NULL);
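These sysfs conversions matter because snprintf() returns the length the output would have had, so code that accumulates its return value (as show_ata_dev_id()/show_ata_dev_gscr() do with "written") can walk the offset past the end of the buffer, while scnprintf() returns only what was actually stored. A userspace approximation of scnprintf(), for illustration only:

#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>

static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int ret;

	if (size == 0)
		return 0;

	va_start(args, fmt);
	ret = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (ret < 0)
		return 0;
	/* vsnprintf reports the would-be length; clamp to what actually fits */
	return ret >= (int)size ? (int)size - 1 : ret;
}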
index cd8090a..68cdd81 100644
@@ -37,7 +37,11 @@ extern int libata_noacpi;
 extern int libata_allow_tpm;
 extern const struct device_type ata_port_type;
 extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
+#ifdef CONFIG_ATA_FORCE
 extern void ata_force_cbl(struct ata_port *ap);
+#else
+static inline void ata_force_cbl(struct ata_port *ap) { }
+#endif
 extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
 extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf);
 extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
@@ -87,6 +91,18 @@ extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
 
 #define to_ata_port(d) container_of(d, struct ata_port, tdev)
 
+/* libata-sata.c */
+#ifdef CONFIG_SATA_HOST
+int ata_sas_allocate_tag(struct ata_port *ap);
+void ata_sas_free_tag(unsigned int tag, struct ata_port *ap);
+#else
+static inline int ata_sas_allocate_tag(struct ata_port *ap)
+{
+       return -EOPNOTSUPP;
+}
+static inline void ata_sas_free_tag(unsigned int tag, struct ata_port *ap) { }
+#endif
+
 /* libata-acpi.c */
 #ifdef CONFIG_ATA_ACPI
 extern unsigned int ata_acpi_gtf_filter;
@@ -112,6 +128,8 @@ static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
 #endif
 
 /* libata-scsi.c */
+extern struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
+                                           const struct scsi_device *scsidev);
 extern int ata_scsi_add_hosts(struct ata_host *host,
                              struct scsi_host_template *sht);
 extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
@@ -128,9 +146,10 @@ extern void ata_scsi_dev_rescan(struct work_struct *work);
 extern int ata_bus_probe(struct ata_port *ap);
 extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
                              unsigned int id, u64 lun);
-int ata_sas_allocate_tag(struct ata_port *ap);
-void ata_sas_free_tag(unsigned int tag, struct ata_port *ap);
-
+void ata_scsi_sdev_config(struct scsi_device *sdev);
+int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev);
+void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd);
+int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev);
 
 /* libata-eh.c */
 extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
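The libata.h hunks above follow the usual compile-out pattern: when the Kconfig option is off, the declarations are replaced by static inline stubs so call sites need no #ifdef of their own. A minimal sketch of that pattern with made-up names (CONFIG_FEATURE_FOO, foo_*), not libata's:

#include <linux/errno.h>

struct foo_ctx;			/* opaque to callers of the stubs */

#ifdef CONFIG_FEATURE_FOO
int foo_allocate(struct foo_ctx *ctx);
void foo_release(struct foo_ctx *ctx);
#else
static inline int foo_allocate(struct foo_ctx *ctx)
{
	return -EOPNOTSUPP;	/* feature compiled out */
}
static inline void foo_release(struct foo_ctx *ctx) { }
#endif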
index c451d7d..8729f78 100644
@@ -157,7 +157,6 @@ static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
 static void pdc_error_handler(struct ata_port *ap);
 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
 static int pdc_pata_cable_detect(struct ata_port *ap);
-static int pdc_sata_cable_detect(struct ata_port *ap);
 
 static struct scsi_host_template pdc_ata_sht = {
        ATA_BASE_SHT(DRV_NAME),
@@ -183,7 +182,7 @@ static const struct ata_port_operations pdc_common_ops = {
 
 static struct ata_port_operations pdc_sata_ops = {
        .inherits               = &pdc_common_ops,
-       .cable_detect           = pdc_sata_cable_detect,
+       .cable_detect           = ata_cable_sata,
        .freeze                 = pdc_sata_freeze,
        .thaw                   = pdc_sata_thaw,
        .scr_read               = pdc_sata_scr_read,
@@ -459,11 +458,6 @@ static int pdc_pata_cable_detect(struct ata_port *ap)
        return ATA_CBL_PATA80;
 }
 
-static int pdc_sata_cable_detect(struct ata_port *ap)
-{
-       return ATA_CBL_SATA;
-}
-
 static int pdc_sata_scr_read(struct ata_link *link,
                             unsigned int sc_reg, u32 *val)
 {
index 8db8c0f..7af74fb 100644
@@ -91,7 +91,7 @@
 #ifdef GENERAL_DEBUG
 #define PRINTK(args...) printk(args)
 #else
-#define PRINTK(args...)
+#define PRINTK(args...) do {} while (0)
 #endif /* GENERAL_DEBUG */
 
 #ifdef EXTRA_DEBUG
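Replacing the empty PRINTK() expansion with do {} while (0) keeps the macro a single statement that still consumes its trailing semicolon, avoiding -Wempty-body style warnings in spots like "if (err) PRINTK(...);". A small userspace reconstruction (printf stands in for the driver's printk):

#include <stdio.h>

#ifdef GENERAL_DEBUG
#define PRINTK(args...) printf(args)
#else
/* do {} while (0) makes the no-op expansion behave like one statement */
#define PRINTK(args...) do {} while (0)
#endif

int main(void)
{
	int err = 0;

	if (err)
		PRINTK("error %d\n", err);	/* no empty-body warning */
	else
		puts("ok");
	return 0;
}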
index b8313a0..48efa7a 100644
@@ -111,7 +111,7 @@ config CFAG12864B
          If unsure, say N.
 
 config CFAG12864B_RATE
-       int "Refresh rate (hertz)"
+       int "Refresh rate (hertz)"
        depends on CFAG12864B
        default "20"
        ---help---
@@ -329,7 +329,7 @@ config PANEL_LCD_PROTO
 
 config PANEL_LCD_PIN_E
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
-        int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
+       int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
        range -17 17
        default 14
        ---help---
@@ -344,7 +344,7 @@ config PANEL_LCD_PIN_E
 
 config PANEL_LCD_PIN_RS
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
-        int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
+       int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
        range -17 17
        default 17
        ---help---
@@ -359,7 +359,7 @@ config PANEL_LCD_PIN_RS
 
 config PANEL_LCD_PIN_RW
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
-        int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
+       int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
        range -17 17
        default 16
        ---help---
@@ -374,7 +374,7 @@ config PANEL_LCD_PIN_RW
 
 config PANEL_LCD_PIN_SCL
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
-        int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
+       int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
        range -17 17
        default 1
        ---help---
@@ -389,7 +389,7 @@ config PANEL_LCD_PIN_SCL
 
 config PANEL_LCD_PIN_SDA
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
-        int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
+       int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
        range -17 17
        default 2
        ---help---
@@ -404,12 +404,12 @@ config PANEL_LCD_PIN_SDA
 
 config PANEL_LCD_PIN_BL
        depends on PANEL_PROFILE="0" && PANEL_LCD="1"
-        int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
+       int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
        range -17 17
        default 0
        ---help---
          This describes the number of the parallel port pin to which the LCD 'BL' signal
-          has been connected. It can be :
+         has been connected. It can be :
 
                  0 : no connection (eg: connected to ground)
              1..17 : directly connected to any of these pins on the DB25 plug
index 874c259..c0da382 100644
@@ -88,7 +88,7 @@ struct charlcd_priv {
                int len;
        } esc_seq;
 
-       unsigned long long drvdata[0];
+       unsigned long long drvdata[];
 };
 
 #define charlcd_to_priv(p)     container_of(p, struct charlcd_priv, lcd)
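drvdata[0] → drvdata[] swaps the old zero-length-array idiom for a C99 flexible array member; the trailing storage is still sized at allocation time, but the compiler (and fortified memcpy) can now reason about it. A userspace sketch of the allocation pattern, with invented names:

#include <stdlib.h>
#include <string.h>

struct blob {
	size_t len;
	unsigned char data[];	/* C99 flexible array member */
};

/* The trailing storage is sized when the object is allocated. */
static struct blob *blob_new(const void *src, size_t len)
{
	struct blob *b = malloc(sizeof(*b) + len);

	if (!b)
		return NULL;
	b->len = len;
	memcpy(b->data, src, len);
	return b;
}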
index efb928e..1cce409 100644
@@ -356,7 +356,6 @@ static int img_ascii_lcd_probe(struct platform_device *pdev)
        const struct of_device_id *match;
        const struct img_ascii_lcd_config *cfg;
        struct img_ascii_lcd_ctx *ctx;
-       struct resource *res;
        int err;
 
        match = of_match_device(img_ascii_lcd_matches, &pdev->dev);
@@ -378,8 +377,7 @@ static int img_ascii_lcd_probe(struct platform_device *pdev)
                                         &ctx->offset))
                        return -EINVAL;
        } else {
-               res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-               ctx->base = devm_ioremap_resource(&pdev->dev, res);
+               ctx->base = devm_platform_ioremap_resource(pdev, 0);
                if (IS_ERR(ctx->base))
                        return PTR_ERR(ctx->base);
        }
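devm_platform_ioremap_resource() collapses the get-resource-then-map pair into one call. Roughly (a sketch, not necessarily the exact kernel implementation), the helper amounts to:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Look up the memory resource by index and hand it to the managed ioremap;
 * errors still come back to the caller as an ERR_PTR.
 */
static void __iomem *example_ioremap(struct platform_device *pdev,
				     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}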
index b9f474c..4086718 100644
@@ -97,30 +97,13 @@ static ssize_t phys_index_show(struct device *dev,
 }
 
 /*
- * Show whether the memory block is likely to be offlineable (or is already
- * offline). Once offline, the memory block could be removed. The return
- * value does, however, not indicate that there is a way to remove the
- * memory block.
+ * Legacy interface that we cannot remove. Always indicate "removable"
+ * with CONFIG_MEMORY_HOTREMOVE - bad heuristic.
  */
 static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
 {
-       struct memory_block *mem = to_memory_block(dev);
-       unsigned long pfn;
-       int ret = 1, i;
-
-       if (mem->state != MEM_ONLINE)
-               goto out;
-
-       for (i = 0; i < sections_per_block; i++) {
-               if (!present_section_nr(mem->start_section_nr + i))
-                       continue;
-               pfn = section_nr_to_pfn(mem->start_section_nr + i);
-               ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
-       }
-
-out:
-       return sprintf(buf, "%d\n", ret);
+       return sprintf(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
 }
 
 /*
index 7fa654f..b5ce7b0 100644
@@ -363,10 +363,10 @@ static void setup_pdev_dma_masks(struct platform_device *pdev)
 {
        if (!pdev->dev.coherent_dma_mask)
                pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-       if (!pdev->dma_mask)
-               pdev->dma_mask = DMA_BIT_MASK(32);
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dma_mask;
+       if (!pdev->dev.dma_mask) {
+               pdev->platform_dma_mask = DMA_BIT_MASK(32);
+               pdev->dev.dma_mask = &pdev->platform_dma_mask;
+       }
 };
 
 /**
@@ -662,20 +662,8 @@ struct platform_device *platform_device_register_full(
        pdev->dev.of_node_reused = pdevinfo->of_node_reused;
 
        if (pdevinfo->dma_mask) {
-               /*
-                * This memory isn't freed when the device is put,
-                * I don't have a nice idea for that though.  Conceptually
-                * dma_mask in struct device should not be a pointer.
-                * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
-                */
-               pdev->dev.dma_mask =
-                       kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
-               if (!pdev->dev.dma_mask)
-                       goto err;
-
-               kmemleak_ignore(pdev->dev.dma_mask);
-
-               *pdev->dev.dma_mask = pdevinfo->dma_mask;
+               pdev->platform_dma_mask = pdevinfo->dma_mask;
+               pdev->dev.dma_mask = &pdev->platform_dma_mask;
                pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
        }
 
@@ -700,7 +688,6 @@ struct platform_device *platform_device_register_full(
        if (ret) {
 err:
                ACPI_COMPANION_SET(&pdev->dev, NULL);
-               kfree(pdev->dev.dma_mask);
                platform_device_put(pdev);
                return ERR_PTR(ret);
        }
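Both platform-core hunks above rely on the same idea: struct platform_device now carries its own platform_dma_mask, and dev.dma_mask simply points into it, so the old kmalloc()'d mask (and its leak) goes away. A standalone sketch of the pointer-into-own-storage pattern, with simplified stand-in structs rather than the real struct device/platform_device:

#include <stdint.h>

struct fake_device {
	uint64_t *dma_mask;		/* what generic code dereferences */
	uint64_t  coherent_dma_mask;
};

struct fake_platform_device {
	struct fake_device dev;
	uint64_t platform_dma_mask;	/* backing storage lives in the container */
};

static void setup_dma_masks(struct fake_platform_device *pdev)
{
	if (!pdev->dev.coherent_dma_mask)
		pdev->dev.coherent_dma_mask = UINT64_C(0xffffffff);
	if (!pdev->dev.dma_mask) {
		pdev->platform_dma_mask = UINT64_C(0xffffffff);
		pdev->dev.dma_mask = &pdev->platform_dma_mask;
	}
}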
index c913ebb..f9b1e70 100644
@@ -245,13 +245,20 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
        err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
        if (err) {
                virtqueue_kick(vblk->vqs[qid].vq);
-               blk_mq_stop_hw_queue(hctx);
+               /* Don't stop the queue if -ENOMEM: we may have failed to
+                * bounce the buffer due to global resource outage.
+                */
+               if (err == -ENOSPC)
+                       blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
-               /* Out of mem doesn't actually happen, since we fall back
-                * to direct descriptors */
-               if (err == -ENOMEM || err == -ENOSPC)
+               switch (err) {
+               case -ENOSPC:
                        return BLK_STS_DEV_RESOURCE;
-               return BLK_STS_IOERR;
+               case -ENOMEM:
+                       return BLK_STS_RESOURCE;
+               default:
+                       return BLK_STS_IOERR;
+               }
        }
 
        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
index be79d6c..1bb00a9 100644
@@ -345,7 +345,7 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
        if (ret)
                goto unlock;
 
-       *buf = readl(rsb->regs + RSB_DATA);
+       *buf = readl(rsb->regs + RSB_DATA) & GENMASK(len * 8 - 1, 0);
 
 unlock:
        mutex_unlock(&rsb->lock);
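RSB_DATA is always read back as a full 32-bit word, so for 1- or 2-byte transfers the stale upper bytes have to be masked off before the value reaches the caller, which is what the GENMASK(len * 8 - 1, 0) term does. A userspace equivalent of that mask (helper name is illustrative):

#include <stdint.h>

/* Keep only the low "len" bytes of a 32-bit register value (len = 1, 2 or 4). */
static uint32_t mask_to_len(uint32_t reg, unsigned int len)
{
	if (len >= 4)
		return reg;		/* full word, nothing to strip */
	return reg & ((1u << (len * 8)) - 1);
}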
index 6113fc0..4400196 100644
@@ -1266,6 +1266,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
        SYSC_QUIRK("gpu", 0x50000000, 0x14, -1, -1, 0x00010201, 0xffffffff, 0),
        SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff,
                   SYSC_MODULE_QUIRK_SGX),
+       SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff,
+                  SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
        SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
                   0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
        SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -1, 0x4ea2080d, 0xffffffff,
@@ -1294,7 +1296,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
        SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, 0),
        SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
        SYSC_QUIRK("iss", 0, 0, 0x10, -1, 0x40000101, 0xffffffff, 0),
-       SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff, 0),
        SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44306302, 0xffffffff, 0),
        SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44307b02, 0xffffffff, 0),
        SYSC_QUIRK("mcbsp", 0, -1, 0x8c, -1, 0, 0, 0),
index c78127c..638c693 100644
@@ -194,7 +194,7 @@ static int platform_ipmi_probe(struct platform_device *pdev)
        else
                io.slave_addr = slave_addr;
 
-       io.irq = platform_get_irq(pdev, 0);
+       io.irq = platform_get_irq_optional(pdev, 0);
        if (io.irq > 0)
                io.irq_setup = ipmi_std_irq_setup;
        else
@@ -378,7 +378,7 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
                io.irq = tmp;
                io.irq_setup = acpi_gpe_irq_setup;
        } else {
-               int irq = platform_get_irq(pdev, 0);
+               int irq = platform_get_irq_optional(pdev, 0);
 
                if (irq > 0) {
                        io.irq = irq;
index 7a0fca6..7460f23 100644
@@ -99,11 +99,8 @@ static int tpm_read_log(struct tpm_chip *chip)
  *
  * If an event log is found then the securityfs files are setup to
  * export it to userspace, otherwise nothing is done.
- *
- * Returns -ENODEV if the firmware has no event log or securityfs is not
- * supported.
  */
-int tpm_bios_log_setup(struct tpm_chip *chip)
+void tpm_bios_log_setup(struct tpm_chip *chip)
 {
        const char *name = dev_name(&chip->dev);
        unsigned int cnt;
@@ -112,7 +109,7 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
 
        rc = tpm_read_log(chip);
        if (rc < 0)
-               return rc;
+               return;
        log_version = rc;
 
        cnt = 0;
@@ -158,13 +155,12 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
                cnt++;
        }
 
-       return 0;
+       return;
 
 err:
-       rc = PTR_ERR(chip->bios_dir[cnt]);
        chip->bios_dir[cnt] = NULL;
        tpm_bios_log_teardown(chip);
-       return rc;
+       return;
 }
 
 void tpm_bios_log_teardown(struct tpm_chip *chip)
index af347c1..a9ce66d 100644
@@ -51,7 +51,8 @@ int tpm_read_log_of(struct tpm_chip *chip)
         * endian format. For this reason, vtpm doesn't need conversion
         * but physical tpm needs the conversion.
         */
-       if (of_property_match_string(np, "compatible", "IBM,vtpm") < 0) {
+       if (of_property_match_string(np, "compatible", "IBM,vtpm") < 0 &&
+           of_property_match_string(np, "compatible", "IBM,vtpm20") < 0) {
                size = be32_to_cpup((__force __be32 *)sizep);
                base = be64_to_cpup((__force __be64 *)basep);
        } else {
index 739b1d9..2c96977 100644
@@ -115,6 +115,7 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v,
        u32 converted_event_size;
        u32 converted_event_type;
 
+       (*pos)++;
        converted_event_size = do_endian_conversion(event->event_size);
 
        v += sizeof(struct tcpa_event) + converted_event_size;
@@ -132,7 +133,6 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v,
            ((v + sizeof(struct tcpa_event) + converted_event_size) > limit))
                return NULL;
 
-       (*pos)++;
        return v;
 }
 
index b9aeda1..e741b11 100644
@@ -94,6 +94,7 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v,
        size_t event_size;
        void *marker;
 
+       (*pos)++;
        event_header = log->bios_event_log;
 
        if (v == SEQ_START_TOKEN) {
@@ -118,7 +119,6 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v,
        if (((v + event_size) >= limit) || (event_size == 0))
                return NULL;
 
-       (*pos)++;
        return v;
 }
 
index 3d6d394..5807383 100644
@@ -596,9 +596,7 @@ int tpm_chip_register(struct tpm_chip *chip)
 
        tpm_sysfs_add_device(chip);
 
-       rc = tpm_bios_log_setup(chip);
-       if (rc != 0 && rc != -ENODEV)
-               return rc;
+       tpm_bios_log_setup(chip);
 
        tpm_add_ppi(chip);
 
index 5620747..0fbcede 100644
@@ -226,6 +226,7 @@ int tpm2_auto_startup(struct tpm_chip *chip);
 void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
 unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
 int tpm2_probe(struct tpm_chip *chip);
+int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip);
 int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
 int tpm2_init_space(struct tpm_space *space);
 void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space);
@@ -235,7 +236,7 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
 int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf,
                      size_t *bufsiz);
 
-int tpm_bios_log_setup(struct tpm_chip *chip);
+void tpm_bios_log_setup(struct tpm_chip *chip);
 void tpm_bios_log_teardown(struct tpm_chip *chip);
 int tpm_dev_common_init(void);
 void tpm_dev_common_exit(void);
index 7603295..76f67b1 100644
@@ -615,7 +615,7 @@ out:
        return rc;
 }
 
-static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
+int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
 {
        struct tpm_buf buf;
        u32 nr_commands;
index 78cc526..1a49db9 100644
@@ -29,6 +29,7 @@ static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";
 
 static const struct vio_device_id tpm_ibmvtpm_device_table[] = {
        { "IBM,vtpm", "IBM,vtpm"},
+       { "IBM,vtpm", "IBM,vtpm20"},
        { "", "" }
 };
 MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
@@ -571,6 +572,7 @@ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
         */
        while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
                ibmvtpm_crq_process(crq, ibmvtpm);
+               wake_up_interruptible(&ibmvtpm->crq_queue.wq);
                crq->valid = 0;
                smp_wmb();
        }
@@ -618,6 +620,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
        }
 
        crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
+       init_waitqueue_head(&crq_q->wq);
        ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
                                                 CRQ_RES_BUF_SIZE,
                                                 DMA_BIDIRECTIONAL);
@@ -670,6 +673,20 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
        if (rc)
                goto init_irq_cleanup;
 
+       if (!strcmp(id->compat, "IBM,vtpm20")) {
+               chip->flags |= TPM_CHIP_FLAG_TPM2;
+               rc = tpm2_get_cc_attrs_tbl(chip);
+               if (rc)
+                       goto init_irq_cleanup;
+       }
+
+       if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
+                               ibmvtpm->rtce_buf != NULL,
+                               HZ)) {
+               dev_err(dev, "CRQ response timed out\n");
+               goto init_irq_cleanup;
+       }
+
        return tpm_chip_register(chip);
 init_irq_cleanup:
        do {
index 7983f1a..b92aa7d 100644
@@ -26,6 +26,7 @@ struct ibmvtpm_crq_queue {
        struct ibmvtpm_crq *crq_addr;
        u32 index;
        u32 num_entry;
+       wait_queue_head_t wq;
 };
 
 struct ibmvtpm_dev {
index 37d72e8..ea759af 100644
@@ -132,7 +132,12 @@ static void cr50_wake_if_needed(struct cr50_spi_phy *cr50_phy)
 
        if (cr50_needs_waking(cr50_phy)) {
                /* Assert CS, wait 1 msec, deassert CS */
-               struct spi_transfer spi_cs_wake = { .delay_usecs = 1000 };
+               struct spi_transfer spi_cs_wake = {
+                       .delay = {
+                               .value = 1000,
+                               .unit = SPI_DELAY_UNIT_USECS
+                       }
+               };
 
                spi_sync_transfer(phy->spi_device, &spi_cs_wake, 1);
                /* Wait for it to fully wake */
index d1754fd..d967559 100644
@@ -110,7 +110,8 @@ int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
 
                spi_xfer.cs_change = 0;
                spi_xfer.len = transfer_len;
-               spi_xfer.delay_usecs = 5;
+               spi_xfer.delay.value = 5;
+               spi_xfer.delay.unit = SPI_DELAY_UNIT_USECS;
 
                if (in) {
                        spi_xfer.tx_buf = NULL;
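Both SPI TPM hunks move from the removed delay_usecs field to the spi_delay { value, unit } pair. A kernel-side fragment showing the new idiom (the example function and its parameters are illustrative, not driver code):

#include <linux/spi/spi.h>

static int example_xfer_with_delay(struct spi_device *spi, void *buf, int len)
{
	struct spi_transfer xfer = {
		.tx_buf = buf,
		.len    = len,
		.delay  = {
			.value = 5,			/* wait 5 us after the transfer */
			.unit  = SPI_DELAY_UNIT_USECS,
		},
	};

	return spi_sync_transfer(spi, &xfer, 1);
}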
index f0f2b59..95adf6c 100644
@@ -4713,7 +4713,7 @@ EXPORT_SYMBOL(of_clk_get_by_name);
  *
  * Returns: The number of clocks that are possible parents of this node
  */
-unsigned int of_clk_get_parent_count(struct device_node *np)
+unsigned int of_clk_get_parent_count(const struct device_node *np)
 {
        int count;
 
@@ -4725,7 +4725,7 @@ unsigned int of_clk_get_parent_count(struct device_node *np)
 }
 EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
 
-const char *of_clk_get_parent_name(struct device_node *np, int index)
+const char *of_clk_get_parent_name(const struct device_node *np, int index)
 {
        struct of_phandle_args clkspec;
        struct property *prop;
index f6c120c..cf19290 100644
@@ -560,7 +560,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
        hws[IMX8MP_CLK_MEDIA_AXI] = imx8m_clk_hw_composite("media_axi", imx8mp_media_axi_sels, ccm_base + 0x8a00);
        hws[IMX8MP_CLK_MEDIA_APB] = imx8m_clk_hw_composite("media_apb", imx8mp_media_apb_sels, ccm_base + 0x8a80);
        hws[IMX8MP_CLK_HDMI_APB] = imx8m_clk_hw_composite("hdmi_apb", imx8mp_media_apb_sels, ccm_base + 0x8b00);
-       hws[IMX8MP_CLK_HDMI_AXI] = imx8m_clk_hw_composite("hdmi_axi", imx8mp_media_apb_sels, ccm_base + 0x8b80);
+       hws[IMX8MP_CLK_HDMI_AXI] = imx8m_clk_hw_composite("hdmi_axi", imx8mp_media_axi_sels, ccm_base + 0x8b80);
        hws[IMX8MP_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mp_gpu_axi_sels, ccm_base + 0x8c00);
        hws[IMX8MP_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mp_gpu_ahb_sels, ccm_base + 0x8c80);
        hws[IMX8MP_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mp_noc_sels, ccm_base + 0x8d00);
@@ -686,7 +686,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
        hws[IMX8MP_CLK_CAN1_ROOT] = imx_clk_hw_gate2("can1_root_clk", "can1", ccm_base + 0x4350, 0);
        hws[IMX8MP_CLK_CAN2_ROOT] = imx_clk_hw_gate2("can2_root_clk", "can2", ccm_base + 0x4360, 0);
        hws[IMX8MP_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_root_clk", "ipg_root", ccm_base + 0x43a0, 0);
-       hws[IMX8MP_CLK_ENET_QOS_ROOT] = imx_clk_hw_gate4("enet_qos_root_clk", "enet_axi", ccm_base + 0x43b0, 0);
+       hws[IMX8MP_CLK_ENET_QOS_ROOT] = imx_clk_hw_gate4("enet_qos_root_clk", "sim_enet_root_clk", ccm_base + 0x43b0, 0);
        hws[IMX8MP_CLK_SIM_ENET_ROOT] = imx_clk_hw_gate4("sim_enet_root_clk", "enet_axi", ccm_base + 0x4400, 0);
        hws[IMX8MP_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_div", ccm_base + 0x4450, 0);
        hws[IMX8MP_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core_div", ccm_base + 0x4460, 0);
index fbef740..b8b2072 100644
@@ -43,12 +43,12 @@ struct imx_sc_msg_req_set_clock_rate {
        __le32 rate;
        __le16 resource;
        u8 clk;
-} __packed;
+} __packed __aligned(4);
 
 struct req_get_clock_rate {
        __le16 resource;
        u8 clk;
-} __packed;
+} __packed __aligned(4);
 
 struct resp_get_clock_rate {
        __le32 rate;
@@ -84,7 +84,7 @@ struct imx_sc_msg_get_clock_parent {
                struct req_get_clock_parent {
                        __le16 resource;
                        u8 clk;
-               } __packed req;
+               } __packed __aligned(4) req;
                struct resp_get_clock_parent {
                        u8 parent;
                } resp;
@@ -121,7 +121,7 @@ struct imx_sc_msg_req_clock_enable {
        u8 clk;
        u8 enable;
        u8 autog;
-} __packed;
+} __packed __aligned(4);
 
 static inline struct clk_scu *to_clk_scu(struct clk_hw *hw)
 {
index dd7af41..0a5d395 100644
@@ -592,24 +592,6 @@ static struct clk_branch disp_cc_mdss_rot_clk = {
        },
 };
 
-static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
-       .halt_reg = 0x400c,
-       .halt_check = BRANCH_HALT,
-       .clkr = {
-               .enable_reg = 0x400c,
-               .enable_mask = BIT(0),
-               .hw.init = &(struct clk_init_data){
-                       .name = "disp_cc_mdss_rscc_ahb_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
-                       },
-                       .num_parents = 1,
-                       .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
-                       .ops = &clk_branch2_ops,
-               },
-       },
-};
-
 static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
        .halt_reg = 0x4008,
        .halt_check = BRANCH_HALT,
@@ -687,7 +669,6 @@ static struct clk_regmap *disp_cc_sc7180_clocks[] = {
        [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
        [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
        [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
-       [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
        [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
        [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
        [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
index c363c3c..276e5ec 100644
@@ -97,7 +97,7 @@ static struct clk_branch video_cc_vcodec0_axi_clk = {
 
 static struct clk_branch video_cc_vcodec0_core_clk = {
        .halt_reg = 0x890,
-       .halt_check = BRANCH_HALT,
+       .halt_check = BRANCH_HALT_VOTED,
        .clkr = {
                .enable_reg = 0x890,
                .enable_mask = BIT(0),
index af3e780..e5538d5 100644
@@ -78,7 +78,7 @@ static const struct omap_clkctrl_reg_data am4_gfx_l3_clkctrl_regs[] __initconst
 };
 
 static const struct omap_clkctrl_reg_data am4_l4_rtc_clkctrl_regs[] __initconst = {
-       { AM4_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" },
+       { AM4_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clkdiv32k_ick" },
        { 0 },
 };
 
index 9d808d5..eb0ba78 100644
@@ -343,7 +343,8 @@ static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
 
 static u64 read_hv_sched_clock_tsc(void)
 {
-       return read_hv_clock_tsc() - hv_sched_clock_offset;
+       return (read_hv_clock_tsc() - hv_sched_clock_offset) *
+               (NSEC_PER_SEC / HV_CLOCK_HZ);
 }
 
 static void suspend_hv_clock_tsc(struct clocksource *arg)
@@ -398,7 +399,8 @@ static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
 
 static u64 read_hv_sched_clock_msr(void)
 {
-       return read_hv_clock_msr() - hv_sched_clock_offset;
+       return (read_hv_clock_msr() - hv_sched_clock_offset) *
+               (NSEC_PER_SEC / HV_CLOCK_HZ);
 }
 
 static struct clocksource hyperv_cs_msr = {
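sched_clock() is expected to return nanoseconds, while the Hyper-V reference counter ticks at 10 MHz (HV_CLOCK_HZ), so the raw delta has to be scaled by NSEC_PER_SEC / HV_CLOCK_HZ = 100 ns per tick, as both hunks above now do. The arithmetic, as a standalone sketch:

#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL
#define HV_CLOCK_HZ	10000000ULL	/* Hyper-V reference counter: 10 MHz */

/* Convert a raw counter delta into nanoseconds for sched_clock(). */
static uint64_t ticks_to_ns(uint64_t ticks, uint64_t offset)
{
	return (ticks - offset) * (NSEC_PER_SEC / HV_CLOCK_HZ);
}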
index c3b1283..17909fd 100644
@@ -1151,7 +1151,7 @@ int dma_async_device_register(struct dma_device *device)
        }
 
        if (!device->device_release)
-               dev_warn(device->dev,
+               dev_dbg(device->dev,
                         "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
 
        kref_init(&device->ref);
index df47be6..989b7a2 100644
@@ -81,9 +81,9 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
        dev = &idxd->pdev->dev;
        idxd_cdev = &wq->idxd_cdev;
 
-       dev_dbg(dev, "%s called\n", __func__);
+       dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));
 
-       if (idxd_wq_refcount(wq) > 1 && wq_dedicated(wq))
+       if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq))
                return -EBUSY;
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
index c151129..4d7561a 100644
@@ -564,12 +564,12 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
        if (IS_ERR(flow->udma_rflow)) {
                ret = PTR_ERR(flow->udma_rflow);
                dev_err(dev, "UDMAX rflow get err %d\n", ret);
-               goto err;
+               return ret;
        }
 
        if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
-               xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
-               return -ENODEV;
+               ret = -ENODEV;
+               goto err_rflow_put;
        }
 
        /* request and cfg rings */
@@ -578,7 +578,7 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
        if (!flow->ringrx) {
                ret = -ENODEV;
                dev_err(dev, "Failed to get RX ring\n");
-               goto err;
+               goto err_rflow_put;
        }
 
        flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc,
@@ -586,19 +586,19 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
        if (!flow->ringrxfdq) {
                ret = -ENODEV;
                dev_err(dev, "Failed to get RXFDQ ring\n");
-               goto err;
+               goto err_ringrx_free;
        }
 
        ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringrx %d\n", ret);
-               goto err;
+               goto err_ringrxfdq_free;
        }
 
        ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
-               goto err;
+               goto err_ringrxfdq_free;
        }
 
        if (rx_chn->remote) {
@@ -648,7 +648,7 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
        if (ret) {
                dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
                        ret);
-               goto err;
+               goto err_ringrxfdq_free;
        }
 
        rx_chn->flows_ready++;
@@ -656,8 +656,17 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
                flow->udma_rflow_id, rx_chn->flows_ready);
 
        return 0;
-err:
-       k3_udma_glue_release_rx_flow(rx_chn, flow_idx);
+
+err_ringrxfdq_free:
+       k3_ringacc_ring_free(flow->ringrxfdq);
+
+err_ringrx_free:
+       k3_ringacc_ring_free(flow->ringrx);
+
+err_rflow_put:
+       xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
+       flow->udma_rflow = NULL;
+
        return ret;
 }
 
index 7576450..aff3dfb 100644
@@ -83,13 +83,16 @@ static ssize_t
 efivar_attr_read(struct efivar_entry *entry, char *buf)
 {
        struct efi_variable *var = &entry->var;
+       unsigned long size = sizeof(var->Data);
        char *str = buf;
+       int ret;
 
        if (!entry || !buf)
                return -EINVAL;
 
-       var->DataSize = 1024;
-       if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+       ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+       var->DataSize = size;
+       if (ret)
                return -EIO;
 
        if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
@@ -116,13 +119,16 @@ static ssize_t
 efivar_size_read(struct efivar_entry *entry, char *buf)
 {
        struct efi_variable *var = &entry->var;
+       unsigned long size = sizeof(var->Data);
        char *str = buf;
+       int ret;
 
        if (!entry || !buf)
                return -EINVAL;
 
-       var->DataSize = 1024;
-       if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+       ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+       var->DataSize = size;
+       if (ret)
                return -EIO;
 
        str += sprintf(str, "0x%lx\n", var->DataSize);
@@ -133,12 +139,15 @@ static ssize_t
 efivar_data_read(struct efivar_entry *entry, char *buf)
 {
        struct efi_variable *var = &entry->var;
+       unsigned long size = sizeof(var->Data);
+       int ret;
 
        if (!entry || !buf)
                return -EINVAL;
 
-       var->DataSize = 1024;
-       if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+       ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+       var->DataSize = size;
+       if (ret)
                return -EIO;
 
        memcpy(buf, var->Data, var->DataSize);
@@ -199,6 +208,9 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
        u8 *data;
        int err;
 
+       if (!entry || !buf)
+               return -EINVAL;
+
        if (in_compat_syscall()) {
                struct compat_efi_variable *compat;
 
@@ -250,14 +262,16 @@ efivar_show_raw(struct efivar_entry *entry, char *buf)
 {
        struct efi_variable *var = &entry->var;
        struct compat_efi_variable *compat;
+       unsigned long datasize = sizeof(var->Data);
        size_t size;
+       int ret;
 
        if (!entry || !buf)
                return 0;
 
-       var->DataSize = 1024;
-       if (efivar_entry_get(entry, &entry->var.Attributes,
-                            &entry->var.DataSize, entry->var.Data))
+       ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data);
+       var->DataSize = datasize;
+       if (ret)
                return -EIO;
 
        if (in_compat_syscall()) {
index 31fee5e..0017367 100644
 #include "gpiolib.h"
 #include "gpiolib-acpi.h"
 
-#define QUIRK_NO_EDGE_EVENTS_ON_BOOT           0x01l
-#define QUIRK_NO_WAKEUP                                0x02l
-
 static int run_edge_events_on_boot = -1;
 module_param(run_edge_events_on_boot, int, 0444);
 MODULE_PARM_DESC(run_edge_events_on_boot,
                 "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
 
-static int honor_wakeup = -1;
-module_param(honor_wakeup, int, 0444);
-MODULE_PARM_DESC(honor_wakeup,
-                "Honor the ACPI wake-capable flag: 0=no, 1=yes, -1=auto");
+static char *ignore_wake;
+module_param(ignore_wake, charp, 0444);
+MODULE_PARM_DESC(ignore_wake,
+                "controller@pin combos on which to ignore the ACPI wake flag "
+                "ignore_wake=controller@pin[,controller@pin[,...]]");
+
+struct acpi_gpiolib_dmi_quirk {
+       bool no_edge_events_on_boot;
+       char *ignore_wake;
+};
 
 /**
  * struct acpi_gpio_event - ACPI GPIO event handler data
@@ -202,6 +205,57 @@ static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
                acpi_gpiochip_request_irq(acpi_gpio, event);
 }
 
+static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
+{
+       const char *controller, *pin_str;
+       int len, pin;
+       char *endp;
+
+       controller = ignore_wake;
+       while (controller) {
+               pin_str = strchr(controller, '@');
+               if (!pin_str)
+                       goto err;
+
+               len = pin_str - controller;
+               if (len == strlen(controller_in) &&
+                   strncmp(controller, controller_in, len) == 0) {
+                       pin = simple_strtoul(pin_str + 1, &endp, 10);
+                       if (*endp != 0 && *endp != ',')
+                               goto err;
+
+                       if (pin == pin_in)
+                               return true;
+               }
+
+               controller = strchr(controller, ',');
+               if (controller)
+                       controller++;
+       }
+
+       return false;
+err:
+       pr_err_once("Error invalid value for gpiolib_acpi.ignore_wake: %s\n",
+                   ignore_wake);
+       return false;
+}
+
+static bool acpi_gpio_irq_is_wake(struct device *parent,
+                                 struct acpi_resource_gpio *agpio)
+{
+       int pin = agpio->pin_table[0];
+
+       if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
+               return false;
+
+       if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) {
+               dev_info(parent, "Ignoring wakeup on pin %d\n", pin);
+               return false;
+       }
+
+       return true;
+}
+
 /* Always returns AE_OK so that we keep looping over the resources */
 static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
                                             void *context)
@@ -289,7 +343,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
        event->handle = evt_handle;
        event->handler = handler;
        event->irq = irq;
-       event->irq_is_wake = honor_wakeup && agpio->wake_capable == ACPI_WAKE_CAPABLE;
+       event->irq_is_wake = acpi_gpio_irq_is_wake(chip->parent, agpio);
        event->pin = pin;
        event->desc = desc;
 
@@ -1328,7 +1382,9 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
                },
-               .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
+               .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+                       .no_edge_events_on_boot = true,
+               },
        },
        {
                /*
@@ -1341,16 +1397,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
                },
-               .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
+               .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+                       .no_edge_events_on_boot = true,
+               },
        },
        {
                /*
-                * Various HP X2 10 Cherry Trail models use an external
-                * embedded-controller connected via I2C + an ACPI GPIO
-                * event handler. The embedded controller generates various
-                * spurious wakeup events when suspended. So disable wakeup
-                * for its handler (it uses the only ACPI GPIO event handler).
-                * This breaks wakeup when opening the lid, the user needs
+                * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
+                * external embedded-controller connected via I2C + an ACPI GPIO
+                * event handler on INT33FF:01 pin 0, causing spurious wakeups.
+                * When suspending by closing the LID, the power to the USB
+                * keyboard is turned off, causing INT0002 ACPI events to
+                * trigger once the XHCI controller notices the keyboard is
+                * gone. So INT0002 events cause spurious wakeups too. Ignoring
+                * EC wakes breaks wakeup when opening the lid, the user needs
                 * to press the power-button to wakeup the system. The
                 * alternative is suspend simply not working, which is worse.
                 */
@@ -1358,33 +1418,61 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
                },
-               .driver_data = (void *)QUIRK_NO_WAKEUP,
+               .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+                       .ignore_wake = "INT33FF:01@0,INT0002:00@2",
+               },
+       },
+       {
+               /*
+                * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an
+                * external embedded-controller connected via I2C + an ACPI GPIO
+                * event handler on INT33FC:02 pin 28, causing spurious wakeups.
+                */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+                       DMI_MATCH(DMI_BOARD_NAME, "815D"),
+               },
+               .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+                       .ignore_wake = "INT33FC:02@28",
+               },
+       },
+       {
+               /*
+                * HP X2 10 models with Cherry Trail SoC + AXP288 PMIC use an
+                * external embedded-controller connected via I2C + an ACPI GPIO
+                * event handler on INT33FF:01 pin 0, causing spurious wakeups.
+                */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+                       DMI_MATCH(DMI_BOARD_NAME, "813E"),
+               },
+               .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+                       .ignore_wake = "INT33FF:01@0",
+               },
        },
        {} /* Terminating entry */
 };
 
 static int acpi_gpio_setup_params(void)
 {
+       const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
        const struct dmi_system_id *id;
-       long quirks = 0;
 
        id = dmi_first_match(gpiolib_acpi_quirks);
        if (id)
-               quirks = (long)id->driver_data;
+               quirk = id->driver_data;
 
        if (run_edge_events_on_boot < 0) {
-               if (quirks & QUIRK_NO_EDGE_EVENTS_ON_BOOT)
+               if (quirk && quirk->no_edge_events_on_boot)
                        run_edge_events_on_boot = 0;
                else
                        run_edge_events_on_boot = 1;
        }
 
-       if (honor_wakeup < 0) {
-               if (quirks & QUIRK_NO_WAKEUP)
-                       honor_wakeup = 0;
-               else
-                       honor_wakeup = 1;
-       }
+       if (ignore_wake == NULL && quirk && quirk->ignore_wake)
+               ignore_wake = quirk->ignore_wake;
 
        return 0;
 }
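The new gpiolib_acpi.ignore_wake parameter takes a "controller@pin[,controller@pin...]" list, e.g. ignore_wake=INT33FF:01@0,INT0002:00@2 as used by the HP x2 quirk entry above. A userspace re-creation of the matching logic, for illustration only (strtol stands in for the kernel's simple_strtoul, and malformed entries just fail the match instead of logging):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static bool in_ignore_list(const char *list, const char *ctrl, int pin)
{
	const char *p = list;

	while (p && *p) {
		const char *at = strchr(p, '@');
		const char *next = strchr(p, ',');
		size_t len;
		char *end;
		long val;

		if (!at || (next && at > next))
			return false;		/* malformed entry */

		len = (size_t)(at - p);
		val = strtol(at + 1, &end, 10);
		if (*end != '\0' && *end != ',')
			return false;		/* junk after the pin number */

		if (len == strlen(ctrl) && !strncmp(p, ctrl, len) && val == pin)
			return true;

		p = next ? next + 1 : NULL;
	}
	return false;
}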
index 4d0106c..00fb91f 100644
@@ -2306,9 +2306,16 @@ static void gpiochip_irq_disable(struct irq_data *d)
 {
        struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
 
+       /*
+        * Since we override .irq_disable() we need to mimic the
+        * behaviour of __irq_disable() in irq/chip.c.
+        * First call .irq_disable() if it exists, else mimic the
+        * behaviour of mask_irq() which calls .irq_mask() if
+        * it exists.
+        */
        if (chip->irq.irq_disable)
                chip->irq.irq_disable(d);
-       else
+       else if (chip->irq.chip->irq_mask)
                chip->irq.chip->irq_mask(d);
        gpiochip_disable_irq(chip, d->hwirq);
 }
index f24ed9a..337d7cd 100644
@@ -781,11 +781,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
        ssize_t result = 0;
        uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
 
-       if (size & 3 || *pos & 3)
+       if (size > 4096 || size & 3 || *pos & 3)
                return -EINVAL;
 
        /* decode offset */
-       offset = *pos & GENMASK_ULL(11, 0);
+       offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
        se = (*pos & GENMASK_ULL(19, 12)) >> 12;
        sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
        cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
@@ -823,7 +823,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
        while (size) {
                uint32_t value;
 
-               value = data[offset++];
+               value = data[result >> 2];
                r = put_user(value, (uint32_t *)buf);
                if (r) {
                        result = r;
index 39cd545..b897585 100644
@@ -3913,6 +3913,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
                                if (r)
                                        goto out;
 
+                               amdgpu_fbdev_set_suspend(tmp_adev, 0);
+
                                /* must succeed. */
                                amdgpu_ras_resume(tmp_adev);
 
@@ -4086,6 +4088,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                 */
                amdgpu_unregister_gpu_instance(tmp_adev);
 
+               amdgpu_fbdev_set_suspend(adev, 1);
+
                /* disable ras on ALL IPs */
                if (!(in_ras_intr && !use_baco) &&
                      amdgpu_device_ip_need_full_reset(tmp_adev))
index dee4462..c6e9885 100644
@@ -974,7 +974,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
        /* Map SG to device */
        r = -ENOMEM;
        nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
-       if (nents != ttm->sg->nents)
+       if (nents == 0)
                goto release_sg;
 
        /* convert SG to linear array of pages and dma addresses */
index ff2e6e1..6173951 100644
@@ -693,7 +693,7 @@ static int jpeg_v2_0_set_clockgating_state(void *handle,
        bool enable = (state == AMD_CG_STATE_GATE);
 
        if (enable) {
-               if (jpeg_v2_0_is_idle(handle))
+               if (!jpeg_v2_0_is_idle(handle))
                        return -EBUSY;
                jpeg_v2_0_enable_clock_gating(adev);
        } else {
index c6d046d..c04c207 100644
@@ -477,7 +477,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
                        continue;
 
                if (enable) {
-                       if (jpeg_v2_5_is_idle(handle))
+                       if (!jpeg_v2_5_is_idle(handle))
                                return -EBUSY;
                        jpeg_v2_5_enable_clock_gating(adev, i);
                } else {
index 2b488df..d8945c3 100644
 #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK  0x00010000L
 #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK            0x00020000L
 #define mmHDP_MEM_POWER_CTRL_BASE_IDX  0
+
+/* for Vega20/arcturus register offset change */
+#define        mmROM_INDEX_VG20                                0x00e4
+#define        mmROM_INDEX_VG20_BASE_IDX                       0
+#define        mmROM_DATA_VG20                                 0x00e5
+#define        mmROM_DATA_VG20_BASE_IDX                        0
+
 /*
  * Indirect registers accessor
  */
@@ -309,6 +316,8 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
 {
        u32 *dw_ptr;
        u32 i, length_dw;
+       uint32_t rom_index_offset;
+       uint32_t rom_data_offset;
 
        if (bios == NULL)
                return false;
@@ -321,11 +330,23 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
        dw_ptr = (u32 *)bios;
        length_dw = ALIGN(length_bytes, 4) / 4;
 
+       switch (adev->asic_type) {
+       case CHIP_VEGA20:
+       case CHIP_ARCTURUS:
+               rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20);
+               rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20);
+               break;
+       default:
+               rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
+               rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
+               break;
+       }
+
        /* set rom index to 0 */
-       WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
+       WREG32(rom_index_offset, 0);
        /* read out the rom data */
        for (i = 0; i < length_dw; i++)
-               dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
+               dw_ptr[i] = RREG32(rom_data_offset);
 
        return true;
 }
index 71f61af..09b0572 100644
@@ -1352,7 +1352,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle,
 
        if (enable) {
                /* wait for STATUS to clear */
-               if (vcn_v1_0_is_idle(handle))
+               if (!vcn_v1_0_is_idle(handle))
                        return -EBUSY;
                vcn_v1_0_enable_clock_gating(adev);
        } else {
index c387c81..b7f1734 100644
@@ -1217,7 +1217,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
 
        if (enable) {
                /* wait for STATUS to clear */
-               if (vcn_v2_0_is_idle(handle))
+               if (!vcn_v2_0_is_idle(handle))
                        return -EBUSY;
                vcn_v2_0_enable_clock_gating(adev);
        } else {
index 2d64ba1..678253d 100644
@@ -1672,7 +1672,7 @@ static int vcn_v2_5_set_clockgating_state(void *handle,
                return 0;
 
        if (enable) {
-               if (vcn_v2_5_is_idle(handle))
+               if (!vcn_v2_5_is_idle(handle))
                        return -EBUSY;
                vcn_v2_5_enable_clock_gating(adev);
        } else {
index e997251..6240259 100644
@@ -522,8 +522,9 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
 
        acrtc_state = to_dm_crtc_state(acrtc->base.state);
 
-       DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
-                               amdgpu_dm_vrr_active(acrtc_state));
+       DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
+                        amdgpu_dm_vrr_active(acrtc_state),
+                        acrtc_state->active_planes);
 
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
        drm_crtc_handle_vblank(&acrtc->base);
@@ -543,7 +544,18 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
                        &acrtc_state->vrr_params.adjust);
        }
 
-       if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
+       /*
+        * If there aren't any active_planes then DCH HUBP may be clock-gated.
+        * In that case, pageflip completion interrupts won't fire and pageflip
+        * completion events won't get delivered. Prevent this by sending
+        * pending pageflip events from here if a flip is still pending.
+        *
+        * If any planes are enabled, use dm_pflip_high_irq() instead, to
+        * avoid race conditions between flip programming and completion,
+        * which could cause too early flip completion events.
+        */
+       if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
+           acrtc_state->active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
index cb731c1..fd9e696 100644
@@ -3401,6 +3401,17 @@ static bool retrieve_link_cap(struct dc_link *link)
                sink_id.ieee_device_id,
                sizeof(sink_id.ieee_device_id));
 
+       /* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */
+       {
+               uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 };
+
+               if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
+                   !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017,
+                           sizeof(str_mbp_2017))) {
+                       link->reported_link_cap.link_rate = 0x0c;
+               }
+       }
+
        core_link_read_dpcd(
                link,
                DP_SINK_HW_REVISION_START,
index d51e02f..5e640f1 100644
@@ -108,7 +108,6 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
        .enable_power_gating_plane = dcn20_enable_power_gating_plane,
        .dpp_pg_control = dcn20_dpp_pg_control,
        .hubp_pg_control = dcn20_hubp_pg_control,
-       .dsc_pg_control = NULL,
        .update_odm = dcn20_update_odm,
        .dsc_pg_control = dcn20_dsc_pg_control,
        .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
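
The dropped ".dsc_pg_control = NULL" line was dead weight: when a designated initializer names the same member twice, the later entry wins, so dcn20_dsc_pg_control was already the effective value. A standalone demonstration of that C rule (compilers typically flag the duplicate with -Woverride-init):

    #include <stdio.h>

    struct funcs { void (*dsc_pg_control)(void); };

    static void real_control(void) { }

    static const struct funcs f = {
            .dsc_pg_control = NULL,           /* silently overridden below */
            .dsc_pg_control = real_control,   /* this initializer takes effect */
    };

    int main(void)
    {
            printf("%s\n", f.dsc_pg_control == real_control ? "real" : "NULL");
            return 0;
    }
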
index 85f90f3..e310d67 100644 (file)
@@ -335,6 +335,117 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
        .use_urgent_burst_bw = 0
 };
 
+struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
+       .clock_limits = {
+                       {
+                               .state = 0,
+                               .dcfclk_mhz = 560.0,
+                               .fabricclk_mhz = 560.0,
+                               .dispclk_mhz = 513.0,
+                               .dppclk_mhz = 513.0,
+                               .phyclk_mhz = 540.0,
+                               .socclk_mhz = 560.0,
+                               .dscclk_mhz = 171.0,
+                               .dram_speed_mts = 8960.0,
+                       },
+                       {
+                               .state = 1,
+                               .dcfclk_mhz = 694.0,
+                               .fabricclk_mhz = 694.0,
+                               .dispclk_mhz = 642.0,
+                               .dppclk_mhz = 642.0,
+                               .phyclk_mhz = 600.0,
+                               .socclk_mhz = 694.0,
+                               .dscclk_mhz = 214.0,
+                               .dram_speed_mts = 11104.0,
+                       },
+                       {
+                               .state = 2,
+                               .dcfclk_mhz = 875.0,
+                               .fabricclk_mhz = 875.0,
+                               .dispclk_mhz = 734.0,
+                               .dppclk_mhz = 734.0,
+                               .phyclk_mhz = 810.0,
+                               .socclk_mhz = 875.0,
+                               .dscclk_mhz = 245.0,
+                               .dram_speed_mts = 14000.0,
+                       },
+                       {
+                               .state = 3,
+                               .dcfclk_mhz = 1000.0,
+                               .fabricclk_mhz = 1000.0,
+                               .dispclk_mhz = 1100.0,
+                               .dppclk_mhz = 1100.0,
+                               .phyclk_mhz = 810.0,
+                               .socclk_mhz = 1000.0,
+                               .dscclk_mhz = 367.0,
+                               .dram_speed_mts = 16000.0,
+                       },
+                       {
+                               .state = 4,
+                               .dcfclk_mhz = 1200.0,
+                               .fabricclk_mhz = 1200.0,
+                               .dispclk_mhz = 1284.0,
+                               .dppclk_mhz = 1284.0,
+                               .phyclk_mhz = 810.0,
+                               .socclk_mhz = 1200.0,
+                               .dscclk_mhz = 428.0,
+                               .dram_speed_mts = 16000.0,
+                       },
+                       /*Extra state, no dispclk ramping*/
+                       {
+                               .state = 5,
+                               .dcfclk_mhz = 1200.0,
+                               .fabricclk_mhz = 1200.0,
+                               .dispclk_mhz = 1284.0,
+                               .dppclk_mhz = 1284.0,
+                               .phyclk_mhz = 810.0,
+                               .socclk_mhz = 1200.0,
+                               .dscclk_mhz = 428.0,
+                               .dram_speed_mts = 16000.0,
+                       },
+               },
+       .num_states = 5,
+       .sr_exit_time_us = 8.6,
+       .sr_enter_plus_exit_time_us = 10.9,
+       .urgent_latency_us = 4.0,
+       .urgent_latency_pixel_data_only_us = 4.0,
+       .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+       .urgent_latency_vm_data_only_us = 4.0,
+       .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+       .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+       .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+       .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
+       .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
+       .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+       .max_avg_sdp_bw_use_normal_percent = 40.0,
+       .max_avg_dram_bw_use_normal_percent = 40.0,
+       .writeback_latency_us = 12.0,
+       .ideal_dram_bw_after_urgent_percent = 40.0,
+       .max_request_size_bytes = 256,
+       .dram_channel_width_bytes = 2,
+       .fabric_datapath_to_dcn_data_return_bytes = 64,
+       .dcn_downspread_percent = 0.5,
+       .downspread_percent = 0.38,
+       .dram_page_open_time_ns = 50.0,
+       .dram_rw_turnaround_time_ns = 17.5,
+       .dram_return_buffer_per_channel_bytes = 8192,
+       .round_trip_ping_latency_dcfclk_cycles = 131,
+       .urgent_out_of_order_return_per_channel_bytes = 256,
+       .channel_interleave_bytes = 256,
+       .num_banks = 8,
+       .num_chans = 8,
+       .vmm_page_size_bytes = 4096,
+       .dram_clock_change_latency_us = 404.0,
+       .dummy_pstate_latency_us = 5.0,
+       .writeback_dram_clock_change_latency_us = 23.0,
+       .return_bus_width_bytes = 64,
+       .dispclk_dppclk_vco_speed_mhz = 3850,
+       .xfc_bus_transport_time_us = 20,
+       .xfc_xbuf_latency_tolerance_us = 4,
+       .use_urgent_burst_bw = 0
+};
+
 struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
 
 #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
@@ -3291,6 +3402,9 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
 static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
        uint32_t hw_internal_rev)
 {
+       if (ASICREV_IS_NAVI14_M(hw_internal_rev))
+               return &dcn2_0_nv14_soc;
+
        if (ASICREV_IS_NAVI12_P(hw_internal_rev))
                return &dcn2_0_nv12_soc;
 
index 4861aa5..fddbd59 100644 (file)
@@ -116,7 +116,6 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
        .enable_power_gating_plane = dcn20_enable_power_gating_plane,
        .dpp_pg_control = dcn20_dpp_pg_control,
        .hubp_pg_control = dcn20_hubp_pg_control,
-       .dsc_pg_control = NULL,
        .update_odm = dcn20_update_odm,
        .dsc_pg_control = dcn20_dsc_pg_control,
        .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
index ad8e9b5..96e81c7 100644 (file)
@@ -2006,8 +2006,11 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
                        smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
                        smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
                smu_set_watermarks_table(smu, table, clock_ranges);
-               smu->watermarks_bitmap |= WATERMARKS_EXIST;
-               smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+
+               if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
+                       smu->watermarks_bitmap |= WATERMARKS_EXIST;
+                       smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+               }
        }
 
        mutex_unlock(&smu->mutex);
index 0d73a49..aed4d6e 100644 (file)
@@ -1063,15 +1063,6 @@ static int navi10_display_config_changed(struct smu_context *smu)
        int ret = 0;
 
        if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
-           !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
-               ret = smu_write_watermarks_table(smu);
-               if (ret)
-                       return ret;
-
-               smu->watermarks_bitmap |= WATERMARKS_LOADED;
-       }
-
-       if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
            smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
            smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
@@ -1493,6 +1484,7 @@ static int navi10_set_watermarks_table(struct smu_context *smu,
                                       *clock_ranges)
 {
        int i;
+       int ret = 0;
        Watermarks_t *table = watermarks;
 
        if (!table || !clock_ranges)
@@ -1544,6 +1536,18 @@ static int navi10_set_watermarks_table(struct smu_context *smu,
                                clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
        }
 
+       smu->watermarks_bitmap |= WATERMARKS_EXIST;
+
+       /* pass data to smu controller */
+       if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+               ret = smu_write_watermarks_table(smu);
+               if (ret) {
+                       pr_err("Failed to update WMTABLE!");
+                       return ret;
+               }
+               smu->watermarks_bitmap |= WATERMARKS_LOADED;
+       }
+
        return 0;
 }
 
index 568c041..3ad0f4a 100644 (file)
@@ -806,9 +806,10 @@ static int renoir_set_watermarks_table(
                                clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
        }
 
+       smu->watermarks_bitmap |= WATERMARKS_EXIST;
+
        /* pass data to smu controller */
-       if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
-                       !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+       if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
                ret = smu_write_watermarks_table(smu);
                if (ret) {
                        pr_err("Failed to update WMTABLE!");
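
The smu/navi10/renoir hunks above converge on one bookkeeping scheme: filling the local table marks WATERMARKS_EXIST, and the table is pushed to the SMU at most once, after which WATERMARKS_LOADED blocks redundant writes. A reduced sketch of that state machine; the flag values and helper are illustrative, not the driver's:

    #define WATERMARKS_EXIST   (1u << 0)
    #define WATERMARKS_LOADED  (1u << 1)

    static int write_watermarks_table(void)
    {
            return 0;       /* stand-in for smu_write_watermarks_table() */
    }

    static int set_watermarks(unsigned int *bitmap)
    {
            /* Filling the local table means watermarks now exist... */
            *bitmap |= WATERMARKS_EXIST;

            /* ...but push them to firmware only once. */
            if (!(*bitmap & WATERMARKS_LOADED)) {
                    int ret = write_watermarks_table();
                    if (ret)
                            return ret;
                    *bitmap |= WATERMARKS_LOADED;
            }
            return 0;
    }
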
index ea5cd1e..e793393 100644 (file)
@@ -146,14 +146,14 @@ static const struct of_device_id komeda_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, komeda_of_match);
 
-static int komeda_rt_pm_suspend(struct device *dev)
+static int __maybe_unused komeda_rt_pm_suspend(struct device *dev)
 {
        struct komeda_drv *mdrv = dev_get_drvdata(dev);
 
        return komeda_dev_suspend(mdrv->mdev);
 }
 
-static int komeda_rt_pm_resume(struct device *dev)
+static int __maybe_unused komeda_rt_pm_resume(struct device *dev)
 {
        struct komeda_drv *mdrv = dev_get_drvdata(dev);
 
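Annotating the komeda runtime-PM callbacks with __maybe_unused avoids an unused-function warning when nothing references them (for example with CONFIG_PM disabled), without wrapping them in #ifdefs. A standalone approximation; the macro is redefined here only so the snippet compiles outside the kernel:

    #define __maybe_unused __attribute__((__unused__))

    static int __maybe_unused rt_pm_suspend_stub(void *dev)
    {
            (void)dev;
            return 0;       /* would forward to the device suspend hook */
    }
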
index b615b7d..a4fc4e6 100644 (file)
@@ -156,10 +156,8 @@ int bochs_hw_init(struct drm_device *dev)
                size = min(size, mem);
        }
 
-       if (pci_request_region(pdev, 0, "bochs-drm") != 0) {
-               DRM_ERROR("Cannot request framebuffer\n");
-               return -EBUSY;
-       }
+       if (pci_request_region(pdev, 0, "bochs-drm") != 0)
+               DRM_WARN("Cannot request framebuffer, boot fb still active?\n");
 
        bochs->fb_map = ioremap(addr, size);
        if (bochs->fb_map == NULL) {
index 67fca43..24965e5 100644 (file)
@@ -1624,28 +1624,34 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
                frame.colorspace = HDMI_COLORSPACE_RGB;
 
        /* Set up colorimetry */
-       switch (hdmi->hdmi_data.enc_out_encoding) {
-       case V4L2_YCBCR_ENC_601:
-               if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
-                       frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
-               else
+       if (!hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
+               switch (hdmi->hdmi_data.enc_out_encoding) {
+               case V4L2_YCBCR_ENC_601:
+                       if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
+                               frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+                       else
+                               frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
+                       frame.extended_colorimetry =
+                                       HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+                       break;
+               case V4L2_YCBCR_ENC_709:
+                       if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
+                               frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+                       else
+                               frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
+                       frame.extended_colorimetry =
+                                       HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
+                       break;
+               default: /* Carries no data */
                        frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
+                       frame.extended_colorimetry =
+                                       HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+                       break;
+               }
+       } else {
+               frame.colorimetry = HDMI_COLORIMETRY_NONE;
                frame.extended_colorimetry =
-                               HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
-               break;
-       case V4L2_YCBCR_ENC_709:
-               if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
-                       frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
-               else
-                       frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
-               frame.extended_colorimetry =
-                               HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
-               break;
-       default: /* Carries no data */
-               frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
-               frame.extended_colorimetry =
-                               HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
-               break;
+                       HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
        }
 
        frame.scan_mode = HDMI_SCAN_MODE_NONE;
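
The restructured block above selects an ITU/xvYCC colorimetry only when the encoder output is YCbCr; for RGB output the AVI infoframe now reports HDMI_COLORIMETRY_NONE. A compressed sketch of that decision, with plain stand-in enums instead of the infoframe definitions:

    enum colorimetry { COLORIMETRY_NONE, COLORIMETRY_ITU_601, COLORIMETRY_ITU_709 };

    static enum colorimetry pick_colorimetry(int out_is_rgb, int enc_is_709)
    {
            if (out_is_rgb)
                    return COLORIMETRY_NONE;
            return enc_is_709 ? COLORIMETRY_ITU_709 : COLORIMETRY_ITU_601;
    }
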
index cce0b1b..ed0fea2 100644 (file)
@@ -1935,7 +1935,7 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
        return parent_lct + 1;
 }
 
-static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
+static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
 {
        switch (pdt) {
        case DP_PEER_DEVICE_DP_LEGACY_CONV:
@@ -1965,13 +1965,13 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
 
        /* Teardown the old pdt, if there is one */
        if (port->pdt != DP_PEER_DEVICE_NONE) {
-               if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+               if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
                        /*
                         * If the new PDT would also have an i2c bus,
                         * don't bother with reregistering it
                         */
                        if (new_pdt != DP_PEER_DEVICE_NONE &&
-                           drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
+                           drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
                                port->pdt = new_pdt;
                                port->mcs = new_mcs;
                                return 0;
@@ -1991,7 +1991,7 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
        port->mcs = new_mcs;
 
        if (port->pdt != DP_PEER_DEVICE_NONE) {
-               if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+               if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
                        /* add i2c over sideband */
                        ret = drm_dp_mst_register_i2c_bus(&port->aux);
                } else {
@@ -2172,7 +2172,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
        }
 
        if (port->pdt != DP_PEER_DEVICE_NONE &&
-           drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+           drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
                port->cached_edid = drm_get_edid(port->connector,
                                                 &port->aux.ddc);
                drm_connector_set_tile_property(port->connector);
@@ -2302,14 +2302,18 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
                mutex_unlock(&mgr->lock);
        }
 
-       if (old_ddps != port->ddps) {
-               if (port->ddps) {
-                       if (!port->input) {
-                               drm_dp_send_enum_path_resources(mgr, mstb,
-                                                               port);
-                       }
+       /*
+        * Reprobe PBN caps on both hotplug, and when re-probing the link
+        * for our parent mstb
+        */
+       if (old_ddps != port->ddps || !created) {
+               if (port->ddps && !port->input) {
+                       ret = drm_dp_send_enum_path_resources(mgr, mstb,
+                                                             port);
+                       if (ret == 1)
+                               changed = true;
                } else {
-                       port->available_pbn = 0;
+                       port->full_pbn = 0;
                }
        }
 
@@ -2401,11 +2405,10 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
        port->ddps = conn_stat->displayport_device_plug_status;
 
        if (old_ddps != port->ddps) {
-               if (port->ddps) {
-                       dowork = true;
-               } else {
-                       port->available_pbn = 0;
-               }
+               if (port->ddps && !port->input)
+                       drm_dp_send_enum_path_resources(mgr, mstb, port);
+               else
+                       port->full_pbn = 0;
        }
 
        new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
@@ -2556,13 +2559,6 @@ static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mg
                if (port->input || !port->ddps)
                        continue;
 
-               if (!port->available_pbn) {
-                       drm_modeset_lock(&mgr->base.lock, NULL);
-                       drm_dp_send_enum_path_resources(mgr, mstb, port);
-                       drm_modeset_unlock(&mgr->base.lock);
-                       changed = true;
-               }
-
                if (port->mstb)
                        mstb_child = drm_dp_mst_topology_get_mstb_validated(
                            mgr, port->mstb);
@@ -2990,6 +2986,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
 
        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
        if (ret > 0) {
+               ret = 0;
                path_res = &txmsg->reply.u.path_resources;
 
                if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
@@ -3002,14 +2999,22 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
                                      path_res->port_number,
                                      path_res->full_payload_bw_number,
                                      path_res->avail_payload_bw_number);
-                       port->available_pbn =
-                               path_res->avail_payload_bw_number;
+
+                       /*
+                        * If something changed, make sure we send a
+                        * hotplug
+                        */
+                       if (port->full_pbn != path_res->full_payload_bw_number ||
+                           port->fec_capable != path_res->fec_capable)
+                               ret = 1;
+
+                       port->full_pbn = path_res->full_payload_bw_number;
                        port->fec_capable = path_res->fec_capable;
                }
        }
 
        kfree(txmsg);
-       return 0;
+       return ret;
 }
 
 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
@@ -3596,13 +3601,9 @@ drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
        /* The link address will need to be re-sent on resume */
        mstb->link_address_sent = false;
 
-       list_for_each_entry(port, &mstb->ports, next) {
-               /* The PBN for each port will also need to be re-probed */
-               port->available_pbn = 0;
-
+       list_for_each_entry(port, &mstb->ports, next)
                if (port->mstb)
                        drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
-       }
 }
 
 /**
@@ -4829,41 +4830,102 @@ static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
        return false;
 }
 
-static inline
-int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch,
-                                    struct drm_dp_mst_topology_state *mst_state)
+static int
+drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
+                                     struct drm_dp_mst_topology_state *state);
+
+static int
+drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
+                                     struct drm_dp_mst_topology_state *state)
 {
-       struct drm_dp_mst_port *port;
        struct drm_dp_vcpi_allocation *vcpi;
-       int pbn_limit = 0, pbn_used = 0;
+       struct drm_dp_mst_port *port;
+       int pbn_used = 0, ret;
+       bool found = false;
 
-       list_for_each_entry(port, &branch->ports, next) {
-               if (port->mstb)
-                       if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state))
-                               return -ENOSPC;
+       /* Check that we have at least one port in our state that's downstream
+        * of this branch, otherwise we can skip this branch
+        */
+       list_for_each_entry(vcpi, &state->vcpis, next) {
+               if (!vcpi->pbn ||
+                   !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
+                       continue;
 
-               if (port->available_pbn > 0)
-                       pbn_limit = port->available_pbn;
+               found = true;
+               break;
        }
-       DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n",
-                        branch, pbn_limit);
+       if (!found)
+               return 0;
 
-       list_for_each_entry(vcpi, &mst_state->vcpis, next) {
-               if (!vcpi->pbn)
-                       continue;
+       if (mstb->port_parent)
+               DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
+                                mstb->port_parent->parent, mstb->port_parent,
+                                mstb);
+       else
+               DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n",
+                                mstb);
+
+       list_for_each_entry(port, &mstb->ports, next) {
+               ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
+               if (ret < 0)
+                       return ret;
 
-               if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch))
-                       pbn_used += vcpi->pbn;
+               pbn_used += ret;
        }
-       DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n",
-                        branch, pbn_used);
 
-       if (pbn_used > pbn_limit) {
-               DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n",
-                                branch);
+       return pbn_used;
+}
+
+static int
+drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
+                                     struct drm_dp_mst_topology_state *state)
+{
+       struct drm_dp_vcpi_allocation *vcpi;
+       int pbn_used = 0;
+
+       if (port->pdt == DP_PEER_DEVICE_NONE)
+               return 0;
+
+       if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
+               bool found = false;
+
+               list_for_each_entry(vcpi, &state->vcpis, next) {
+                       if (vcpi->port != port)
+                               continue;
+                       if (!vcpi->pbn)
+                               return 0;
+
+                       found = true;
+                       break;
+               }
+               if (!found)
+                       return 0;
+
+               /* This should never happen, as it means we tried to
+                * set a mode before querying the full_pbn
+                */
+               if (WARN_ON(!port->full_pbn))
+                       return -EINVAL;
+
+               pbn_used = vcpi->pbn;
+       } else {
+               pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
+                                                                state);
+               if (pbn_used <= 0)
+                       return pbn_used;
+       }
+
+       if (pbn_used > port->full_pbn) {
+               DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
+                                port->parent, port, pbn_used,
+                                port->full_pbn);
                return -ENOSPC;
        }
-       return 0;
+
+       DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
+                        port->parent, port, pbn_used, port->full_pbn);
+
+       return pbn_used;
 }
 
 static inline int
@@ -5061,9 +5123,15 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
                ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
                if (ret)
                        break;
-               ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state);
-               if (ret)
+
+               mutex_lock(&mgr->lock);
+               ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
+                                                           mst_state);
+               mutex_unlock(&mgr->lock);
+               if (ret < 0)
                        break;
+               else
+                       ret = 0;
        }
 
        return ret;
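
The topology hunks above replace the cached available_pbn with full_pbn (re-read via ENUM_PATH_RESOURCES on hotplug) and turn the atomic bandwidth check into a recursive walk: each end device contributes its allocated PBN, each branch sums its ports, and every level is compared against that port's full_pbn. A very reduced, self-contained sketch of the walk; the node layout and field names are invented:

    #include <errno.h>

    struct node {
            int full_pbn;        /* 0 until path resources have been read */
            int allocated_pbn;   /* PBN claimed in the atomic state       */
            struct node *children;
            int n_children;
    };

    static int check_bw(const struct node *n)
    {
            int used = n->allocated_pbn;

            for (int i = 0; i < n->n_children; i++) {
                    int child = check_bw(&n->children[i]);
                    if (child < 0)
                            return child;          /* propagate -ENOSPC */
                    used += child;
            }

            if (n->full_pbn && used > n->full_pbn)
                    return -ENOSPC;

            return used;                           /* PBN used below this node */
    }
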
index b481caf..825abe3 100644 (file)
@@ -542,10 +542,12 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
        }
 
        DRM_DEBUG_LEASE("Creating lease\n");
+       /* lessee will take the ownership of leases */
        lessee = drm_lease_create(lessor, &leases);
 
        if (IS_ERR(lessee)) {
                ret = PTR_ERR(lessee);
+               idr_destroy(&leases);
                goto out_leases;
        }
 
@@ -580,7 +582,6 @@ out_lessee:
 
 out_leases:
        put_unused_fd(fd);
-       idr_destroy(&leases);
 
        DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
        return ret;
index 86d9b0e..1de2cde 100644 (file)
@@ -967,7 +967,7 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
 
        index = 0;
        for_each_sg(sgt->sgl, sg, sgt->nents, count) {
-               len = sg->length;
+               len = sg_dma_len(sg);
                page = sg_page(sg);
                addr = sg_dma_address(sg);
 
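Switching from sg->length to sg_dma_len(sg) matters because, once dma_map_sg() has run, the device-visible length of a segment can differ from the CPU-side length (an IOMMU may coalesce entries); code that derives DMA addresses must therefore iterate with the DMA length. A stand-in struct to illustrate the distinction (not the kernel scatterlist):

    struct sg_stub {
            unsigned int length;      /* CPU-side segment length            */
            unsigned int dma_length;  /* length as seen by the device (DMA) */
    };

    static unsigned int sg_dma_len_stub(const struct sg_stub *sg)
    {
            /* Consumers of sg_dma_address() must pair it with this length. */
            return sg->dma_length;
    }
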
index 8428ae1..1f79bc2 100644 (file)
@@ -55,6 +55,7 @@ static const char * const decon_clks_name[] = {
 struct decon_context {
        struct device                   *dev;
        struct drm_device               *drm_dev;
+       void                            *dma_priv;
        struct exynos_drm_crtc          *crtc;
        struct exynos_drm_plane         planes[WINDOWS_NR];
        struct exynos_drm_plane_config  configs[WINDOWS_NR];
@@ -644,7 +645,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
 
        decon_clear_channels(ctx->crtc);
 
-       return exynos_drm_register_dma(drm_dev, dev);
+       return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
 }
 
 static void decon_unbind(struct device *dev, struct device *master, void *data)
@@ -654,7 +655,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
        decon_atomic_disable(ctx->crtc);
 
        /* detach this sub driver from iommu mapping if supported. */
-       exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
+       exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
 }
 
 static const struct component_ops decon_component_ops = {
index ff59c64..1eed332 100644 (file)
@@ -40,6 +40,7 @@
 struct decon_context {
        struct device                   *dev;
        struct drm_device               *drm_dev;
+       void                            *dma_priv;
        struct exynos_drm_crtc          *crtc;
        struct exynos_drm_plane         planes[WINDOWS_NR];
        struct exynos_drm_plane_config  configs[WINDOWS_NR];
@@ -127,13 +128,13 @@ static int decon_ctx_initialize(struct decon_context *ctx,
 
        decon_clear_channels(ctx->crtc);
 
-       return exynos_drm_register_dma(drm_dev, ctx->dev);
+       return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv);
 }
 
 static void decon_ctx_remove(struct decon_context *ctx)
 {
        /* detach this sub driver from iommu mapping if supported. */
-       exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
+       exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
 }
 
 static u32 decon_calc_clkdiv(struct decon_context *ctx,
index 9ebc027..619f814 100644 (file)
@@ -58,7 +58,7 @@ static inline void clear_dma_max_seg_size(struct device *dev)
  * mapping.
  */
 static int drm_iommu_attach_device(struct drm_device *drm_dev,
-                               struct device *subdrv_dev)
+                               struct device *subdrv_dev, void **dma_priv)
 {
        struct exynos_drm_private *priv = drm_dev->dev_private;
        int ret;
@@ -74,7 +74,14 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
                return ret;
 
        if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
-               if (to_dma_iommu_mapping(subdrv_dev))
+               /*
+                * Keep the original DMA mapping of the sub-device and
+                * restore it on Exynos DRM detach, otherwise the DMA
+                * framework considers it as IOMMU-less during the next
+                * probe (in case of deferred probe or modular build)
+                */
+               *dma_priv = to_dma_iommu_mapping(subdrv_dev);
+               if (*dma_priv)
                        arm_iommu_detach_device(subdrv_dev);
 
                ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
@@ -98,19 +105,21 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
  * mapping
  */
 static void drm_iommu_detach_device(struct drm_device *drm_dev,
-                               struct device *subdrv_dev)
+                                   struct device *subdrv_dev, void **dma_priv)
 {
        struct exynos_drm_private *priv = drm_dev->dev_private;
 
-       if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+       if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
                arm_iommu_detach_device(subdrv_dev);
-       else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+               arm_iommu_attach_device(subdrv_dev, *dma_priv);
+       } else if (IS_ENABLED(CONFIG_IOMMU_DMA))
                iommu_detach_device(priv->mapping, subdrv_dev);
 
        clear_dma_max_seg_size(subdrv_dev);
 }
 
-int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
+                           void **dma_priv)
 {
        struct exynos_drm_private *priv = drm->dev_private;
 
@@ -137,13 +146,14 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
                priv->mapping = mapping;
        }
 
-       return drm_iommu_attach_device(drm, dev);
+       return drm_iommu_attach_device(drm, dev, dma_priv);
 }
 
-void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev)
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
+                              void **dma_priv)
 {
        if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
-               drm_iommu_detach_device(drm, dev);
+               drm_iommu_detach_device(drm, dev, dma_priv);
 }
 
 void exynos_drm_cleanup_dma(struct drm_device *drm)
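
The Exynos changes thread a per-device dma_priv pointer through register/unregister so the ARM DMA-IOMMU mapping a sub-device had before binding can be restored on unbind; otherwise a deferred probe or module reload finds the device seemingly IOMMU-less. A toy sketch of the save/restore, with all names invented:

    struct mapping { int id; };

    static struct mapping default_map = { .id = 1 };
    static struct mapping drm_map     = { .id = 2 };
    static struct mapping *current_map = &default_map;

    static void attach_to_drm(void **dma_priv)
    {
            /* Remember whatever mapping the device already had... */
            *dma_priv = current_map;
            /* ...then switch the device to the DRM-wide mapping. */
            current_map = &drm_map;
    }

    static void detach_from_drm(void **dma_priv)
    {
            /* Put the original mapping back so a later re-probe sees it. */
            current_map = *dma_priv;
    }
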
index d4d21d8..6ae9056 100644 (file)
@@ -223,8 +223,10 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
        return priv->mapping ? true : false;
 }
 
-int exynos_drm_register_dma(struct drm_device *drm, struct device *dev);
-void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev);
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
+                           void **dma_priv);
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
+                              void **dma_priv);
 void exynos_drm_cleanup_dma(struct drm_device *drm);
 
 #ifdef CONFIG_DRM_EXYNOS_DPI
index 8ea2e1d..29ab8be 100644 (file)
@@ -97,6 +97,7 @@ struct fimc_scaler {
 struct fimc_context {
        struct exynos_drm_ipp ipp;
        struct drm_device *drm_dev;
+       void            *dma_priv;
        struct device   *dev;
        struct exynos_drm_ipp_task      *task;
        struct exynos_drm_ipp_formats   *formats;
@@ -1133,7 +1134,7 @@ static int fimc_bind(struct device *dev, struct device *master, void *data)
 
        ctx->drm_dev = drm_dev;
        ipp->drm_dev = drm_dev;
-       exynos_drm_register_dma(drm_dev, dev);
+       exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
 
        exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
                        DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1153,7 +1154,7 @@ static void fimc_unbind(struct device *dev, struct device *master,
        struct exynos_drm_ipp *ipp = &ctx->ipp;
 
        exynos_drm_ipp_unregister(dev, ipp);
-       exynos_drm_unregister_dma(drm_dev, dev);
+       exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
 }
 
 static const struct component_ops fimc_component_ops = {
index 21aec38..bb67cad 100644 (file)
@@ -167,6 +167,7 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = {
 struct fimd_context {
        struct device                   *dev;
        struct drm_device               *drm_dev;
+       void                            *dma_priv;
        struct exynos_drm_crtc          *crtc;
        struct exynos_drm_plane         planes[WINDOWS_NR];
        struct exynos_drm_plane_config  configs[WINDOWS_NR];
@@ -1090,7 +1091,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
        if (is_drm_iommu_supported(drm_dev))
                fimd_clear_channels(ctx->crtc);
 
-       return exynos_drm_register_dma(drm_dev, dev);
+       return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
 }
 
 static void fimd_unbind(struct device *dev, struct device *master,
@@ -1100,7 +1101,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
 
        fimd_atomic_disable(ctx->crtc);
 
-       exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
+       exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
 
        if (ctx->encoder)
                exynos_dpi_remove(ctx->encoder);
index 2a3382d..fcee33a 100644 (file)
@@ -232,6 +232,7 @@ struct g2d_runqueue_node {
 
 struct g2d_data {
        struct device                   *dev;
+       void                            *dma_priv;
        struct clk                      *gate_clk;
        void __iomem                    *regs;
        int                             irq;
@@ -1409,7 +1410,7 @@ static int g2d_bind(struct device *dev, struct device *master, void *data)
                return ret;
        }
 
-       ret = exynos_drm_register_dma(drm_dev, dev);
+       ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv);
        if (ret < 0) {
                dev_err(dev, "failed to enable iommu.\n");
                g2d_fini_cmdlist(g2d);
@@ -1434,7 +1435,7 @@ static void g2d_unbind(struct device *dev, struct device *master, void *data)
        priv->g2d_dev = NULL;
 
        cancel_work_sync(&g2d->runqueue_work);
-       exynos_drm_unregister_dma(g2d->drm_dev, dev);
+       exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv);
 }
 
 static const struct component_ops g2d_component_ops = {
index 88b6fca..45e9aee 100644 (file)
@@ -97,6 +97,7 @@ struct gsc_scaler {
 struct gsc_context {
        struct exynos_drm_ipp ipp;
        struct drm_device *drm_dev;
+       void            *dma_priv;
        struct device   *dev;
        struct exynos_drm_ipp_task      *task;
        struct exynos_drm_ipp_formats   *formats;
@@ -1169,7 +1170,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
 
        ctx->drm_dev = drm_dev;
        ctx->drm_dev = drm_dev;
-       exynos_drm_register_dma(drm_dev, dev);
+       exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
 
        exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
                        DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1189,7 +1190,7 @@ static void gsc_unbind(struct device *dev, struct device *master,
        struct exynos_drm_ipp *ipp = &ctx->ipp;
 
        exynos_drm_ipp_unregister(dev, ipp);
-       exynos_drm_unregister_dma(drm_dev, dev);
+       exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
 }
 
 static const struct component_ops gsc_component_ops = {
index b984829..dafa87b 100644 (file)
@@ -56,6 +56,7 @@ struct rot_variant {
 struct rot_context {
        struct exynos_drm_ipp ipp;
        struct drm_device *drm_dev;
+       void            *dma_priv;
        struct device   *dev;
        void __iomem    *regs;
        struct clk      *clock;
@@ -243,7 +244,7 @@ static int rotator_bind(struct device *dev, struct device *master, void *data)
 
        rot->drm_dev = drm_dev;
        ipp->drm_dev = drm_dev;
-       exynos_drm_register_dma(drm_dev, dev);
+       exynos_drm_register_dma(drm_dev, dev, &rot->dma_priv);
 
        exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
                           DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
@@ -261,7 +262,7 @@ static void rotator_unbind(struct device *dev, struct device *master,
        struct exynos_drm_ipp *ipp = &rot->ipp;
 
        exynos_drm_ipp_unregister(dev, ipp);
-       exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
+       exynos_drm_unregister_dma(rot->drm_dev, rot->dev, &rot->dma_priv);
 }
 
 static const struct component_ops rotator_component_ops = {
index 497973e..93c43c8 100644 (file)
@@ -39,6 +39,7 @@ struct scaler_data {
 struct scaler_context {
        struct exynos_drm_ipp           ipp;
        struct drm_device               *drm_dev;
+       void                            *dma_priv;
        struct device                   *dev;
        void __iomem                    *regs;
        struct clk                      *clock[SCALER_MAX_CLK];
@@ -450,7 +451,7 @@ static int scaler_bind(struct device *dev, struct device *master, void *data)
 
        scaler->drm_dev = drm_dev;
        ipp->drm_dev = drm_dev;
-       exynos_drm_register_dma(drm_dev, dev);
+       exynos_drm_register_dma(drm_dev, dev, &scaler->dma_priv);
 
        exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
                        DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -470,7 +471,8 @@ static void scaler_unbind(struct device *dev, struct device *master,
        struct exynos_drm_ipp *ipp = &scaler->ipp;
 
        exynos_drm_ipp_unregister(dev, ipp);
-       exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
+       exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev,
+                                 &scaler->dma_priv);
 }
 
 static const struct component_ops scaler_component_ops = {
index 38ae9c3..21b726b 100644 (file)
@@ -94,6 +94,7 @@ struct mixer_context {
        struct platform_device *pdev;
        struct device           *dev;
        struct drm_device       *drm_dev;
+       void                    *dma_priv;
        struct exynos_drm_crtc  *crtc;
        struct exynos_drm_plane planes[MIXER_WIN_NR];
        unsigned long           flags;
@@ -894,12 +895,14 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
                }
        }
 
-       return exynos_drm_register_dma(drm_dev, mixer_ctx->dev);
+       return exynos_drm_register_dma(drm_dev, mixer_ctx->dev,
+                                      &mixer_ctx->dma_priv);
 }
 
 static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
 {
-       exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev);
+       exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev,
+                                 &mixer_ctx->dma_priv);
 }
 
 static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
index 60c984e..7643a30 100644 (file)
@@ -423,7 +423,8 @@ eb_validate_vma(struct i915_execbuffer *eb,
        if (unlikely(entry->flags & eb->invalid_flags))
                return -EINVAL;
 
-       if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
+       if (unlikely(entry->alignment &&
+                    !is_power_of_2_u64(entry->alignment)))
                return -EINVAL;
 
        /*
index fe8a59a..31455ec 100644 (file)
@@ -1600,17 +1600,6 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
        spin_unlock(&old->breadcrumbs.irq_lock);
 }
 
-static struct i915_request *
-last_active(const struct intel_engine_execlists *execlists)
-{
-       struct i915_request * const *last = READ_ONCE(execlists->active);
-
-       while (*last && i915_request_completed(*last))
-               last++;
-
-       return *last;
-}
-
 #define for_each_waiter(p__, rq__) \
        list_for_each_entry_lockless(p__, \
                                     &(rq__)->sched.waiters_list, \
@@ -1679,11 +1668,9 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
        if (!intel_engine_has_timeslices(engine))
                return false;
 
-       if (list_is_last(&rq->sched.link, &engine->active.requests))
-               return false;
-
-       hint = max(rq_prio(list_next_entry(rq, sched.link)),
-                  engine->execlists.queue_priority_hint);
+       hint = engine->execlists.queue_priority_hint;
+       if (!list_is_last(&rq->sched.link, &engine->active.requests))
+               hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
 
        return hint >= effective_prio(rq);
 }
@@ -1725,16 +1712,26 @@ static void set_timeslice(struct intel_engine_cs *engine)
        set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
 }
 
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+       struct intel_engine_execlists *execlists = &engine->execlists;
+
+       execlists->switch_priority_hint = execlists->queue_priority_hint;
+
+       if (timer_pending(&execlists->timer))
+               return;
+
+       set_timer_ms(&execlists->timer, timeslice(engine));
+}
+
 static void record_preemption(struct intel_engine_execlists *execlists)
 {
        (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
 }
 
-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+                                           const struct i915_request *rq)
 {
-       struct i915_request *rq;
-
-       rq = last_active(&engine->execlists);
        if (!rq)
                return 0;
 
@@ -1745,13 +1742,14 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
        return READ_ONCE(engine->props.preempt_timeout_ms);
 }
 
-static void set_preempt_timeout(struct intel_engine_cs *engine)
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+                               const struct i915_request *rq)
 {
        if (!intel_engine_has_preempt_reset(engine))
                return;
 
        set_timer_ms(&engine->execlists.preempt,
-                    active_preempt_timeout(engine));
+                    active_preempt_timeout(engine, rq));
 }
 
 static inline void clear_ports(struct i915_request **ports, int count)
@@ -1764,6 +1762,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_request **port = execlists->pending;
        struct i915_request ** const last_port = port + execlists->port_mask;
+       struct i915_request * const *active;
        struct i915_request *last;
        struct rb_node *rb;
        bool submit = false;
@@ -1818,7 +1817,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
         * i.e. we will retrigger preemption following the ack in case
         * of trouble.
         */
-       last = last_active(execlists);
+       active = READ_ONCE(execlists->active);
+       while ((last = *active) && i915_request_completed(last))
+               active++;
+
        if (last) {
                if (need_preempt(engine, last, rb)) {
                        ENGINE_TRACE(engine,
@@ -1888,11 +1890,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                 * Even if ELSP[1] is occupied and not worthy
                                 * of timeslices, our queue might be.
                                 */
-                               if (!execlists->timer.expires &&
-                                   need_timeslice(engine, last))
-                                       set_timer_ms(&execlists->timer,
-                                                    timeslice(engine));
-
+                               start_timeslice(engine);
                                return;
                        }
                }
@@ -1927,7 +1925,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
                        if (last && !can_merge_rq(last, rq)) {
                                spin_unlock(&ve->base.active.lock);
-                               return; /* leave this for another */
+                               start_timeslice(engine);
+                               return; /* leave this for another sibling */
                        }
 
                        ENGINE_TRACE(engine,
@@ -2103,7 +2102,7 @@ done:
                 * Skip if we ended up with exactly the same set of requests,
                 * e.g. trying to timeslice a pair of ordered contexts
                 */
-               if (!memcmp(execlists->active, execlists->pending,
+               if (!memcmp(active, execlists->pending,
                            (port - execlists->pending + 1) * sizeof(*port))) {
                        do
                                execlists_schedule_out(fetch_and_zero(port));
@@ -2114,7 +2113,7 @@ done:
                clear_ports(port + 1, last_port - port);
 
                execlists_submit_ports(engine);
-               set_preempt_timeout(engine);
+               set_preempt_timeout(engine, *active);
        } else {
 skip_submit:
                ring_set_paused(engine, 0);
@@ -4001,26 +4000,6 @@ static int gen12_emit_flush_render(struct i915_request *request,
 
                *cs++ = preparser_disable(false);
                intel_ring_advance(request, cs);
-
-               /*
-                * Wa_1604544889:tgl
-                */
-               if (IS_TGL_REVID(request->i915, TGL_REVID_A0, TGL_REVID_A0)) {
-                       flags = 0;
-                       flags |= PIPE_CONTROL_CS_STALL;
-                       flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
-
-                       flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-                       flags |= PIPE_CONTROL_QW_WRITE;
-
-                       cs = intel_ring_begin(request, 6);
-                       if (IS_ERR(cs))
-                               return PTR_ERR(cs);
-
-                       cs = gen8_emit_pipe_control(cs, flags,
-                                                   LRC_PPHWSP_SCRATCH_ADDR);
-                       intel_ring_advance(request, cs);
-               }
        }
 
        return 0;
index 8771652..d8d9f11 100644 (file)
@@ -192,11 +192,15 @@ static void cacheline_release(struct intel_timeline_cacheline *cl)
 
 static void cacheline_free(struct intel_timeline_cacheline *cl)
 {
+       if (!i915_active_acquire_if_busy(&cl->active)) {
+               __idle_cacheline_free(cl);
+               return;
+       }
+
        GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
        cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
 
-       if (i915_active_is_idle(&cl->active))
-               __idle_cacheline_free(cl);
+       i915_active_release(&cl->active);
 }
 
 int intel_timeline_init(struct intel_timeline *timeline,
index 173a7f2..6c2f846 100644 (file)
@@ -1529,15 +1529,34 @@ err_obj:
        return ERR_PTR(err);
 }
 
+static const struct {
+       u32 start;
+       u32 end;
+} mcr_ranges_gen8[] = {
+       { .start = 0x5500, .end = 0x55ff },
+       { .start = 0x7000, .end = 0x7fff },
+       { .start = 0x9400, .end = 0x97ff },
+       { .start = 0xb000, .end = 0xb3ff },
+       { .start = 0xe000, .end = 0xe7ff },
+       {},
+};
+
 static bool mcr_range(struct drm_i915_private *i915, u32 offset)
 {
+       int i;
+
+       if (INTEL_GEN(i915) < 8)
+               return false;
+
        /*
-        * Registers in this range are affected by the MCR selector
+        * Registers in these ranges are affected by the MCR selector
         * which only controls CPU initiated MMIO. Routing does not
         * work for CS access so we cannot verify them on this path.
         */
-       if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
-               return true;
+       for (i = 0; mcr_ranges_gen8[i].start; i++)
+               if (offset >= mcr_ranges_gen8[i].start &&
+                   offset <= mcr_ranges_gen8[i].end)
+                       return true;
 
        return false;
 }
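
The workaround-verification hunk swaps a single hard-coded register window for a table of MCR-affected ranges terminated by a zero entry, which is easier to extend for new ranges. A self-contained sketch of the sentinel-terminated lookup (the addresses are copied from the hunk, the function name is not):

    #include <stdbool.h>
    #include <stdint.h>

    struct reg_range { uint32_t start, end; };

    static const struct reg_range mcr_ranges[] = {
            { 0x5500, 0x55ff },
            { 0x7000, 0x7fff },
            { 0x9400, 0x97ff },
            { 0xb000, 0xb3ff },
            { 0xe000, 0xe7ff },
            { 0, 0 },               /* sentinel: start == 0 ends the walk */
    };

    static bool in_mcr_range(uint32_t offset)
    {
            for (int i = 0; mcr_ranges[i].start; i++)
                    if (offset >= mcr_ranges[i].start &&
                        offset <= mcr_ranges[i].end)
                            return true;
            return false;
    }
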
index e1c313d..a62bdf9 100644 (file)
@@ -457,7 +457,8 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
        /* TODO: add more platforms support */
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+               IS_COFFEELAKE(dev_priv)) {
                if (connected) {
                        vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
                                SFUSE_STRAP_DDID_DETECTED;
index 867e762..33569b9 100644 (file)
@@ -147,15 +147,14 @@ static void virt_vbt_generation(struct vbt *v)
        /* there's features depending on version! */
        v->header.version = 155;
        v->header.header_size = sizeof(v->header);
-       v->header.vbt_size = sizeof(struct vbt) - sizeof(v->header);
+       v->header.vbt_size = sizeof(struct vbt);
        v->header.bdb_offset = offsetof(struct vbt, bdb_header);
 
        strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
        v->bdb_header.version = 186; /* child_dev_size = 33 */
        v->bdb_header.header_size = sizeof(v->bdb_header);
 
-       v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
-               - sizeof(struct bdb_header);
+       v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header);
 
        /* general features */
        v->general_features_header.id = BDB_GENERAL_FEATURES;
index 487af6e..345c2aa 100644 (file)
@@ -272,10 +272,17 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 {
        struct intel_gvt *gvt = vgpu->gvt;
 
-       mutex_lock(&vgpu->vgpu_lock);
-
        WARN(vgpu->active, "vGPU is still active!\n");
 
+       /*
+        * remove idr first so later clean can judge if need to stop
+        * service if no active vgpu.
+        */
+       mutex_lock(&gvt->lock);
+       idr_remove(&gvt->vgpu_idr, vgpu->id);
+       mutex_unlock(&gvt->lock);
+
+       mutex_lock(&vgpu->vgpu_lock);
        intel_gvt_debugfs_remove_vgpu(vgpu);
        intel_vgpu_clean_sched_policy(vgpu);
        intel_vgpu_clean_submission(vgpu);
@@ -290,7 +297,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
        mutex_unlock(&vgpu->vgpu_lock);
 
        mutex_lock(&gvt->lock);
-       idr_remove(&gvt->vgpu_idr, vgpu->id);
        if (idr_is_empty(&gvt->vgpu_idr))
                intel_gvt_clean_irq(gvt);
        intel_gvt_update_vgpu_types(gvt);
index dcaa85a..a18b2a2 100644 (file)
@@ -527,19 +527,31 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
        return NOTIFY_DONE;
 }
 
+static void irq_semaphore_cb(struct irq_work *wrk)
+{
+       struct i915_request *rq =
+               container_of(wrk, typeof(*rq), semaphore_work);
+
+       i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
+       i915_request_put(rq);
+}
+
 static int __i915_sw_fence_call
 semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 {
-       struct i915_request *request =
-               container_of(fence, typeof(*request), semaphore);
+       struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
 
        switch (state) {
        case FENCE_COMPLETE:
-               i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
+               if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
+                       i915_request_get(rq);
+                       init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
+                       irq_work_queue(&rq->semaphore_work);
+               }
                break;
 
        case FENCE_FREE:
-               i915_request_put(request);
+               i915_request_put(rq);
                break;
        }
 
@@ -776,8 +788,8 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
        struct dma_fence *fence;
        int err;
 
-       GEM_BUG_ON(i915_request_timeline(rq) ==
-                  rcu_access_pointer(signal->timeline));
+       if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
+               return 0;
 
        if (i915_request_started(signal))
                return 0;
@@ -821,7 +833,7 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
                return 0;
 
        err = 0;
-       if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
+       if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
                err = i915_sw_fence_await_dma_fence(&rq->submit,
                                                    fence, 0,
                                                    I915_FENCE_GFP);
@@ -1318,9 +1330,9 @@ void __i915_request_queue(struct i915_request *rq,
         * decide whether to preempt the entire chain so that it is ready to
         * run at the earliest possible convenience.
         */
-       i915_sw_fence_commit(&rq->semaphore);
        if (attr && rq->engine->schedule)
                rq->engine->schedule(rq, attr);
+       i915_sw_fence_commit(&rq->semaphore);
        i915_sw_fence_commit(&rq->submit);
 }
 
index f57eadc..fccc339 100644 (file)
@@ -26,6 +26,7 @@
 #define I915_REQUEST_H
 
 #include <linux/dma-fence.h>
+#include <linux/irq_work.h>
 #include <linux/lockdep.h>
 
 #include "gem/i915_gem_context_types.h"
@@ -208,6 +209,7 @@ struct i915_request {
        };
        struct list_head execute_cb;
        struct i915_sw_fence semaphore;
+       struct irq_work semaphore_work;
 
        /*
         * A list of everyone we wait upon, and everyone who waits upon us.
index b0ade76..d34141f 100644 (file)
@@ -234,6 +234,11 @@ static inline u64 ptr_to_u64(const void *ptr)
        __idx;                                                          \
 })
 
+static inline bool is_power_of_2_u64(u64 n)
+{
+       return (n != 0 && ((n & (n - 1)) == 0));
+}
+
 static inline void __list_del_many(struct list_head *head,
                                   struct list_head *first)
 {
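
The new helper exists because the generic is_power_of_2() takes an unsigned long, which is 32 bits on some builds, so a 64-bit alignment value would be silently truncated before the test. The check itself is the usual n & (n - 1) trick, widened to 64 bits; a standalone equivalent:

    #include <stdbool.h>
    #include <stdint.h>

    static inline bool is_power_of_2_u64(uint64_t n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }

    /* e.g. is_power_of_2_u64(1ULL << 40) is true, while a test that truncates
     * its argument to 32 bits would see 0 and wrongly report false. */
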
index 3b92311..b3380ff 100644 (file)
@@ -528,7 +528,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 
        r = -ENOMEM;
        nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
-       if (nents != ttm->sg->nents)
+       if (nents == 0)
                goto release_sg;
 
        drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
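
The radeon fix reflects the dma_map_sg() contract: the function returns the number of mapped entries, which may be smaller than the number submitted when an IOMMU merges segments, and only a return of zero means failure. A small sketch of the corrected error check, with a stub standing in for the real mapping call:

    #include <errno.h>

    static int map_sg_stub(int nents)
    {
            /* Pretend the IOMMU merged two adjacent segments. */
            return nents > 1 ? nents - 1 : nents;
    }

    static int pin_userptr(int nents)
    {
            int mapped = map_sg_stub(nents);

            /* Fewer entries than submitted is fine; only zero is an error. */
            if (mapped == 0)
                    return -ENOMEM;
            return 0;
    }
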
index 71ce621..60c4c6a 100644 (file)
@@ -661,7 +661,9 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 
        trace_drm_sched_process_job(s_fence);
 
+       dma_fence_get(&s_fence->finished);
        drm_sched_fence_finished(s_fence);
+       dma_fence_put(&s_fence->finished);
        wake_up_interruptible(&sched->wake_up_worker);
 }
 
index 2aa4ed1..85a054f 100644 (file)
@@ -533,6 +533,8 @@ static const struct hid_device_id hammer_devices[] = {
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MOONBALL) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WAND) },
index 3a400ce..9f22134 100644 (file)
 #define USB_DEVICE_ID_GOOGLE_WHISKERS  0x5030
 #define USB_DEVICE_ID_GOOGLE_MASTERBALL        0x503c
 #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
+#define USB_DEVICE_ID_GOOGLE_MOONBALL  0x5044
 
 #define USB_VENDOR_ID_GOTOP            0x08f2
 #define USB_DEVICE_ID_SUPER_Q2         0x007f
 #define USB_DEVICE_ID_LENOVO_X1_COVER  0x6085
 #define USB_DEVICE_ID_LENOVO_X1_TAB    0x60a3
 #define USB_DEVICE_ID_LENOVO_X1_TAB3   0x60b5
+#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D     0x608d
 
 #define USB_VENDOR_ID_LG               0x1fd2
 #define USB_DEVICE_ID_LG_MULTITOUCH    0x0064
index a549c42..33c102a 100644 (file)
@@ -458,9 +458,9 @@ static ssize_t picolcd_fb_update_rate_show(struct device *dev,
                if (ret >= PAGE_SIZE)
                        break;
                else if (i == fb_update_rate)
-                       ret += snprintf(buf+ret, PAGE_SIZE-ret, "[%u] ", i);
+                       ret += scnprintf(buf+ret, PAGE_SIZE-ret, "[%u] ", i);
                else
-                       ret += snprintf(buf+ret, PAGE_SIZE-ret, "%u ", i);
+                       ret += scnprintf(buf+ret, PAGE_SIZE-ret, "%u ", i);
        if (ret > 0)
                buf[min(ret, (size_t)PAGE_SIZE)-1] = '\n';
        return ret;
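
The snprintf()-to-scnprintf() conversions in this and the following hunk matter because snprintf() returns the length that would have been written, so an accumulated offset can run past the end of the buffer; scnprintf() returns only what actually fit. A minimal sketch of the safe accumulation loop (stand-in function name, not the driver code):

    #include <linux/kernel.h>
    #include <linux/types.h>

    /* Append "1 2 3 ..." without ever indexing past the end of buf. */
    static ssize_t fill_rates(char *buf, size_t size, unsigned int max_rate)
    {
            size_t ret = 0;
            unsigned int i;

            for (i = 1; i <= max_rate && ret < size; i++)
                    ret += scnprintf(buf + ret, size - ret, "%u ", i);

            return ret;
    }
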
index 0e7b2d9..3735546 100644 (file)
@@ -103,6 +103,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS), HID_QUIRK_NOGET },
index fb827c2..4d25577 100644 (file)
@@ -313,7 +313,7 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
 
                        while (i < ret) {
                                if (i + attribute->size > ret) {
-                                       len += snprintf(&buf[len],
+                                       len += scnprintf(&buf[len],
                                                        PAGE_SIZE - len,
                                                        "%d ", values[i]);
                                        break;
@@ -336,10 +336,10 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
                                        ++i;
                                        break;
                                }
-                               len += snprintf(&buf[len], PAGE_SIZE - len,
+                               len += scnprintf(&buf[len], PAGE_SIZE - len,
                                                "%lld ", value);
                        }
-                       len += snprintf(&buf[len], PAGE_SIZE - len, "\n");
+                       len += scnprintf(&buf[len], PAGE_SIZE - len, "\n");
 
                        return len;
                } else if (input)
index 8e48c74..255f8f4 100644 (file)
@@ -718,9 +718,6 @@ static int msc_win_set_lockout(struct msc_window *win,
 
        if (old != expect) {
                ret = -EINVAL;
-               dev_warn_ratelimited(msc_dev(win->msc),
-                                    "expected lockout state %d, got %d\n",
-                                    expect, old);
                goto unlock;
        }
 
@@ -741,6 +738,10 @@ unlock:
                /* from intel_th_msc_window_unlock(), don't warn if not locked */
                if (expect == WIN_LOCKED && old == new)
                        return 0;
+
+               dev_warn_ratelimited(msc_dev(win->msc),
+                                    "expected lockout state %d, got %d\n",
+                                    expect, old);
        }
 
        return ret;
@@ -760,7 +761,7 @@ static int msc_configure(struct msc *msc)
        lockdep_assert_held(&msc->buf_mutex);
 
        if (msc->mode > MSC_MODE_MULTI)
-               return -ENOTSUPP;
+               return -EINVAL;
 
        if (msc->mode == MSC_MODE_MULTI) {
                if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
@@ -1294,7 +1295,7 @@ static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
        } else if (msc->mode == MSC_MODE_MULTI) {
                ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
        } else {
-               ret = -ENOTSUPP;
+               ret = -EINVAL;
        }
 
        if (!ret) {
@@ -1530,7 +1531,7 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
                if (ret >= 0)
                        *ppos = iter->offset;
        } else {
-               ret = -ENOTSUPP;
+               ret = -EINVAL;
        }
 
 put_count:
index e9d90b5..86aa6a4 100644 (file)
@@ -235,6 +235,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
        {
+               /* Elkhart Lake CPU */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4529),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
+       {
                /* Elkhart Lake */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
index b178a54..360b5c0 100644 (file)
@@ -238,7 +238,7 @@ static struct configfs_attribute *sys_t_policy_attrs[] = {
 static inline bool sys_t_need_ts(struct sys_t_output *op)
 {
        if (op->node.ts_interval &&
-           time_after(op->ts_jiffies + op->node.ts_interval, jiffies)) {
+           time_after(jiffies, op->ts_jiffies + op->node.ts_interval)) {
                op->ts_jiffies = jiffies;
 
                return true;
@@ -250,8 +250,8 @@ static inline bool sys_t_need_ts(struct sys_t_output *op)
 static bool sys_t_need_clock_sync(struct sys_t_output *op)
 {
        if (op->node.clocksync_interval &&
-           time_after(op->clocksync_jiffies + op->node.clocksync_interval,
-                      jiffies)) {
+           time_after(jiffies,
+                      op->clocksync_jiffies + op->node.clocksync_interval)) {
                op->clocksync_jiffies = jiffies;
 
                return true;
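
The sys-t fix relies on time_after(a, b) being true when a is chronologically after b, wrap-safe for jiffies; putting jiffies first asks "has the interval already elapsed?", which is what the timestamp and clock-sync rate limiting intend. A one-line sketch of that test:

    #include <linux/jiffies.h>
    #include <linux/types.h>

    /* True once "interval" jiffies have passed since "last". */
    static bool interval_elapsed(unsigned long last, unsigned long interval)
    {
            return time_after(jiffies, last + interval);
    }
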
index 050adda..05b35ac 100644 (file)
@@ -313,6 +313,7 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
        pm_runtime_get_noresume(&pdev->dev);
 
        i2c_del_adapter(&dev->adapter);
+       devm_free_irq(&pdev->dev, dev->irq, dev);
        pci_free_irq_vectors(pdev);
 }
 
index 3a9e840..a4a6825 100644 (file)
@@ -348,7 +348,7 @@ static struct gpio_desc *i2c_gpio_get_desc(struct device *dev,
        if (ret == -ENOENT)
                retdesc = ERR_PTR(-EPROBE_DEFER);
 
-       if (ret != -EPROBE_DEFER)
+       if (PTR_ERR(retdesc) != -EPROBE_DEFER)
                dev_err(dev, "error trying to get descriptor: %d\n", ret);
 
        return retdesc;
index 8497c7a..224f830 100644 (file)
@@ -477,6 +477,7 @@ static int hix5hd2_i2c_remove(struct platform_device *pdev)
        i2c_del_adapter(&priv->adap);
        pm_runtime_disable(priv->dev);
        pm_runtime_set_suspended(priv->dev);
+       clk_disable_unprepare(priv->clk);
 
        return 0;
 }
index ca4f096..a9c03f5 100644 (file)
 #define TCOBASE                0x050
 #define TCOCTL         0x054
 
-#define ACPIBASE               0x040
-#define ACPIBASE_SMI_OFF       0x030
-#define ACPICTRL               0x044
-#define ACPICTRL_EN            0x080
-
 #define SBREG_BAR              0x10
 #define SBREG_SMBCTRL          0xc6000c
 #define SBREG_SMBCTRL_DNV      0xcf000c
@@ -1553,7 +1548,7 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
                pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, hidden);
        spin_unlock(&p2sb_spinlock);
 
-       res = &tco_res[ICH_RES_MEM_OFF];
+       res = &tco_res[1];
        if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
                res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
        else
@@ -1563,7 +1558,7 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
        res->flags = IORESOURCE_MEM;
 
        return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
-                                       tco_res, 3, &spt_tco_platform_data,
+                                       tco_res, 2, &spt_tco_platform_data,
                                        sizeof(spt_tco_platform_data));
 }
 
@@ -1576,17 +1571,16 @@ static struct platform_device *
 i801_add_tco_cnl(struct i801_priv *priv, struct pci_dev *pci_dev,
                 struct resource *tco_res)
 {
-       return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
-                                       tco_res, 2, &cnl_tco_platform_data,
-                                       sizeof(cnl_tco_platform_data));
+       return platform_device_register_resndata(&pci_dev->dev,
+                       "iTCO_wdt", -1, tco_res, 1, &cnl_tco_platform_data,
+                       sizeof(cnl_tco_platform_data));
 }
 
 static void i801_add_tco(struct i801_priv *priv)
 {
-       u32 base_addr, tco_base, tco_ctl, ctrl_val;
        struct pci_dev *pci_dev = priv->pci_dev;
-       struct resource tco_res[3], *res;
-       unsigned int devfn;
+       struct resource tco_res[2], *res;
+       u32 tco_base, tco_ctl;
 
        /* If we have ACPI based watchdog use that instead */
        if (acpi_has_watchdog())
@@ -1601,30 +1595,15 @@ static void i801_add_tco(struct i801_priv *priv)
                return;
 
        memset(tco_res, 0, sizeof(tco_res));
-
-       res = &tco_res[ICH_RES_IO_TCO];
-       res->start = tco_base & ~1;
-       res->end = res->start + 32 - 1;
-       res->flags = IORESOURCE_IO;
-
        /*
-        * Power Management registers.
+        * Always populate the main iTCO IO resource here. The second entry
+        * for NO_REBOOT MMIO is filled by the SPT specific function.
         */
-       devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 2);
-       pci_bus_read_config_dword(pci_dev->bus, devfn, ACPIBASE, &base_addr);
-
-       res = &tco_res[ICH_RES_IO_SMI];
-       res->start = (base_addr & ~1) + ACPIBASE_SMI_OFF;
-       res->end = res->start + 3;
+       res = &tco_res[0];
+       res->start = tco_base & ~1;
+       res->end = res->start + 32 - 1;
        res->flags = IORESOURCE_IO;
 
-       /*
-        * Enable the ACPI I/O space.
-        */
-       pci_bus_read_config_dword(pci_dev->bus, devfn, ACPICTRL, &ctrl_val);
-       ctrl_val |= ACPICTRL_EN;
-       pci_bus_write_config_dword(pci_dev->bus, devfn, ACPICTRL, ctrl_val);
-
        if (priv->features & FEATURE_TCO_CNL)
                priv->tco_pdev = i801_add_tco_cnl(priv, pci_dev, tco_res);
        else
index 62e18b4..f5d25ce 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/delay.h>
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
@@ -75,20 +76,15 @@ static void gpu_enable_i2c_bus(struct gpu_i2c_dev *i2cd)
 
 static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd)
 {
-       unsigned long target = jiffies + msecs_to_jiffies(1000);
        u32 val;
+       int ret;
 
-       do {
-               val = readl(i2cd->regs + I2C_MST_CNTL);
-               if (!(val & I2C_MST_CNTL_CYCLE_TRIGGER))
-                       break;
-               if ((val & I2C_MST_CNTL_STATUS) !=
-                               I2C_MST_CNTL_STATUS_BUS_BUSY)
-                       break;
-               usleep_range(500, 600);
-       } while (time_is_after_jiffies(target));
-
-       if (time_is_before_jiffies(target)) {
+       ret = readl_poll_timeout(i2cd->regs + I2C_MST_CNTL, val,
+                                !(val & I2C_MST_CNTL_CYCLE_TRIGGER) ||
+                                (val & I2C_MST_CNTL_STATUS) != I2C_MST_CNTL_STATUS_BUS_BUSY,
+                                500, 1000 * USEC_PER_MSEC);
+
+       if (ret) {
                dev_err(i2cd->dev, "i2c timeout error %x\n", val);
                return -ETIMEDOUT;
        }
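
The open-coded polling loop gives way to readl_poll_timeout() from <linux/iopoll.h>, which re-reads the register with a sleep between attempts and returns -ETIMEDOUT (leaving the last value in val) if the condition never becomes true. A small sketch under an assumed STATUS_READY bit, not the GPU driver's real register layout:

    #include <linux/bits.h>
    #include <linux/iopoll.h>
    #include <linux/types.h>

    #define STATUS_READY    BIT(0)  /* made-up bit, for illustration only */

    static int wait_ready(void __iomem *reg)
    {
            u32 val;

            /* Poll every 500us, give up after 100ms. */
            return readl_poll_timeout(reg, val, val & STATUS_READY,
                                      500, 100000);
    }
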
index a7a8184..635dd69 100644 (file)
@@ -140,7 +140,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev)
        int ret = 0;
        int irq;
 
-       irq = platform_get_irq(pdev, 0);
+       irq = platform_get_irq_optional(pdev, 0);
        /* If irq is 0, we do polling. */
        if (irq < 0)
                irq = 0;
index 54e1fc8..f7f7b5b 100644 (file)
@@ -434,6 +434,7 @@ static void st_i2c_wr_fill_tx_fifo(struct st_i2c_dev *i2c_dev)
 /**
  * st_i2c_rd_fill_tx_fifo() - Fill the Tx FIFO in read mode
  * @i2c_dev: Controller's private data
+ * @max: Maximum amount of data to fill into the Tx FIFO
  *
  * This functions fills the Tx FIFO with fixed pattern when
  * in read mode to trigger clock.
index 8f3dbc9..8b0ff78 100644 (file)
@@ -394,9 +394,17 @@ EXPORT_SYMBOL_GPL(i2c_acpi_find_adapter_by_handle);
 static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
 {
        struct device *dev;
+       struct i2c_client *client;
 
        dev = bus_find_device_by_acpi_dev(&i2c_bus_type, adev);
-       return dev ? i2c_verify_client(dev) : NULL;
+       if (!dev)
+               return NULL;
+
+       client = i2c_verify_client(dev);
+       if (!client)
+               put_device(dev);
+
+       return client;
 }
 
 static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
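
The lookup fix balances the reference that bus_find_device_by_acpi_dev() takes on the returned device: if that device turns out not to be an i2c_client, the reference has to be dropped before bailing out. A reduced sketch of the pattern (helper name is invented):

    #include <linux/device.h>
    #include <linux/i2c.h>

    static struct i2c_client *client_from_dev(struct device *dev)
    {
            struct i2c_client *client;

            if (!dev)
                    return NULL;

            client = i2c_verify_client(dev);
            if (!client)
                    put_device(dev);        /* undo the lookup's get */

            return client;                  /* caller drops the reference later */
    }
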
index 9e2e140..bb8e60d 100644 (file)
@@ -213,40 +213,34 @@ i3c_device_match_id(struct i3c_device *i3cdev,
 {
        struct i3c_device_info devinfo;
        const struct i3c_device_id *id;
+       u16 manuf, part, ext_info;
+       bool rndpid;
 
        i3c_device_get_info(i3cdev, &devinfo);
 
-       /*
-        * The lower 32bits of the provisional ID is just filled with a random
-        * value, try to match using DCR info.
-        */
-       if (!I3C_PID_RND_LOWER_32BITS(devinfo.pid)) {
-               u16 manuf = I3C_PID_MANUF_ID(devinfo.pid);
-               u16 part = I3C_PID_PART_ID(devinfo.pid);
-               u16 ext_info = I3C_PID_EXTRA_INFO(devinfo.pid);
-
-               /* First try to match by manufacturer/part ID. */
-               for (id = id_table; id->match_flags != 0; id++) {
-                       if ((id->match_flags & I3C_MATCH_MANUF_AND_PART) !=
-                           I3C_MATCH_MANUF_AND_PART)
-                               continue;
-
-                       if (manuf != id->manuf_id || part != id->part_id)
-                               continue;
-
-                       if ((id->match_flags & I3C_MATCH_EXTRA_INFO) &&
-                           ext_info != id->extra_info)
-                               continue;
-
-                       return id;
-               }
-       }
+       manuf = I3C_PID_MANUF_ID(devinfo.pid);
+       part = I3C_PID_PART_ID(devinfo.pid);
+       ext_info = I3C_PID_EXTRA_INFO(devinfo.pid);
+       rndpid = I3C_PID_RND_LOWER_32BITS(devinfo.pid);
 
-       /* Fallback to DCR match. */
        for (id = id_table; id->match_flags != 0; id++) {
                if ((id->match_flags & I3C_MATCH_DCR) &&
-                   id->dcr == devinfo.dcr)
-                       return id;
+                   id->dcr != devinfo.dcr)
+                       continue;
+
+               if ((id->match_flags & I3C_MATCH_MANUF) &&
+                   id->manuf_id != manuf)
+                       continue;
+
+               if ((id->match_flags & I3C_MATCH_PART) &&
+                   (rndpid || id->part_id != part))
+                       continue;
+
+               if ((id->match_flags & I3C_MATCH_EXTRA_INFO) &&
+                   (rndpid || id->extra_info != ext_info))
+                       continue;
+
+               return id;
        }
 
        return NULL;
index 7f8f896..d79cd6d 100644 (file)
@@ -241,12 +241,34 @@ out:
 }
 static DEVICE_ATTR_RO(hdrcap);
 
+static ssize_t modalias_show(struct device *dev,
+                            struct device_attribute *da, char *buf)
+{
+       struct i3c_device *i3c = dev_to_i3cdev(dev);
+       struct i3c_device_info devinfo;
+       u16 manuf, part, ext;
+
+       i3c_device_get_info(i3c, &devinfo);
+       manuf = I3C_PID_MANUF_ID(devinfo.pid);
+       part = I3C_PID_PART_ID(devinfo.pid);
+       ext = I3C_PID_EXTRA_INFO(devinfo.pid);
+
+       if (I3C_PID_RND_LOWER_32BITS(devinfo.pid))
+               return sprintf(buf, "i3c:dcr%02Xmanuf%04X", devinfo.dcr,
+                              manuf);
+
+       return sprintf(buf, "i3c:dcr%02Xmanuf%04Xpart%04Xext%04X",
+                      devinfo.dcr, manuf, part, ext);
+}
+static DEVICE_ATTR_RO(modalias);
+
 static struct attribute *i3c_device_attrs[] = {
        &dev_attr_bcr.attr,
        &dev_attr_dcr.attr,
        &dev_attr_pid.attr,
        &dev_attr_dynamic_address.attr,
        &dev_attr_hdrcap.attr,
+       &dev_attr_modalias.attr,
        NULL,
 };
 ATTRIBUTE_GROUPS(i3c_device);
@@ -267,7 +289,7 @@ static int i3c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
                                      devinfo.dcr, manuf);
 
        return add_uevent_var(env,
-                             "MODALIAS=i3c:dcr%02Xmanuf%04Xpart%04xext%04x",
+                             "MODALIAS=i3c:dcr%02Xmanuf%04Xpart%04Xext%04X",
                              devinfo.dcr, manuf, part, ext);
 }
 
@@ -1953,7 +1975,7 @@ of_i3c_master_add_i2c_boardinfo(struct i3c_master_controller *master,
         * DEFSLVS command.
         */
        if (boardinfo->base.flags & I2C_CLIENT_TEN) {
-               dev_err(&master->dev, "I2C device with 10 bit address not supported.");
+               dev_err(dev, "I2C device with 10 bit address not supported.");
                return -ENOTSUPP;
        }
 
@@ -2138,7 +2160,7 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
         * correctly even if one or more i2c devices are not registered.
         */
        i3c_bus_for_each_i2cdev(&master->bus, i2cdev)
-               i2cdev->dev = i2c_new_device(adap, &i2cdev->boardinfo->base);
+               i2cdev->dev = i2c_new_client_device(adap, &i2cdev->boardinfo->base);
 
        return 0;
 }
index bd26c3b..5c5306c 100644 (file)
@@ -221,7 +221,7 @@ struct dw_i3c_xfer {
        struct completion comp;
        int ret;
        unsigned int ncmds;
-       struct dw_i3c_cmd cmds[0];
+       struct dw_i3c_cmd cmds[];
 };
 
 struct dw_i3c_master {
index 5471279..3fee8bd 100644 (file)
@@ -388,7 +388,7 @@ struct cdns_i3c_xfer {
        struct completion comp;
        int ret;
        unsigned int ncmds;
-       struct cdns_i3c_cmd cmds[0];
+       struct cdns_i3c_cmd cmds[];
 };
 
 struct cdns_i3c_data {
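
Both i3c hunks replace the trailing cmds[0] array with a C99 flexible array member, the form the kernel has been converging on; allocations for such structures are usually sized with struct_size() so the element count cannot overflow. A generic sketch with stand-in structure names:

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct cmd {
            u32 word;
    };

    struct xfer {
            unsigned int ncmds;
            struct cmd cmds[];      /* flexible array member, must be last */
    };

    static struct xfer *alloc_xfer(unsigned int ncmds)
    {
            struct xfer *xfer;

            /* struct_size() = sizeof(*xfer) + ncmds * sizeof(xfer->cmds[0]), overflow-checked */
            xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
            if (xfer)
                    xfer->ncmds = ncmds;

            return xfer;
    }
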
index 67b8817..60daf04 100644 (file)
@@ -237,6 +237,7 @@ static const struct adxl372_axis_lookup adxl372_axis_lookup_table[] = {
                .realbits = 12,                                         \
                .storagebits = 16,                                      \
                .shift = 4,                                             \
+               .endianness = IIO_BE,                                   \
        },                                                              \
 }
 
index 633955d..849cf74 100644 (file)
@@ -110,7 +110,7 @@ MODULE_DEVICE_TABLE(of, st_accel_of_match);
 
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id st_accel_acpi_match[] = {
-       {"SMO8840", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
+       {"SMO8840", (kernel_ulong_t)LIS2DH12_ACCEL_DEV_NAME},
        {"SMO8A90", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
        { },
 };
index a5c7771..9d96f7d 100644 (file)
@@ -723,6 +723,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
 
        for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
                struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit);
+               u32 cor;
 
                if (!chan)
                        continue;
@@ -732,6 +733,20 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
                        continue;
 
                if (state) {
+                       cor = at91_adc_readl(st, AT91_SAMA5D2_COR);
+
+                       if (chan->differential)
+                               cor |= (BIT(chan->channel) |
+                                       BIT(chan->channel2)) <<
+                                       AT91_SAMA5D2_COR_DIFF_OFFSET;
+                       else
+                               cor &= ~(BIT(chan->channel) <<
+                                      AT91_SAMA5D2_COR_DIFF_OFFSET);
+
+                       at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
+               }
+
+               if (state) {
                        at91_adc_writel(st, AT91_SAMA5D2_CHER,
                                        BIT(chan->channel));
                        /* enable irq only if not using DMA */
index 2aad2cd..76a60d9 100644 (file)
@@ -842,31 +842,6 @@ static inline void stm32_dfsdm_process_data(struct stm32_dfsdm_adc *adc,
        }
 }
 
-static irqreturn_t stm32_dfsdm_adc_trigger_handler(int irq, void *p)
-{
-       struct iio_poll_func *pf = p;
-       struct iio_dev *indio_dev = pf->indio_dev;
-       struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
-       int available = stm32_dfsdm_adc_dma_residue(adc);
-
-       while (available >= indio_dev->scan_bytes) {
-               s32 *buffer = (s32 *)&adc->rx_buf[adc->bufi];
-
-               stm32_dfsdm_process_data(adc, buffer);
-
-               iio_push_to_buffers_with_timestamp(indio_dev, buffer,
-                                                  pf->timestamp);
-               available -= indio_dev->scan_bytes;
-               adc->bufi += indio_dev->scan_bytes;
-               if (adc->bufi >= adc->buf_sz)
-                       adc->bufi = 0;
-       }
-
-       iio_trigger_notify_done(indio_dev->trig);
-
-       return IRQ_HANDLED;
-}
-
 static void stm32_dfsdm_dma_buffer_done(void *data)
 {
        struct iio_dev *indio_dev = data;
@@ -874,11 +849,6 @@ static void stm32_dfsdm_dma_buffer_done(void *data)
        int available = stm32_dfsdm_adc_dma_residue(adc);
        size_t old_pos;
 
-       if (indio_dev->currentmode & INDIO_BUFFER_TRIGGERED) {
-               iio_trigger_poll_chained(indio_dev->trig);
-               return;
-       }
-
        /*
         * FIXME: In Kernel interface does not support cyclic DMA buffer,and
         * offers only an interface to push data samples per samples.
@@ -906,7 +876,15 @@ static void stm32_dfsdm_dma_buffer_done(void *data)
                        adc->bufi = 0;
                        old_pos = 0;
                }
-               /* regular iio buffer without trigger */
+               /*
+                * In DMA mode the trigger services of IIO are not used
+                * (e.g. no call to iio_trigger_poll).
+                * Calling irq handler associated to the hardware trigger is not
+                * relevant as the conversions have already been done. Data
+                * transfers are performed directly in DMA callback instead.
+                * This implementation avoids to call trigger irq handler that
+                * may sleep, in an atomic context (DMA irq handler context).
+                */
                if (adc->dev_data->type == DFSDM_IIO)
                        iio_push_to_buffers(indio_dev, buffer);
        }
@@ -1536,8 +1514,7 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
        }
 
        ret = iio_triggered_buffer_setup(indio_dev,
-                                        &iio_pollfunc_store_time,
-                                        &stm32_dfsdm_adc_trigger_handler,
+                                        &iio_pollfunc_store_time, NULL,
                                         &stm32_dfsdm_buffer_setup_ops);
        if (ret) {
                stm32_dfsdm_dma_release(indio_dev);
index 0b91de4..a7e65a5 100644 (file)
@@ -91,6 +91,8 @@ config SPS30
        tristate "SPS30 particulate matter sensor"
        depends on I2C
        select CRC8
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
        help
          Say Y here to build support for the Sensirion SPS30 particulate
          matter sensor.
index b0e241a..e5b00a6 100644 (file)
@@ -167,16 +167,17 @@ static int vcnl4200_init(struct vcnl4000_data *data)
        data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
        switch (id) {
        case VCNL4200_PROD_ID:
-               /* Integration time is 50ms, but the experiments */
-               /* show 54ms in total. */
-               data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000);
-               data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000);
+               /* Default wait time is 50ms, add 20% tolerance. */
+               data->vcnl4200_al.sampling_rate = ktime_set(0, 60000 * 1000);
+               /* Default wait time is 4.8ms, add 20% tolerance. */
+               data->vcnl4200_ps.sampling_rate = ktime_set(0, 5760 * 1000);
                data->al_scale = 24000;
                break;
        case VCNL4040_PROD_ID:
-               /* Integration time is 80ms, add 10ms. */
-               data->vcnl4200_al.sampling_rate = ktime_set(0, 100000 * 1000);
-               data->vcnl4200_ps.sampling_rate = ktime_set(0, 100000 * 1000);
+               /* Default wait time is 80ms, add 20% tolerance. */
+               data->vcnl4200_al.sampling_rate = ktime_set(0, 96000 * 1000);
+               /* Default wait time is 5ms, add 20% tolerance. */
+               data->vcnl4200_ps.sampling_rate = ktime_set(0, 6000 * 1000);
                data->al_scale = 120000;
                break;
        }
index fc7e910..d329967 100644 (file)
@@ -564,7 +564,7 @@ static int ak8974_read_raw(struct iio_dev *indio_dev,
                 * We read all axes and discard all but one, for optimized
                 * reading, use the triggered buffer.
                 */
-               *val = le16_to_cpu(hw_values[chan->address]);
+               *val = (s16)le16_to_cpu(hw_values[chan->address]);
 
                ret = IIO_VAL_INT;
        }
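
The ak8974 fix casts through s16 because le16_to_cpu() yields an unsigned 16-bit value; without the cast, negative field readings are zero-extended into large positive integers before landing in *val. A minimal sketch of the conversion:

    #include <asm/byteorder.h>
    #include <linux/types.h>

    /* Interpret a little-endian 16-bit register value as a signed reading. */
    static int axis_to_int(__le16 raw)
    {
            return (s16)le16_to_cpu(raw);
    }
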
index 34aff10..12b893c 100644 (file)
@@ -269,7 +269,7 @@ static const struct iio_chan_spec ping_chan_spec[] = {
 
 static const struct of_device_id of_ping_match[] = {
        { .compatible = "parallax,ping", .data = &pa_ping_cfg},
-       { .compatible = "parallax,laserping", .data = &pa_ping_cfg},
+       { .compatible = "parallax,laserping", .data = &pa_laser_ping_cfg},
        {},
 };
 
index 2e0d32a..2f82e8c 100644 (file)
@@ -161,7 +161,8 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
        return 0;
 }
 
-static void stm32_timer_stop(struct stm32_timer_trigger *priv)
+static void stm32_timer_stop(struct stm32_timer_trigger *priv,
+                            struct iio_trigger *trig)
 {
        u32 ccer, cr1;
 
@@ -179,6 +180,12 @@ static void stm32_timer_stop(struct stm32_timer_trigger *priv)
        regmap_write(priv->regmap, TIM_PSC, 0);
        regmap_write(priv->regmap, TIM_ARR, 0);
 
+       /* Force disable master mode */
+       if (stm32_timer_is_trgo2_name(trig->name))
+               regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2, 0);
+       else
+               regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS, 0);
+
        /* Make sure that registers are updated */
        regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
 }
@@ -197,7 +204,7 @@ static ssize_t stm32_tt_store_frequency(struct device *dev,
                return ret;
 
        if (freq == 0) {
-               stm32_timer_stop(priv);
+               stm32_timer_stop(priv, trig);
        } else {
                ret = stm32_timer_start(priv, trig, freq);
                if (ret)
index f6c2552..d0b3d35 100644 (file)
@@ -896,7 +896,9 @@ static int add_one_compat_dev(struct ib_device *device,
        cdev->dev.parent = device->dev.parent;
        rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
        cdev->dev.release = compatdev_release;
-       dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
+       ret = dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
+       if (ret)
+               goto add_err;
 
        ret = device_add(&cdev->dev);
        if (ret)
index e0b0a91..9eec26d 100644 (file)
@@ -918,6 +918,10 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 
                nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
                            IB_DEVICE_NAME_MAX);
+               if (strlen(name) == 0) {
+                       err = -EINVAL;
+                       goto done;
+               }
                err = ib_device_rename(device, name);
                goto done;
        }
@@ -1514,7 +1518,7 @@ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
                    sizeof(ibdev_name));
-       if (strchr(ibdev_name, '%'))
+       if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0)
                return -EINVAL;
 
        nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
index 2d56083..75e7ec0 100644 (file)
@@ -349,16 +349,11 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
        else if (qp_pps)
                new_pps->main.pkey_index = qp_pps->main.pkey_index;
 
-       if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
+       if (((qp_attr_mask & IB_QP_PKEY_INDEX) &&
+            (qp_attr_mask & IB_QP_PORT)) ||
+           (qp_pps && qp_pps->main.state != IB_PORT_PKEY_NOT_VALID))
                new_pps->main.state = IB_PORT_PKEY_VALID;
 
-       if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) {
-               new_pps->main.port_num = qp_pps->main.port_num;
-               new_pps->main.pkey_index = qp_pps->main.pkey_index;
-               if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
-                       new_pps->main.state = IB_PORT_PKEY_VALID;
-       }
-
        if (qp_attr_mask & IB_QP_ALT_PATH) {
                new_pps->alt.port_num = qp_attr->alt_port_num;
                new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
index cd656ad..3b1e627 100644 (file)
@@ -275,8 +275,8 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
                mmu_interval_notifier_remove(&umem_odp->notifier);
                kvfree(umem_odp->dma_list);
                kvfree(umem_odp->page_list);
-               put_pid(umem_odp->tgid);
        }
+       put_pid(umem_odp->tgid);
        kfree(umem_odp);
 }
 EXPORT_SYMBOL(ib_umem_odp_release);
index 1235ffb..da229ea 100644 (file)
@@ -1129,17 +1129,30 @@ static const struct file_operations umad_sm_fops = {
        .llseek  = no_llseek,
 };
 
+static struct ib_umad_port *get_port(struct ib_device *ibdev,
+                                    struct ib_umad_device *umad_dev,
+                                    unsigned int port)
+{
+       if (!umad_dev)
+               return ERR_PTR(-EOPNOTSUPP);
+       if (!rdma_is_port_valid(ibdev, port))
+               return ERR_PTR(-EINVAL);
+       if (!rdma_cap_ib_mad(ibdev, port))
+               return ERR_PTR(-EOPNOTSUPP);
+
+       return &umad_dev->ports[port - rdma_start_port(ibdev)];
+}
+
 static int ib_umad_get_nl_info(struct ib_device *ibdev, void *client_data,
                               struct ib_client_nl_info *res)
 {
-       struct ib_umad_device *umad_dev = client_data;
+       struct ib_umad_port *port = get_port(ibdev, client_data, res->port);
 
-       if (!rdma_is_port_valid(ibdev, res->port))
-               return -EINVAL;
+       if (IS_ERR(port))
+               return PTR_ERR(port);
 
        res->abi = IB_USER_MAD_ABI_VERSION;
-       res->cdev = &umad_dev->ports[res->port - rdma_start_port(ibdev)].dev;
-
+       res->cdev = &port->dev;
        return 0;
 }
 
@@ -1154,15 +1167,13 @@ MODULE_ALIAS_RDMA_CLIENT("umad");
 static int ib_issm_get_nl_info(struct ib_device *ibdev, void *client_data,
                               struct ib_client_nl_info *res)
 {
-       struct ib_umad_device *umad_dev =
-               ib_get_client_data(ibdev, &umad_client);
+       struct ib_umad_port *port = get_port(ibdev, client_data, res->port);
 
-       if (!rdma_is_port_valid(ibdev, res->port))
-               return -EINVAL;
+       if (IS_ERR(port))
+               return PTR_ERR(port);
 
        res->abi = IB_USER_MAD_ABI_VERSION;
-       res->cdev = &umad_dev->ports[res->port - rdma_start_port(ibdev)].sm_dev;
-
+       res->cdev = &port->sm_dev;
        return 0;
 }
 
index c2f0d9b..13e4203 100644 (file)
@@ -141,6 +141,7 @@ static int defer_packet_queue(
         */
        xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
        if (list_empty(&pq->busy.list)) {
+               pq->busy.lock = &sde->waitlock;
                iowait_get_priority(&pq->busy);
                iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
        }
@@ -155,6 +156,7 @@ static void activate_packet_queue(struct iowait *wait, int reason)
 {
        struct hfi1_user_sdma_pkt_q *pq =
                container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
+       pq->busy.lock = NULL;
        xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
        wake_up(&wait->wait_dma);
 };
@@ -256,6 +258,21 @@ pq_reqs_nomem:
        return ret;
 }
 
+static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq)
+{
+       unsigned long flags;
+       seqlock_t *lock = pq->busy.lock;
+
+       if (!lock)
+               return;
+       write_seqlock_irqsave(lock, flags);
+       if (!list_empty(&pq->busy.list)) {
+               list_del_init(&pq->busy.list);
+               pq->busy.lock = NULL;
+       }
+       write_sequnlock_irqrestore(lock, flags);
+}
+
 int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
                               struct hfi1_ctxtdata *uctxt)
 {
@@ -281,6 +298,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
                kfree(pq->reqs);
                kfree(pq->req_in_use);
                kmem_cache_destroy(pq->txreq_cache);
+               flush_pq_iowait(pq);
                kfree(pq);
        } else {
                spin_unlock(&fd->pq_rcu_lock);
@@ -587,11 +605,12 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
                if (ret < 0) {
                        if (ret != -EBUSY)
                                goto free_req;
-                       wait_event_interruptible_timeout(
+                       if (wait_event_interruptible_timeout(
                                pq->busy.wait_dma,
-                               (pq->state == SDMA_PKT_Q_ACTIVE),
+                               pq->state == SDMA_PKT_Q_ACTIVE,
                                msecs_to_jiffies(
-                                       SDMA_IOWAIT_TIMEOUT));
+                                       SDMA_IOWAIT_TIMEOUT)) <= 0)
+                               flush_pq_iowait(pq);
                }
        }
        *count += idx;
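
The hfi1 change keys off the return value of wait_event_interruptible_timeout(): 0 means the timeout expired, a negative value means a signal interrupted the wait, and a positive value is the time remaining when the condition became true, so "<= 0" covers both failure cases that require flushing the iowait entry. A stripped-down sketch of that return-value handling (generic wait queue, not the driver's):

    #include <linux/jiffies.h>
    #include <linux/wait.h>

    /* True only if "*done" became true before the 100ms timeout. */
    static bool waited_ok(wait_queue_head_t *wq, bool *done)
    {
            long ret = wait_event_interruptible_timeout(*wq, *done,
                                                        msecs_to_jiffies(100));

            return ret > 0;
    }
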
index 367a71b..3dec3de 100644 (file)
@@ -330,6 +330,22 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
                dump_cqe(dev, cqe);
 }
 
+static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
+                          u16 tail, u16 head)
+{
+       u16 idx;
+
+       do {
+               idx = tail & (qp->sq.wqe_cnt - 1);
+               if (idx == head)
+                       break;
+
+               tail = qp->sq.w_list[idx].next;
+       } while (1);
+       tail = qp->sq.w_list[idx].next;
+       qp->sq.last_poll = tail;
+}
+
 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
 {
        mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
@@ -368,7 +384,7 @@ static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
 }
 
 static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
-                   int *npolled, int is_send)
+                   int *npolled, bool is_send)
 {
        struct mlx5_ib_wq *wq;
        unsigned int cur;
@@ -383,10 +399,16 @@ static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
                return;
 
        for (i = 0;  i < cur && np < num_entries; i++) {
-               wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+               unsigned int idx;
+
+               idx = (is_send) ? wq->last_poll : wq->tail;
+               idx &= (wq->wqe_cnt - 1);
+               wc->wr_id = wq->wrid[idx];
                wc->status = IB_WC_WR_FLUSH_ERR;
                wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
                wq->tail++;
+               if (is_send)
+                       wq->last_poll = wq->w_list[idx].next;
                np++;
                wc->qp = &qp->ibqp;
                wc++;
@@ -473,6 +495,7 @@ repoll:
                wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
                idx = wqe_ctr & (wq->wqe_cnt - 1);
                handle_good_req(wc, cqe64, wq, idx);
+               handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
                wc->wr_id = wq->wrid[idx];
                wq->tail = wq->wqe_head[idx] + 1;
                wc->status = IB_WC_SUCCESS;
index e4bcfa8..ffa7c21 100644 (file)
@@ -5722,9 +5722,10 @@ mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
        const struct mlx5_ib_counters *cnts =
                get_counters(dev, counter->port - 1);
 
-       /* Q counters are in the beginning of all counters */
        return rdma_alloc_hw_stats_struct(cnts->names,
-                                         cnts->num_q_counters,
+                                         cnts->num_q_counters +
+                                         cnts->num_cong_counters +
+                                         cnts->num_ext_ppcnt_counters,
                                          RDMA_HW_STATS_DEFAULT_LIFESPAN);
 }
 
index bb78142..f3bdbd5 100644 (file)
@@ -288,6 +288,7 @@ struct mlx5_ib_wq {
        unsigned                head;
        unsigned                tail;
        u16                     cur_post;
+       u16                     last_poll;
        void                    *cur_edge;
 };
 
index 957f3a5..8fe149e 100644 (file)
@@ -3775,6 +3775,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
                qp->sq.cur_post = 0;
                if (qp->sq.wqe_cnt)
                        qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
+               qp->sq.last_poll = 0;
                qp->db.db[MLX5_RCV_DBR] = 0;
                qp->db.db[MLX5_SND_DBR] = 0;
        }
@@ -6204,6 +6205,10 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
        if (udata->outlen && udata->outlen < min_resp_len)
                return ERR_PTR(-EINVAL);
 
+       if (!capable(CAP_SYS_RAWIO) &&
+           init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP)
+               return ERR_PTR(-EPERM);
+
        dev = to_mdev(pd->device);
        switch (init_attr->wq_type) {
        case IB_WQT_RQ:
index 13d7f66..5724cbb 100644 (file)
@@ -327,7 +327,7 @@ void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
        if (cq->ip)
                kref_put(&cq->ip->ref, rvt_release_mmap_info);
        else
-               vfree(cq->queue);
+               vfree(cq->kqueue);
 }
 
 /**
index fce43e6..3cfd2c1 100644 (file)
@@ -190,6 +190,7 @@ static void input_repeat_key(struct timer_list *t)
                        input_value_sync
                };
 
+               input_set_timestamp(dev, ktime_get());
                input_pass_values(dev, vals, ARRAY_SIZE(vals));
 
                if (dev->rep[REP_PERIOD])
index 14b55ba..fb078e0 100644 (file)
@@ -75,6 +75,14 @@ static struct touchkey_variant aries_touchkey_variant = {
        .cmd_led_off = ARIES_TOUCHKEY_CMD_LED_OFF,
 };
 
+static const struct touchkey_variant tc360_touchkey_variant = {
+       .keycode_reg = 0x00,
+       .base_reg = 0x00,
+       .fixed_regulator = true,
+       .cmd_led_on = TM2_TOUCHKEY_CMD_LED_ON,
+       .cmd_led_off = TM2_TOUCHKEY_CMD_LED_OFF,
+};
+
 static int tm2_touchkey_led_brightness_set(struct led_classdev *led_dev,
                                            enum led_brightness brightness)
 {
@@ -327,6 +335,9 @@ static const struct of_device_id tm2_touchkey_of_match[] = {
        }, {
                .compatible = "cypress,aries-touchkey",
                .data = &aries_touchkey_variant,
+       }, {
+               .compatible = "coreriver,tc360-touchkey",
+               .data = &tc360_touchkey_variant,
        },
        { },
 };
index 2c666fb..4d20362 100644 (file)
@@ -186,6 +186,7 @@ static const char * const smbus_pnp_ids[] = {
        "SYN3052", /* HP EliteBook 840 G4 */
        "SYN3221", /* HP 15-ay000 */
        "SYN323d", /* HP Spectre X360 13-w013dx */
+       "SYN3257", /* HP Envy 13-ad105ng */
        NULL
 };
 
index 6adea8a..ffa39ab 100644 (file)
@@ -1203,8 +1203,8 @@ static int rmi_f11_initialize(struct rmi_function *fn)
         * If distance threshold values are set, switch to reduced reporting
         * mode so they actually get used by the controller.
         */
-       if (ctrl->ctrl0_11[RMI_F11_DELTA_X_THRESHOLD] ||
-           ctrl->ctrl0_11[RMI_F11_DELTA_Y_THRESHOLD]) {
+       if (sensor->axis_align.delta_x_threshold ||
+           sensor->axis_align.delta_y_threshold) {
                ctrl->ctrl0_11[0] &= ~RMI_F11_REPORT_MODE_MASK;
                ctrl->ctrl0_11[0] |= RMI_F11_REPORT_MODE_REDUCED;
        }
index 6ed9f22..fe24543 100644 (file)
@@ -432,7 +432,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
        return 0;
 }
 
-static bool raydium_i2c_boot_trigger(struct i2c_client *client)
+static int raydium_i2c_boot_trigger(struct i2c_client *client)
 {
        static const u8 cmd[7][6] = {
                { 0x08, 0x0C, 0x09, 0x00, 0x50, 0xD7 },
@@ -457,10 +457,10 @@ static bool raydium_i2c_boot_trigger(struct i2c_client *client)
                }
        }
 
-       return false;
+       return 0;
 }
 
-static bool raydium_i2c_fw_trigger(struct i2c_client *client)
+static int raydium_i2c_fw_trigger(struct i2c_client *client)
 {
        static const u8 cmd[5][11] = {
                { 0, 0x09, 0x71, 0x0C, 0x09, 0x00, 0x50, 0xD7, 0, 0, 0 },
@@ -483,7 +483,7 @@ static bool raydium_i2c_fw_trigger(struct i2c_client *client)
                }
        }
 
-       return false;
+       return 0;
 }
 
 static int raydium_i2c_check_path(struct i2c_client *client)
index aac132b..20cce36 100644 (file)
@@ -3826,7 +3826,7 @@ int amd_iommu_activate_guest_mode(void *data)
        entry->lo.fields_vapic.ga_tag      = ir_data->ga_tag;
 
        return modify_irte_ga(ir_data->irq_2_irte.devid,
-                             ir_data->irq_2_irte.index, entry, NULL);
+                             ir_data->irq_2_irte.index, entry, ir_data);
 }
 EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
 
@@ -3852,7 +3852,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
                                APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
 
        return modify_irte_ga(ir_data->irq_2_irte.devid,
-                             ir_data->irq_2_irte.index, entry, NULL);
+                             ir_data->irq_2_irte.index, entry, ir_data);
 }
 EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
 
index a2e96a5..ba128d1 100644 (file)
@@ -177,15 +177,15 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
        start -= iova_offset(iovad, start);
        num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
 
-       msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
-       if (!msi_page)
-               return -ENOMEM;
-
        for (i = 0; i < num_pages; i++) {
-               msi_page[i].phys = start;
-               msi_page[i].iova = start;
-               INIT_LIST_HEAD(&msi_page[i].list);
-               list_add(&msi_page[i].list, &cookie->msi_page_list);
+               msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
+               if (!msi_page)
+                       return -ENOMEM;
+
+               msi_page->phys = start;
+               msi_page->iova = start;
+               INIT_LIST_HEAD(&msi_page->list);
+               list_add(&msi_page->list, &cookie->msi_page_list);
                start += iovad->granule;
        }
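
Switching from one kcalloc()'d array to per-entry kmalloc() matters because the cleanup path frees the MSI pages one list node at a time, and only the first element of a kcalloc() block is a pointer kfree() may legally see. A reduced sketch of the per-node allocation pattern, with a stand-in structure name (error cleanup left to the caller, as in the cookie teardown):

    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct msi_page_entry {
            phys_addr_t phys;
            struct list_head list;
    };

    static int add_pages(struct list_head *head, phys_addr_t start,
                         int n, size_t granule)
    {
            int i;

            for (i = 0; i < n; i++) {
                    /* one allocation per node so each can be kfree()d alone */
                    struct msi_page_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);

                    if (!p)
                            return -ENOMEM; /* caller frees nodes already added */

                    p->phys = start;
                    list_add(&p->list, head);
                    start += granule;
            }

            return 0;
    }
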
 
index 071bb42..f77dae7 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/iommu.h>
 #include <linux/numa.h>
+#include <linux/limits.h>
 #include <asm/irq_remapping.h>
 #include <asm/iommu_table.h>
 
@@ -128,6 +129,13 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
 
        BUG_ON(dev->is_virtfn);
 
+       /*
+        * Ignore devices that have a domain number higher than what can
+        * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
+        */
+       if (pci_domain_nr(dev->bus) > U16_MAX)
+               return NULL;
+
        /* Only generate path[] for device addition event */
        if (event == BUS_NOTIFY_ADD_DEVICE)
                for (tmp = dev; tmp; tmp = tmp->bus->self)
@@ -363,7 +371,8 @@ dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
 {
        struct dmar_drhd_unit *dmaru;
 
-       list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
+       list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
+                               dmar_rcu_check())
                if (dmaru->segment == drhd->segment &&
                    dmaru->reg_base_addr == drhd->address)
                        return dmaru;
@@ -440,12 +449,13 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
 
        /* Check for NUL termination within the designated length */
        if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
-               WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+               pr_warn(FW_BUG
                           "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
                           "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                           dmi_get_system_info(DMI_BIOS_VENDOR),
                           dmi_get_system_info(DMI_BIOS_VERSION),
                           dmi_get_system_info(DMI_PRODUCT_VERSION));
+               add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
                return -EINVAL;
        }
        pr_info("ANDD device: %x name: %s\n", andd->device_number,
@@ -471,14 +481,14 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
                        return 0;
                }
        }
-       WARN_TAINT(
-               1, TAINT_FIRMWARE_WORKAROUND,
+       pr_warn(FW_BUG
                "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
-               drhd->reg_base_addr,
+               rhsa->base_address,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));
+       add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 
        return 0;
 }
@@ -827,14 +837,14 @@ int __init dmar_table_init(void)
 
 static void warn_invalid_dmar(u64 addr, const char *message)
 {
-       WARN_TAINT_ONCE(
-               1, TAINT_FIRMWARE_WORKAROUND,
+       pr_warn_once(FW_BUG
                "Your BIOS is broken; DMAR reported at address %llx%s!\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                addr, message,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));
+       add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 }
 
 static int __ref
index c1257be..3eb1fe2 100644 (file)
@@ -33,38 +33,42 @@ struct iommu_regset {
 
 #define IOMMU_REGSET_ENTRY(_reg_)                                      \
        { DMAR_##_reg_##_REG, __stringify(_reg_) }
-static const struct iommu_regset iommu_regs[] = {
+
+static const struct iommu_regset iommu_regs_32[] = {
        IOMMU_REGSET_ENTRY(VER),
-       IOMMU_REGSET_ENTRY(CAP),
-       IOMMU_REGSET_ENTRY(ECAP),
        IOMMU_REGSET_ENTRY(GCMD),
        IOMMU_REGSET_ENTRY(GSTS),
-       IOMMU_REGSET_ENTRY(RTADDR),
-       IOMMU_REGSET_ENTRY(CCMD),
        IOMMU_REGSET_ENTRY(FSTS),
        IOMMU_REGSET_ENTRY(FECTL),
        IOMMU_REGSET_ENTRY(FEDATA),
        IOMMU_REGSET_ENTRY(FEADDR),
        IOMMU_REGSET_ENTRY(FEUADDR),
-       IOMMU_REGSET_ENTRY(AFLOG),
        IOMMU_REGSET_ENTRY(PMEN),
        IOMMU_REGSET_ENTRY(PLMBASE),
        IOMMU_REGSET_ENTRY(PLMLIMIT),
+       IOMMU_REGSET_ENTRY(ICS),
+       IOMMU_REGSET_ENTRY(PRS),
+       IOMMU_REGSET_ENTRY(PECTL),
+       IOMMU_REGSET_ENTRY(PEDATA),
+       IOMMU_REGSET_ENTRY(PEADDR),
+       IOMMU_REGSET_ENTRY(PEUADDR),
+};
+
+static const struct iommu_regset iommu_regs_64[] = {
+       IOMMU_REGSET_ENTRY(CAP),
+       IOMMU_REGSET_ENTRY(ECAP),
+       IOMMU_REGSET_ENTRY(RTADDR),
+       IOMMU_REGSET_ENTRY(CCMD),
+       IOMMU_REGSET_ENTRY(AFLOG),
        IOMMU_REGSET_ENTRY(PHMBASE),
        IOMMU_REGSET_ENTRY(PHMLIMIT),
        IOMMU_REGSET_ENTRY(IQH),
        IOMMU_REGSET_ENTRY(IQT),
        IOMMU_REGSET_ENTRY(IQA),
-       IOMMU_REGSET_ENTRY(ICS),
        IOMMU_REGSET_ENTRY(IRTA),
        IOMMU_REGSET_ENTRY(PQH),
        IOMMU_REGSET_ENTRY(PQT),
        IOMMU_REGSET_ENTRY(PQA),
-       IOMMU_REGSET_ENTRY(PRS),
-       IOMMU_REGSET_ENTRY(PECTL),
-       IOMMU_REGSET_ENTRY(PEDATA),
-       IOMMU_REGSET_ENTRY(PEADDR),
-       IOMMU_REGSET_ENTRY(PEUADDR),
        IOMMU_REGSET_ENTRY(MTRRCAP),
        IOMMU_REGSET_ENTRY(MTRRDEF),
        IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000),
@@ -127,10 +131,16 @@ static int iommu_regset_show(struct seq_file *m, void *unused)
                 * by adding the offset to the pointer (virtual address).
                 */
                raw_spin_lock_irqsave(&iommu->register_lock, flag);
-               for (i = 0 ; i < ARRAY_SIZE(iommu_regs); i++) {
-                       value = dmar_readq(iommu->reg + iommu_regs[i].offset);
+               for (i = 0 ; i < ARRAY_SIZE(iommu_regs_32); i++) {
+                       value = dmar_readl(iommu->reg + iommu_regs_32[i].offset);
+                       seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
+                                  iommu_regs_32[i].regs, iommu_regs_32[i].offset,
+                                  value);
+               }
+               for (i = 0 ; i < ARRAY_SIZE(iommu_regs_64); i++) {
+                       value = dmar_readq(iommu->reg + iommu_regs_64[i].offset);
                        seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
-                                  iommu_regs[i].regs, iommu_regs[i].offset,
+                                  iommu_regs_64[i].regs, iommu_regs_64[i].offset,
                                   value);
                }
                raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
@@ -272,9 +282,16 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused)
 {
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
+       u32 sts;
 
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
+               sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
+               if (!(sts & DMA_GSTS_TES)) {
+                       seq_printf(m, "DMA Remapping is not enabled on %s\n",
+                                  iommu->name);
+                       continue;
+               }
                root_tbl_walk(m, iommu);
                seq_putc(m, '\n');
        }
@@ -415,6 +432,7 @@ static int ir_translation_struct_show(struct seq_file *m, void *unused)
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        u64 irta;
+       u32 sts;
 
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
@@ -424,7 +442,8 @@ static int ir_translation_struct_show(struct seq_file *m, void *unused)
                seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n",
                           iommu->name);
 
-               if (iommu->ir_table) {
+               sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
+               if (iommu->ir_table && (sts & DMA_GSTS_IRES)) {
                        irta = virt_to_phys(iommu->ir_table->base);
                        seq_printf(m, " IR table address:%llx\n", irta);
                        ir_tbl_remap_entry_show(m, iommu);
index 6fa6de2..4be5494 100644 (file)
@@ -4261,10 +4261,11 @@ static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
 
        /* we know that the this iommu should be at offset 0xa000 from vtbar */
        drhd = dmar_find_matched_drhd_unit(pdev);
-       if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
-                           TAINT_FIRMWARE_WORKAROUND,
-                           "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
+       if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
+               pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
+               add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
                pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+       }
 }
 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
 
@@ -4460,14 +4461,16 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
        struct dmar_rmrr_unit *rmrru;
 
        rmrr = (struct acpi_dmar_reserved_memory *)header;
-       if (rmrr_sanity_check(rmrr))
-               WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+       if (rmrr_sanity_check(rmrr)) {
+               pr_warn(FW_BUG
                           "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n"
                           "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                           rmrr->base_address, rmrr->end_address,
                           dmi_get_system_info(DMI_BIOS_VENDOR),
                           dmi_get_system_info(DMI_BIOS_VERSION),
                           dmi_get_system_info(DMI_PRODUCT_VERSION));
+               add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+       }
 
        rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
        if (!rmrru)
@@ -5130,6 +5133,9 @@ int __init intel_iommu_init(void)
 
        down_write(&dmar_global_lock);
 
+       if (!no_iommu)
+               intel_iommu_debugfs_init();
+
        if (no_iommu || dmar_disabled) {
                /*
                 * We exit the function here to ensure IOMMU's remapping and
@@ -5193,6 +5199,7 @@ int __init intel_iommu_init(void)
 
        init_iommu_pm_ops();
 
+       down_read(&dmar_global_lock);
        for_each_active_iommu(iommu, drhd) {
                iommu_device_sysfs_add(&iommu->iommu, NULL,
                                       intel_iommu_groups,
@@ -5200,6 +5207,7 @@ int __init intel_iommu_init(void)
                iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
                iommu_device_register(&iommu->iommu);
        }
+       up_read(&dmar_global_lock);
 
        bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
        if (si_domain && !hw_pass_through)
@@ -5210,7 +5218,6 @@ int __init intel_iommu_init(void)
        down_read(&dmar_global_lock);
        if (probe_acpi_namespace_devices())
                pr_warn("ACPI name space devices didn't probe correctly\n");
-       up_read(&dmar_global_lock);
 
        /* Finally, we enable the DMA remapping hardware. */
        for_each_iommu(iommu, drhd) {
@@ -5219,10 +5226,11 @@ int __init intel_iommu_init(void)
 
                iommu_disable_protect_mem_regions(iommu);
        }
+       up_read(&dmar_global_lock);
+
        pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
 
        intel_iommu_enabled = 1;
-       intel_iommu_debugfs_init();
 
        return 0;
 
@@ -5700,8 +5708,10 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
        u64 phys = 0;
 
        pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
-       if (pte)
-               phys = dma_pte_addr(pte);
+       if (pte && dma_pte_present(pte))
+               phys = dma_pte_addr(pte) +
+                       (iova & (BIT_MASK(level_to_offset_bits(level) +
+                                               VTD_PAGE_SHIFT) - 1));
 
        return phys;
 }
index 983b084..04fbd4b 100644 (file)
@@ -468,7 +468,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
        arm_lpae_iopte *ptep = data->pgd;
        int ret, lvl = data->start_level;
        arm_lpae_iopte prot;
-       long iaext = (long)iova >> cfg->ias;
+       long iaext = (s64)iova >> cfg->ias;
 
        /* If no access, then nothing to do */
        if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
@@ -645,7 +645,7 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
        struct io_pgtable_cfg *cfg = &data->iop.cfg;
        arm_lpae_iopte *ptep = data->pgd;
-       long iaext = (long)iova >> cfg->ias;
+       long iaext = (s64)iova >> cfg->ias;
 
        if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
                return 0;
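
Both hunks in this file replace a cast to long with a cast to s64 before the shift by cfg->ias. The cast matters on 32-bit builds, where long is only 32 bits wide and a shift by an input-address size of 32 or more is undefined; widening to a signed 64-bit type keeps the "upper bits must be all zeros or all ones" check meaningful. A small standalone sketch of that check follows (not the driver code, and ignoring the TTBR1 quirk handling that comes after it in the driver):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Returns 0 when the IOVA sits in the lower half of the input address
     * space and -1 when it sits in the sign-extended upper half; the caller
     * then decides which of the two it accepts.  The arithmetic shift of a
     * signed 64-bit value behaves the same whatever the width of 'long'.
     */
    static int64_t iova_upper_bits(uint64_t iova, unsigned int ias)
    {
            return (int64_t)iova >> ias;
    }

    int main(void)
    {
            printf("%lld\n", (long long)iova_upper_bits(0x0000000080000000ULL, 39)); /*  0 */
            printf("%lld\n", (long long)iova_upper_bits(0xffffff8000000000ULL, 39)); /* -1 */
            printf("%lld\n", (long long)iova_upper_bits(0x0000100000000000ULL, 39)); /* 32 */
            return 0;
    }
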
index c1f7af9..1eec9d4 100644 (file)
@@ -34,6 +34,7 @@
 #define GICD_INT_NMI_PRI       (GICD_INT_DEF_PRI & ~0x80)
 
 #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996    (1ULL << 0)
+#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539  (1ULL << 1)
 
 struct redist_region {
        void __iomem            *redist_base;
@@ -1464,6 +1465,15 @@ static bool gic_enable_quirk_msm8996(void *data)
        return true;
 }
 
+static bool gic_enable_quirk_cavium_38539(void *data)
+{
+       struct gic_chip_data *d = data;
+
+       d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;
+
+       return true;
+}
+
 static bool gic_enable_quirk_hip06_07(void *data)
 {
        struct gic_chip_data *d = data;
@@ -1503,6 +1513,19 @@ static const struct gic_quirk gic_quirks[] = {
                .init   = gic_enable_quirk_hip06_07,
        },
        {
+               /*
+                * Reserved register accesses generate a Synchronous
+                * External Abort. This erratum applies to:
+                * - ThunderX: CN88xx
+                * - OCTEON TX: CN83xx, CN81xx
+                * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
+                */
+               .desc   = "GICv3: Cavium erratum 38539",
+               .iidr   = 0xa000034c,
+               .mask   = 0xe8f00fff,
+               .init   = gic_enable_quirk_cavium_38539,
+       },
+       {
        }
 };
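
The new table entry is matched against the distributor IIDR under the given mask, which is how a single entry can cover the whole range of Cavium parts listed in the comment. Below is a minimal sketch of that masked comparison, reusing the iidr/mask values from the entry above; the real matching lives in the GIC quirk helper, not here.

    #include <stdint.h>
    #include <stdio.h>

    /* Does a device IIDR hit a quirk entry when compared under its mask? */
    static int quirk_matches(uint32_t iidr, uint32_t entry_iidr, uint32_t mask)
    {
            return entry_iidr == (iidr & mask);
    }

    int main(void)
    {
            const uint32_t entry_iidr = 0xa000034c, mask = 0xe8f00fff;

            printf("%d\n", quirk_matches(0xa000034c, entry_iidr, mask)); /* 1: affected part */
            printf("%d\n", quirk_matches(0x0201143b, entry_iidr, mask)); /* 0: unrelated GIC */
            return 0;
    }
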
 
@@ -1577,7 +1600,12 @@ static int __init gic_init_bases(void __iomem *dist_base,
        pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
        pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
 
-       gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
+       /*
+        * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
+        * architecture spec (which says that reserved registers are RES0).
+        */
+       if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
+               gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
 
        gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
                                                 &gic_data);
index 1256059..e7dec32 100644 (file)
@@ -312,9 +312,16 @@ static const struct i2c_device_id wf_ad7417_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_ad7417_id);
 
+static const struct of_device_id wf_ad7417_of_id[] = {
+       { .compatible = "ad7417", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wf_ad7417_of_id);
+
 static struct i2c_driver wf_ad7417_driver = {
        .driver = {
                .name   = "wf_ad7417",
+               .of_match_table = wf_ad7417_of_id,
        },
        .probe          = wf_ad7417_probe,
        .remove         = wf_ad7417_remove,
index 67daeec..2470e5a 100644 (file)
@@ -580,9 +580,16 @@ static const struct i2c_device_id wf_fcu_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_fcu_id);
 
+static const struct of_device_id wf_fcu_of_id[] = {
+       { .compatible = "fcu", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wf_fcu_of_id);
+
 static struct i2c_driver wf_fcu_driver = {
        .driver = {
                .name   = "wf_fcu",
+               .of_match_table = wf_fcu_of_id,
        },
        .probe          = wf_fcu_probe,
        .remove         = wf_fcu_remove,
index 282c28a..1e5fa09 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
+#include <linux/of_device.h>
 #include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
@@ -91,9 +92,14 @@ static int wf_lm75_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {      
        struct wf_lm75_sensor *lm;
-       int rc, ds1775 = id->driver_data;
+       int rc, ds1775;
        const char *name, *loc;
 
+       if (id)
+               ds1775 = id->driver_data;
+       else
+               ds1775 = !!of_device_get_match_data(&client->dev);
+
        DBG("wf_lm75: creating  %s device at address 0x%02x\n",
            ds1775 ? "ds1775" : "lm75", client->addr);
 
@@ -164,9 +170,17 @@ static const struct i2c_device_id wf_lm75_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_lm75_id);
 
+static const struct of_device_id wf_lm75_of_id[] = {
+       { .compatible = "lm75", .data = (void *)0},
+       { .compatible = "ds1775", .data = (void *)1 },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wf_lm75_of_id);
+
 static struct i2c_driver wf_lm75_driver = {
        .driver = {
                .name   = "wf_lm75",
+               .of_match_table = wf_lm75_of_id,
        },
        .probe          = wf_lm75_probe,
        .remove         = wf_lm75_remove,
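
The windfarm hunks in this stretch of the diff (wf_ad7417, wf_fcu, wf_lm75, wf_lm87, wf_max6690, wf_smu_sat) each add an of_device_id table so the driver can bind by compatible string as well as by I2C id; wf_lm75 additionally carries a per-variant flag in .data and reads it back with of_device_get_match_data() when probe is entered without an i2c_device_id. The fragment below is a generic, hypothetical driver skeleton sketching that dual-match pattern; the names ("demo", "vendor,demo-a") are invented for illustration, and none of this is the windfarm code itself.

    #include <linux/i2c.h>
    #include <linux/module.h>
    #include <linux/of_device.h>

    static int demo_probe(struct i2c_client *client, const struct i2c_device_id *id)
    {
            unsigned long variant;

            if (id)                                 /* matched via i2c_device_id */
                    variant = id->driver_data;
            else                                    /* matched via the OF table  */
                    variant = (unsigned long)of_device_get_match_data(&client->dev);

            dev_info(&client->dev, "bound as variant %lu\n", variant);
            return 0;
    }

    static const struct i2c_device_id demo_i2c_id[] = {
            { "demo-a", 0 },
            { "demo-b", 1 },
            { }
    };
    MODULE_DEVICE_TABLE(i2c, demo_i2c_id);

    static const struct of_device_id demo_of_id[] = {
            { .compatible = "vendor,demo-a", .data = (void *)0 },
            { .compatible = "vendor,demo-b", .data = (void *)1 },
            { }
    };
    MODULE_DEVICE_TABLE(of, demo_of_id);

    static struct i2c_driver demo_driver = {
            .driver = {
                    .name           = "demo",
                    .of_match_table = demo_of_id,
            },
            .probe          = demo_probe,
            .id_table       = demo_i2c_id,
    };
    module_i2c_driver(demo_driver);

    MODULE_LICENSE("GPL");
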
index b03a33b..d011899 100644 (file)
@@ -166,9 +166,16 @@ static const struct i2c_device_id wf_lm87_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_lm87_id);
 
+static const struct of_device_id wf_lm87_of_id[] = {
+       { .compatible = "lm87cimt", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wf_lm87_of_id);
+
 static struct i2c_driver wf_lm87_driver = {
        .driver = {
                .name   = "wf_lm87",
+               .of_match_table = wf_lm87_of_id,
        },
        .probe          = wf_lm87_probe,
        .remove         = wf_lm87_remove,
index e666cc0..1e7b03d 100644 (file)
@@ -120,9 +120,16 @@ static const struct i2c_device_id wf_max6690_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_max6690_id);
 
+static const struct of_device_id wf_max6690_of_id[] = {
+       { .compatible = "max6690", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wf_max6690_of_id);
+
 static struct i2c_driver wf_max6690_driver = {
        .driver = {
                .name           = "wf_max6690",
+               .of_match_table = wf_max6690_of_id,
        },
        .probe          = wf_max6690_probe,
        .remove         = wf_max6690_remove,
index c84ec49..cb75dc0 100644 (file)
@@ -341,9 +341,16 @@ static const struct i2c_device_id wf_sat_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_sat_id);
 
+static const struct of_device_id wf_sat_of_id[] = {
+       { .compatible = "smu-sat", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wf_sat_of_id);
+
 static struct i2c_driver wf_sat_driver = {
        .driver = {
                .name           = "wf_smu_sat",
+               .of_match_table = wf_sat_of_id,
        },
        .probe          = wf_sat_probe,
        .remove         = wf_sat_remove,
index 4feed29..423fecc 100644 (file)
@@ -394,7 +394,7 @@ static const struct pcr_ops rts522a_pcr_ops = {
 void rts522a_init_params(struct rtsx_pcr *pcr)
 {
        rts5227_init_params(pcr);
-
+       pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11);
        pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
 
        pcr->option.ocp_en = 1;
index db936e4..1a81cda 100644 (file)
@@ -618,6 +618,7 @@ static const struct pcr_ops rts524a_pcr_ops = {
 void rts524a_init_params(struct rtsx_pcr *pcr)
 {
        rts5249_init_params(pcr);
+       pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
        pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
        pcr->option.ltr_l1off_snooze_sspwrgate =
                LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
@@ -733,6 +734,7 @@ static const struct pcr_ops rts525a_pcr_ops = {
 void rts525a_init_params(struct rtsx_pcr *pcr)
 {
        rts5249_init_params(pcr);
+       pcr->tx_initial_phase = SET_CLOCK_PHASE(25, 29, 11);
        pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
        pcr->option.ltr_l1off_snooze_sspwrgate =
                LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
index 4214f02..711054e 100644 (file)
@@ -662,7 +662,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
        pcr->aspm_en = ASPM_L1_EN;
-       pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
+       pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
        pcr->ic_version = rts5260_get_ic_version(pcr);
index bc4967a..78c3b1d 100644 (file)
@@ -764,7 +764,7 @@ void rts5261_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
        pcr->aspm_en = ASPM_L1_EN;
-       pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 27, 16);
+       pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 11);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
        pcr->ic_version = rts5261_get_ic_version(pcr);
index 031eb64..282c9ef 100644 (file)
@@ -712,13 +712,14 @@ static int at24_probe(struct i2c_client *client)
         * chip is functional.
         */
        err = at24_read(at24, 0, &test_byte, 1);
-       pm_runtime_idle(dev);
        if (err) {
                pm_runtime_disable(dev);
                regulator_disable(at24->vcc_reg);
                return -ENODEV;
        }
 
+       pm_runtime_idle(dev);
+
        if (writable)
                dev_info(dev, "%u byte %s EEPROM, writable, %u bytes/write\n",
                         byte_len, client->name, at24->write_max);
index aa54d35..a971c4b 100644 (file)
@@ -1732,8 +1732,11 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
         * the erase operation does not exceed the max_busy_timeout, we should
         * use R1B response. Or we need to prevent the host from doing hw busy
         * detection, which is done by converting to a R1 response instead.
+        * Note, some hosts require R1B, which also means they are on their own
+        * when it comes to dealing with the busy timeout.
         */
-       if (card->host->max_busy_timeout &&
+       if (!(card->host->caps & MMC_CAP_NEED_RSP_BUSY) &&
+           card->host->max_busy_timeout &&
            busy_timeout > card->host->max_busy_timeout) {
                cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        } else {
index f6912de..de14b58 100644 (file)
@@ -1910,9 +1910,12 @@ static int mmc_sleep(struct mmc_host *host)
         * If the max_busy_timeout of the host is specified, validate it against
         * the sleep cmd timeout. A failure means we need to prevent the host
         * from doing hw busy detection, which is done by converting to a R1
-        * response instead of a R1B.
+        * response instead of a R1B. Note, some hosts require R1B, which also
+        * means they are on their own when it comes to dealing with the busy
+        * timeout.
         */
-       if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) {
+       if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
+           (timeout_ms > host->max_busy_timeout)) {
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
        } else {
                cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
index da425ee..e025604 100644 (file)
@@ -542,9 +542,11 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
         * If the max_busy_timeout of the host is specified, make sure it's
         * enough to fit the used timeout_ms. In case it's not, let's instruct
         * the host to avoid HW busy detection, by converting to a R1 response
-        * instead of a R1B.
+        * instead of a R1B. Note, some hosts require R1B, which also means
+        * they are on their own when it comes to dealing with the busy timeout.
         */
-       if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
+       if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
+           (timeout_ms > host->max_busy_timeout))
                use_r1b_resp = false;
 
        cmd.opcode = MMC_SWITCH;
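
The three MMC core hunks above share one rule: only fall back from an R1B to an R1 response when the host advertises a max_busy_timeout, the requested timeout exceeds it, and the host has not set the new MMC_CAP_NEED_RSP_BUSY capability. A standalone sketch of that decision follows; the capability bit value is a placeholder for the sketch, not the one defined in the MMC headers.

    #include <stdbool.h>
    #include <stdio.h>

    #define MMC_CAP_NEED_RSP_BUSY  (1u << 0)   /* placeholder bit for the sketch */

    /* Keep R1B unless the host can't honour the timeout and doesn't insist on R1B. */
    static bool use_r1b_resp(unsigned int host_caps,
                             unsigned int max_busy_timeout_ms,
                             unsigned int timeout_ms)
    {
            if (host_caps & MMC_CAP_NEED_RSP_BUSY)
                    return true;
            return !(max_busy_timeout_ms && timeout_ms > max_busy_timeout_ms);
    }

    int main(void)
    {
            printf("%d\n", use_r1b_resp(0, 1000, 5000));                      /* 0: convert to R1  */
            printf("%d\n", use_r1b_resp(MMC_CAP_NEED_RSP_BUSY, 1000, 5000));  /* 1: host needs R1B */
            printf("%d\n", use_r1b_resp(0, 0, 5000));                         /* 1: no host limit  */
            return 0;
    }
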
index bd50935..1108797 100644 (file)
@@ -606,19 +606,22 @@ static int sd_change_phase(struct realtek_pci_sdmmc *host,
                u8 sample_point, bool rx)
 {
        struct rtsx_pcr *pcr = host->pcr;
-
+       u16 SD_VP_CTL = 0;
        dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n",
                        __func__, rx ? "RX" : "TX", sample_point);
 
        rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
-       if (rx)
+       if (rx) {
+               SD_VP_CTL = SD_VPRX_CTL;
                rtsx_pci_write_register(pcr, SD_VPRX_CTL,
                        PHASE_SELECT_MASK, sample_point);
-       else
+       } else {
+               SD_VP_CTL = SD_VPTX_CTL;
                rtsx_pci_write_register(pcr, SD_VPTX_CTL,
                        PHASE_SELECT_MASK, sample_point);
-       rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
-       rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET,
+       }
+       rtsx_pci_write_register(pcr, SD_VP_CTL, PHASE_NOT_RESET, 0);
+       rtsx_pci_write_register(pcr, SD_VP_CTL, PHASE_NOT_RESET,
                                PHASE_NOT_RESET);
        rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0);
        rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
index 9651dca..2a2173d 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/delay.h>
+#include <linux/dmi.h>
 
 #include <linux/mmc/host.h>
 #include <linux/mmc/pm.h>
@@ -72,9 +73,16 @@ struct sdhci_acpi_host {
        const struct sdhci_acpi_slot    *slot;
        struct platform_device          *pdev;
        bool                            use_runtime_pm;
+       bool                            is_intel;
+       bool                            reset_signal_volt_on_suspend;
        unsigned long                   private[0] ____cacheline_aligned;
 };
 
+enum {
+       DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP                  = BIT(0),
+       DMI_QUIRK_SD_NO_WRITE_PROTECT                           = BIT(1),
+};
+
 static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c)
 {
        return (void *)c->private;
@@ -391,6 +399,8 @@ static int intel_probe_slot(struct platform_device *pdev, struct acpi_device *ad
        host->mmc_host_ops.start_signal_voltage_switch =
                                        intel_start_signal_voltage_switch;
 
+       c->is_intel = true;
+
        return 0;
 }
 
@@ -647,6 +657,36 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
 };
 MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
 
+static const struct dmi_system_id sdhci_acpi_quirks[] = {
+       {
+               /*
+                * The Lenovo Miix 320-10ICR has a bug in the _PS0 method of
+                * the SHC1 ACPI device, this bug causes it to reprogram the
+                * wrong LDO (DLDO3) to 1.8V if 1.8V modes are used and the
+                * card is (runtime) suspended + resumed. DLDO3 is used for
+                * the LCD and setting it to 1.8V causes the LCD to go black.
+                */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
+               },
+               .driver_data = (void *)DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP,
+       },
+       {
+               /*
+                * The Acer Aspire Switch 10 (SW5-012) microSD slot always
+                * reports the card being write-protected even though microSD
+                * cards do not have a write-protect switch at all.
+                */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
+               },
+               .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
+       },
+       {} /* Terminating entry */
+};
+
 static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(struct acpi_device *adev)
 {
        const struct sdhci_acpi_uid_slot *u;
@@ -663,17 +703,23 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        const struct sdhci_acpi_slot *slot;
        struct acpi_device *device, *child;
+       const struct dmi_system_id *id;
        struct sdhci_acpi_host *c;
        struct sdhci_host *host;
        struct resource *iomem;
        resource_size_t len;
        size_t priv_size;
+       int quirks = 0;
        int err;
 
        device = ACPI_COMPANION(dev);
        if (!device)
                return -ENODEV;
 
+       id = dmi_first_match(sdhci_acpi_quirks);
+       if (id)
+               quirks = (long)id->driver_data;
+
        slot = sdhci_acpi_get_slot(device);
 
        /* Power on the SDHCI controller and its children */
@@ -759,6 +805,12 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
                        dev_warn(dev, "failed to setup card detect gpio\n");
                        c->use_runtime_pm = false;
                }
+
+               if (quirks & DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP)
+                       c->reset_signal_volt_on_suspend = true;
+
+               if (quirks & DMI_QUIRK_SD_NO_WRITE_PROTECT)
+                       host->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
        }
 
        err = sdhci_setup_host(host);
@@ -823,17 +875,39 @@ static int sdhci_acpi_remove(struct platform_device *pdev)
        return 0;
 }
 
+static void __maybe_unused sdhci_acpi_reset_signal_voltage_if_needed(
+       struct device *dev)
+{
+       struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+       struct sdhci_host *host = c->host;
+
+       if (c->is_intel && c->reset_signal_volt_on_suspend &&
+           host->mmc->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_330) {
+               struct intel_host *intel_host = sdhci_acpi_priv(c);
+               unsigned int fn = INTEL_DSM_V33_SWITCH;
+               u32 result = 0;
+
+               intel_dsm(intel_host, dev, fn, &result);
+       }
+}
+
 #ifdef CONFIG_PM_SLEEP
 
 static int sdhci_acpi_suspend(struct device *dev)
 {
        struct sdhci_acpi_host *c = dev_get_drvdata(dev);
        struct sdhci_host *host = c->host;
+       int ret;
 
        if (host->tuning_mode != SDHCI_TUNING_MODE_3)
                mmc_retune_needed(host->mmc);
 
-       return sdhci_suspend_host(host);
+       ret = sdhci_suspend_host(host);
+       if (ret)
+               return ret;
+
+       sdhci_acpi_reset_signal_voltage_if_needed(dev);
+       return 0;
 }
 
 static int sdhci_acpi_resume(struct device *dev)
@@ -853,11 +927,17 @@ static int sdhci_acpi_runtime_suspend(struct device *dev)
 {
        struct sdhci_acpi_host *c = dev_get_drvdata(dev);
        struct sdhci_host *host = c->host;
+       int ret;
 
        if (host->tuning_mode != SDHCI_TUNING_MODE_3)
                mmc_retune_needed(host->mmc);
 
-       return sdhci_runtime_suspend_host(host);
+       ret = sdhci_runtime_suspend_host(host);
+       if (ret)
+               return ret;
+
+       sdhci_acpi_reset_signal_voltage_if_needed(dev);
+       return 0;
 }
 
 static int sdhci_acpi_runtime_resume(struct device *dev)
index 5827d37..e573495 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 
 #include "sdhci-pltfm.h"
 
@@ -235,6 +236,11 @@ static const struct sdhci_ops sdhci_cdns_ops = {
        .set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
 };
 
+static const struct sdhci_pltfm_data sdhci_cdns_uniphier_pltfm_data = {
+       .ops = &sdhci_cdns_ops,
+       .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+};
+
 static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = {
        .ops = &sdhci_cdns_ops,
 };
@@ -334,6 +340,7 @@ static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc,
 static int sdhci_cdns_probe(struct platform_device *pdev)
 {
        struct sdhci_host *host;
+       const struct sdhci_pltfm_data *data;
        struct sdhci_pltfm_host *pltfm_host;
        struct sdhci_cdns_priv *priv;
        struct clk *clk;
@@ -350,8 +357,12 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       data = of_device_get_match_data(dev);
+       if (!data)
+               data = &sdhci_cdns_pltfm_data;
+
        nr_phy_params = sdhci_cdns_phy_param_count(dev->of_node);
-       host = sdhci_pltfm_init(pdev, &sdhci_cdns_pltfm_data,
+       host = sdhci_pltfm_init(pdev, data,
                                struct_size(priv, phy_params, nr_phy_params));
        if (IS_ERR(host)) {
                ret = PTR_ERR(host);
@@ -431,7 +442,10 @@ static const struct dev_pm_ops sdhci_cdns_pm_ops = {
 };
 
 static const struct of_device_id sdhci_cdns_match[] = {
-       { .compatible = "socionext,uniphier-sd4hc" },
+       {
+               .compatible = "socionext,uniphier-sd4hc",
+               .data = &sdhci_cdns_uniphier_pltfm_data,
+       },
        { .compatible = "cdns,sd4hc" },
        { /* sentinel */ }
 };
index c3a160c..3955fa5 100644 (file)
@@ -1590,7 +1590,7 @@ static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
        return 0;
 }
 
-void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
+static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
 {
        struct sdhci_host *host = mmc_priv(mmc);
        unsigned long flags;
index ab2bd31..fcef5c0 100644 (file)
@@ -132,7 +132,8 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
 
        sdhci_reset(host, mask);
 
-       if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+       if ((host->mmc->caps & MMC_CAP_NONREMOVABLE)
+           || mmc_gpio_get_cd(host->mmc) >= 0)
                sdhci_at91_set_force_card_detect(host);
 
        if (priv->cal_always_on && (mask & SDHCI_RESET_ALL))
@@ -427,8 +428,11 @@ static int sdhci_at91_probe(struct platform_device *pdev)
         * detection procedure using the SDMCC_CD signal is bypassed.
         * This bit is reset when a software reset for all command is performed
         * so we need to implement our own reset function to set back this bit.
+        *
+        * WA: SAMA5D2 doesn't drive CMD if using CD GPIO line.
         */
-       if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+       if ((host->mmc->caps & MMC_CAP_NONREMOVABLE)
+           || mmc_gpio_get_cd(host->mmc) >= 0)
                sdhci_at91_set_force_card_detect(host);
 
        pm_runtime_put_autosuspend(&pdev->dev);
index 8820531..c497817 100644 (file)
@@ -1192,6 +1192,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
        if (of_find_property(dev->of_node, "dmas", NULL))
                sdhci_switch_external_dma(host, true);
 
+       /* An R1B response is required to properly manage HW busy detection. */
+       mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+
        ret = sdhci_setup_host(host);
        if (ret)
                goto err_put_sync;
index 5eea8d7..ce15a05 100644 (file)
@@ -262,10 +262,26 @@ static int gl9750_execute_tuning(struct sdhci_host *host, u32 opcode)
        return 0;
 }
 
+static void gli_pcie_enable_msi(struct sdhci_pci_slot *slot)
+{
+       int ret;
+
+       ret = pci_alloc_irq_vectors(slot->chip->pdev, 1, 1,
+                                   PCI_IRQ_MSI | PCI_IRQ_MSIX);
+       if (ret < 0) {
+               pr_warn("%s: enable PCI MSI failed, error=%d\n",
+                      mmc_hostname(slot->host->mmc), ret);
+               return;
+       }
+
+       slot->host->irq = pci_irq_vector(slot->chip->pdev, 0);
+}
+
 static int gli_probe_slot_gl9750(struct sdhci_pci_slot *slot)
 {
        struct sdhci_host *host = slot->host;
 
+       gli_pcie_enable_msi(slot);
        slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
        sdhci_enable_v4_mode(host);
 
@@ -276,6 +292,7 @@ static int gli_probe_slot_gl9755(struct sdhci_pci_slot *slot)
 {
        struct sdhci_host *host = slot->host;
 
+       gli_pcie_enable_msi(slot);
        slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
        sdhci_enable_v4_mode(host);
 
index 403ac44..a25c3a4 100644 (file)
@@ -1552,6 +1552,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
        if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
                host->mmc->caps |= MMC_CAP_1_8V_DDR;
 
+       /* An R1B response is required to properly manage HW busy detection. */
+       host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+
        tegra_sdhci_parse_dt(host);
 
        tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
index 25a8f93..db8884a 100644 (file)
@@ -149,6 +149,7 @@ config NET_FC
 config IFB
        tristate "Intermediate Functional Block support"
        depends on NET_CLS_ACT
+       select NET_REDIRECT
        ---help---
          This is an intermediate driver that allows sharing of
          resources.
index 1cc2cd8..c816985 100644 (file)
@@ -50,11 +50,6 @@ struct arp_pkt {
 };
 #pragma pack()
 
-static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
-{
-       return (struct arp_pkt *)skb_network_header(skb);
-}
-
 /* Forward declaration */
 static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
                                      bool strict_match);
@@ -553,10 +548,11 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
        spin_unlock(&bond->mode_lock);
 }
 
-static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
+static struct slave *rlb_choose_channel(struct sk_buff *skb,
+                                       struct bonding *bond,
+                                       const struct arp_pkt *arp)
 {
        struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
-       struct arp_pkt *arp = arp_pkt(skb);
        struct slave *assigned_slave, *curr_active_slave;
        struct rlb_client_info *client_info;
        u32 hash_index = 0;
@@ -653,8 +649,12 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
  */
 static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
 {
-       struct arp_pkt *arp = arp_pkt(skb);
        struct slave *tx_slave = NULL;
+       struct arp_pkt *arp;
+
+       if (!pskb_network_may_pull(skb, sizeof(*arp)))
+               return NULL;
+       arp = (struct arp_pkt *)skb_network_header(skb);
 
        /* Don't modify or load balance ARPs that do not originate locally
         * (e.g.,arrive via a bridge).
@@ -664,7 +664,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
 
        if (arp->op_code == htons(ARPOP_REPLY)) {
                /* the arp must be sent on the selected rx channel */
-               tx_slave = rlb_choose_channel(skb, bond);
+               tx_slave = rlb_choose_channel(skb, bond, arp);
                if (tx_slave)
                        bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr,
                                          tx_slave->dev->addr_len);
@@ -676,7 +676,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
                 * When the arp reply is received the entry will be updated
                 * with the correct unicast address of the client.
                 */
-               tx_slave = rlb_choose_channel(skb, bond);
+               tx_slave = rlb_choose_channel(skb, bond, arp);
 
                /* The ARP reply packets must be delayed so that
                 * they can cancel out the influence of the ARP request.
index 8e81bdf..63f2548 100644 (file)
@@ -141,29 +141,29 @@ static ssize_t dbgfs_state(struct file *file, char __user *user_buf,
                return 0;
 
        /* Print out debug information. */
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "CAIF SPI debug information:\n");
-
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR);
-
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "STATE: %d\n", cfspi->dbg_state);
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Previous CMD: 0x%x\n", cfspi->pcmd);
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Current CMD: 0x%x\n", cfspi->cmd);
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Previous TX len: %d\n", cfspi->tx_ppck_len);
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Previous RX len: %d\n", cfspi->rx_ppck_len);
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Current TX len: %d\n", cfspi->tx_cpck_len);
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Current RX len: %d\n", cfspi->rx_cpck_len);
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Next TX len: %d\n", cfspi->tx_npck_len);
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Next RX len: %d\n", cfspi->rx_npck_len);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "CAIF SPI debug information:\n");
+
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR);
+
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "STATE: %d\n", cfspi->dbg_state);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Previous CMD: 0x%x\n", cfspi->pcmd);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Current CMD: 0x%x\n", cfspi->cmd);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Previous TX len: %d\n", cfspi->tx_ppck_len);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Previous RX len: %d\n", cfspi->rx_ppck_len);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Current TX len: %d\n", cfspi->tx_cpck_len);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Current RX len: %d\n", cfspi->rx_cpck_len);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Next TX len: %d\n", cfspi->tx_npck_len);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Next RX len: %d\n", cfspi->rx_npck_len);
 
        if (len > DEBUGFS_BUF_SIZE)
                len = DEBUGFS_BUF_SIZE;
@@ -180,23 +180,23 @@ static ssize_t print_frame(char *buf, size_t size, char *frm,
        int len = 0;
        int i;
        for (i = 0; i < count; i++) {
-               len += snprintf((buf + len), (size - len),
+               len += scnprintf((buf + len), (size - len),
                                        "[0x" BYTE_HEX_FMT "]",
                                        frm[i]);
                if ((i == cut) && (count > (cut * 2))) {
                        /* Fast forward. */
                        i = count - cut;
-                       len += snprintf((buf + len), (size - len),
-                                       "--- %zu bytes skipped ---\n",
-                                       count - (cut * 2));
+                       len += scnprintf((buf + len), (size - len),
+                                        "--- %zu bytes skipped ---\n",
+                                        count - (cut * 2));
                }
 
                if ((!(i % 10)) && i) {
-                       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                                       "\n");
+                       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                                        "\n");
                }
        }
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), "\n");
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), "\n");
        return len;
 }
 
@@ -214,18 +214,18 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
                return 0;
 
        /* Print out debug information. */
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Current frame:\n");
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Current frame:\n");
 
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Tx data (Len: %d):\n", cfspi->tx_cpck_len);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Tx data (Len: %d):\n", cfspi->tx_cpck_len);
 
        len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
                           cfspi->xfer.va_tx[0],
                           (cfspi->tx_cpck_len + SPI_CMD_SZ), 100);
 
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Rx data (Len: %d):\n", cfspi->rx_cpck_len);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Rx data (Len: %d):\n", cfspi->rx_cpck_len);
 
        len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
                           cfspi->xfer.va_rx,
index 6ee06a4..68834a2 100644 (file)
@@ -883,6 +883,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
                                = { .len = sizeof(struct can_bittiming) },
        [IFLA_CAN_DATA_BITTIMING_CONST]
                                = { .len = sizeof(struct can_bittiming_const) },
+       [IFLA_CAN_TERMINATION]  = { .type = NLA_U16 },
 };
 
 static int can_validate(struct nlattr *tb[], struct nlattr *data[],
index 2f5c287..a366428 100644 (file)
@@ -625,7 +625,10 @@ err_free_chan:
        tty->disc_data = NULL;
        clear_bit(SLF_INUSE, &sl->flags);
        slc_free_netdev(sl->dev);
+       /* do not call free_netdev before rtnl_unlock */
+       rtnl_unlock();
        free_netdev(sl->dev);
+       return err;
 
 err_exit:
        rtnl_unlock();
index 022466c..7cbd1bd 100644 (file)
@@ -566,7 +566,7 @@ mt7530_mib_reset(struct dsa_switch *ds)
 static void
 mt7530_port_set_status(struct mt7530_priv *priv, int port, int enable)
 {
-       u32 mask = PMCR_TX_EN | PMCR_RX_EN;
+       u32 mask = PMCR_TX_EN | PMCR_RX_EN | PMCR_FORCE_LNK;
 
        if (enable)
                mt7530_set(priv, MT7530_PMCR_P(port), mask);
@@ -1444,7 +1444,7 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
        mcr_new &= ~(PMCR_FORCE_SPEED_1000 | PMCR_FORCE_SPEED_100 |
                     PMCR_FORCE_FDX | PMCR_TX_FC_EN | PMCR_RX_FC_EN);
        mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
-                  PMCR_BACKPR_EN | PMCR_FORCE_MODE | PMCR_FORCE_LNK;
+                  PMCR_BACKPR_EN | PMCR_FORCE_MODE;
 
        /* Are we connected to external phy */
        if (port == 5 && dsa_is_user_port(ds, 5))
index 8c92895..2f993e6 100644 (file)
@@ -2769,6 +2769,8 @@ static u64 mv88e6xxx_devlink_atu_bin_get(struct mv88e6xxx_chip *chip,
                goto unlock;
        }
 
+       occupancy &= MV88E6XXX_G2_ATU_STATS_MASK;
+
 unlock:
        mv88e6xxx_reg_unlock(chip);
 
index 0150301..8fd4830 100644 (file)
@@ -1099,6 +1099,13 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
 {
        int err, irq, virq;
 
+       chip->g2_irq.masked = ~0;
+       mv88e6xxx_reg_lock(chip);
+       err = mv88e6xxx_g2_int_mask(chip, ~chip->g2_irq.masked);
+       mv88e6xxx_reg_unlock(chip);
+       if (err)
+               return err;
+
        chip->g2_irq.domain = irq_domain_add_simple(
                chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip);
        if (!chip->g2_irq.domain)
@@ -1108,7 +1115,6 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
                irq_create_mapping(chip->g2_irq.domain, irq);
 
        chip->g2_irq.chip = mv88e6xxx_g2_irq_chip;
-       chip->g2_irq.masked = ~0;
 
        chip->device_irq = irq_find_mapping(chip->g1_irq.domain,
                                            MV88E6XXX_G1_STS_IRQ_DEVICE);
index 03ba6d2..7edea57 100644 (file)
@@ -1741,7 +1741,8 @@ static void sja1105_teardown(struct dsa_switch *ds)
                if (!dsa_is_user_port(ds, port))
                        continue;
 
-               kthread_destroy_worker(sp->xmit_worker);
+               if (sp->xmit_worker)
+                       kthread_destroy_worker(sp->xmit_worker);
        }
 
        sja1105_tas_teardown(ds);
index 0b2fd96..cada6e7 100644 (file)
@@ -1018,13 +1018,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
                struct ena_rx_buffer *rx_info;
 
                req_id = rx_ring->free_ids[next_to_use];
-               rc = validate_rx_req_id(rx_ring, req_id);
-               if (unlikely(rc < 0))
-                       break;
 
                rx_info = &rx_ring->rx_buffer_info[req_id];
 
-
                rc = ena_alloc_rx_page(rx_ring, rx_info,
                                       GFP_ATOMIC | __GFP_COMP);
                if (unlikely(rc < 0)) {
@@ -1379,9 +1375,15 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
        struct ena_rx_buffer *rx_info;
        u16 len, req_id, buf = 0;
        void *va;
+       int rc;
 
        len = ena_bufs[buf].len;
        req_id = ena_bufs[buf].req_id;
+
+       rc = validate_rx_req_id(rx_ring, req_id);
+       if (unlikely(rc < 0))
+               return NULL;
+
        rx_info = &rx_ring->rx_buffer_info[req_id];
 
        if (unlikely(!rx_info->page)) {
@@ -1454,6 +1456,11 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
                buf++;
                len = ena_bufs[buf].len;
                req_id = ena_bufs[buf].req_id;
+
+               rc = validate_rx_req_id(rx_ring, req_id);
+               if (unlikely(rc < 0))
+                       return NULL;
+
                rx_info = &rx_ring->rx_buffer_info[req_id];
        } while (1);
 
@@ -1968,7 +1975,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
        }
 
        /* Reserved the max msix vectors we might need */
-       msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_io_queues);
+       msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
        netif_dbg(adapter, probe, adapter->netdev,
                  "trying to enable MSI-X, vectors %d\n", msix_vecs);
 
@@ -2068,6 +2075,7 @@ static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
 
 static int ena_request_io_irq(struct ena_adapter *adapter)
 {
+       u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
        unsigned long flags = 0;
        struct ena_irq *irq;
        int rc = 0, i, k;
@@ -2078,7 +2086,7 @@ static int ena_request_io_irq(struct ena_adapter *adapter)
                return -EINVAL;
        }
 
-       for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
+       for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
                irq = &adapter->irq_tbl[i];
                rc = request_irq(irq->vector, irq->handler, flags, irq->name,
                                 irq->data);
@@ -2119,6 +2127,7 @@ static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
 
 static void ena_free_io_irq(struct ena_adapter *adapter)
 {
+       u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
        struct ena_irq *irq;
        int i;
 
@@ -2129,7 +2138,7 @@ static void ena_free_io_irq(struct ena_adapter *adapter)
        }
 #endif /* CONFIG_RFS_ACCEL */
 
-       for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
+       for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
                irq = &adapter->irq_tbl[i];
                irq_set_affinity_hint(irq->vector, NULL);
                free_irq(irq->vector, irq->data);
@@ -2144,12 +2153,13 @@ static void ena_disable_msix(struct ena_adapter *adapter)
 
 static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
 {
+       u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
        int i;
 
        if (!netif_running(adapter->netdev))
                return;
 
-       for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
+       for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++)
                synchronize_irq(adapter->irq_tbl[i].vector);
 }
 
@@ -3476,6 +3486,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
                netif_carrier_on(adapter->netdev);
 
        mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
+       adapter->last_keep_alive_jiffies = jiffies;
        dev_err(&pdev->dev,
                "Device reset completed successfully, Driver info: %s\n",
                version);
@@ -4325,13 +4336,15 @@ err_disable_device:
 
 /*****************************************************************************/
 
-/* ena_remove - Device Removal Routine
+/* __ena_shutoff - Helper used in both PCI remove/shutdown routines
  * @pdev: PCI device information struct
+ * @shutdown: Is it a shutdown operation? If false, it is a removal
  *
- * ena_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device.
+ * __ena_shutoff is a helper routine that does the real work on shutdown and
+ * removal paths; the difference between those paths is whether the netdevice
+ * is detached or unregistered.
  */
-static void ena_remove(struct pci_dev *pdev)
+static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
 {
        struct ena_adapter *adapter = pci_get_drvdata(pdev);
        struct ena_com_dev *ena_dev;
@@ -4350,13 +4363,17 @@ static void ena_remove(struct pci_dev *pdev)
 
        cancel_work_sync(&adapter->reset_task);
 
-       rtnl_lock();
+       rtnl_lock(); /* lock released inside the below if-else block */
        ena_destroy_device(adapter, true);
-       rtnl_unlock();
-
-       unregister_netdev(netdev);
-
-       free_netdev(netdev);
+       if (shutdown) {
+               netif_device_detach(netdev);
+               dev_close(netdev);
+               rtnl_unlock();
+       } else {
+               rtnl_unlock();
+               unregister_netdev(netdev);
+               free_netdev(netdev);
+       }
 
        ena_com_rss_destroy(ena_dev);
 
@@ -4371,6 +4388,30 @@ static void ena_remove(struct pci_dev *pdev)
        vfree(ena_dev);
 }
 
+/* ena_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ena_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+
+static void ena_remove(struct pci_dev *pdev)
+{
+       __ena_shutoff(pdev, false);
+}
+
+/* ena_shutdown - Device Shutdown Routine
+ * @pdev: PCI device information struct
+ *
+ * ena_shutdown is called by the PCI subsystem to alert the driver that
+ * a shutdown/reboot (or kexec) is happening and the device must be disabled.
+ */
+
+static void ena_shutdown(struct pci_dev *pdev)
+{
+       __ena_shutoff(pdev, true);
+}
+
 #ifdef CONFIG_PM
 /* ena_suspend - PM suspend callback
  * @pdev: PCI device information struct
@@ -4420,6 +4461,7 @@ static struct pci_driver ena_pci_driver = {
        .id_table       = ena_pci_tbl,
        .probe          = ena_probe,
        .remove         = ena_remove,
+       .shutdown       = ena_shutdown,
 #ifdef CONFIG_PM
        .suspend    = ena_suspend,
        .resume     = ena_resume,
index e0611cb..15b31cd 100644 (file)
@@ -2135,7 +2135,7 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
                return -ENOSPC;
 
        index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
-       if (index > RXCHK_BRCM_TAG_MAX)
+       if (index >= RXCHK_BRCM_TAG_MAX)
                return -ENOSPC;
 
        /* Location is the classification ID, and index is the position
index f9a8151..d28b406 100644 (file)
@@ -6880,12 +6880,12 @@ skip_rdma:
        }
        ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
        rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
-       if (rc)
+       if (rc) {
                netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
                           rc);
-       else
-               ctx->flags |= BNXT_CTX_FLAG_INITED;
-
+               return rc;
+       }
+       ctx->flags |= BNXT_CTX_FLAG_INITED;
        return 0;
 }
 
@@ -7406,14 +7406,22 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
                pri2cos = &resp2->pri0_cos_queue_id;
                for (i = 0; i < 8; i++) {
                        u8 queue_id = pri2cos[i];
+                       u8 queue_idx;
 
+                       /* Per port queue IDs start from 0, 10, 20, etc */
+                       queue_idx = queue_id % 10;
+                       if (queue_idx > BNXT_MAX_QUEUE) {
+                               bp->pri2cos_valid = false;
+                               goto qstats_done;
+                       }
                        for (j = 0; j < bp->max_q; j++) {
                                if (bp->q_ids[j] == queue_id)
-                                       bp->pri2cos[i] = j;
+                                       bp->pri2cos_idx[i] = queue_idx;
                        }
                }
                bp->pri2cos_valid = 1;
        }
+qstats_done:
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
 }
@@ -10982,13 +10990,13 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
        struct bnxt *bp = netdev_priv(dev);
 
        if (netif_running(dev))
-               bnxt_close_nic(bp, false, false);
+               bnxt_close_nic(bp, true, false);
 
        dev->mtu = new_mtu;
        bnxt_set_ring_params(bp);
 
        if (netif_running(dev))
-               return bnxt_open_nic(bp, false, false);
+               return bnxt_open_nic(bp, true, false);
 
        return 0;
 }
@@ -11669,6 +11677,10 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
                bp->rx_nr_rings++;
                bp->cp_nr_rings++;
        }
+       if (rc) {
+               bp->tx_nr_rings = 0;
+               bp->rx_nr_rings = 0;
+       }
        return rc;
 }
 
@@ -11962,12 +11974,12 @@ init_err_pci_clean:
        bnxt_hwrm_func_drv_unrgtr(bp);
        bnxt_free_hwrm_short_cmd_req(bp);
        bnxt_free_hwrm_resources(bp);
-       bnxt_free_ctx_mem(bp);
-       kfree(bp->ctx);
-       bp->ctx = NULL;
        kfree(bp->fw_health);
        bp->fw_health = NULL;
        bnxt_cleanup_pci(bp);
+       bnxt_free_ctx_mem(bp);
+       kfree(bp->ctx);
+       bp->ctx = NULL;
 
 init_err_free:
        free_netdev(dev);
index cabef0b..63b1706 100644 (file)
@@ -1716,7 +1716,7 @@ struct bnxt {
        u16                     fw_rx_stats_ext_size;
        u16                     fw_tx_stats_ext_size;
        u16                     hw_ring_stats_size;
-       u8                      pri2cos[8];
+       u8                      pri2cos_idx[8];
        u8                      pri2cos_valid;
 
        u16                     hwrm_max_req_len;
index fb6f30d..b1511bc 100644 (file)
@@ -479,24 +479,26 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
 {
        struct bnxt *bp = netdev_priv(dev);
        struct ieee_ets *my_ets = bp->ieee_ets;
+       int rc;
 
        ets->ets_cap = bp->max_tc;
 
        if (!my_ets) {
-               int rc;
-
                if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
                        return 0;
 
                my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
                if (!my_ets)
-                       return 0;
+                       return -ENOMEM;
                rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
                if (rc)
-                       return 0;
+                       goto error;
                rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
                if (rc)
-                       return 0;
+                       goto error;
+
+               /* cache result */
+               bp->ieee_ets = my_ets;
        }
 
        ets->cbs = my_ets->cbs;
@@ -505,6 +507,9 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
        memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
        memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
        return 0;
+error:
+       kfree(my_ets);
+       return rc;
 }
 
 static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
index e8fc167..3f8a1de 100644 (file)
@@ -589,25 +589,25 @@ skip_ring_stats:
                if (bp->pri2cos_valid) {
                        for (i = 0; i < 8; i++, j++) {
                                long n = bnxt_rx_bytes_pri_arr[i].base_off +
-                                        bp->pri2cos[i];
+                                        bp->pri2cos_idx[i];
 
                                buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
                        }
                        for (i = 0; i < 8; i++, j++) {
                                long n = bnxt_rx_pkts_pri_arr[i].base_off +
-                                        bp->pri2cos[i];
+                                        bp->pri2cos_idx[i];
 
                                buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
                        }
                        for (i = 0; i < 8; i++, j++) {
                                long n = bnxt_tx_bytes_pri_arr[i].base_off +
-                                        bp->pri2cos[i];
+                                        bp->pri2cos_idx[i];
 
                                buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
                        }
                        for (i = 0; i < 8; i++, j++) {
                                long n = bnxt_tx_pkts_pri_arr[i].base_off +
-                                        bp->pri2cos[i];
+                                        bp->pri2cos_idx[i];
 
                                buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
                        }
@@ -2007,8 +2007,8 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
        struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_nvm_install_update_input install = {0};
        const struct firmware *fw;
-       int rc, hwrm_err = 0;
        u32 item_len;
+       int rc = 0;
        u16 index;
 
        bnxt_hwrm_fw_set_time(bp);
@@ -2052,15 +2052,14 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
                        memcpy(kmem, fw->data, fw->size);
                        modify.host_src_addr = cpu_to_le64(dma_handle);
 
-                       hwrm_err = hwrm_send_message(bp, &modify,
-                                                    sizeof(modify),
-                                                    FLASH_PACKAGE_TIMEOUT);
+                       rc = hwrm_send_message(bp, &modify, sizeof(modify),
+                                              FLASH_PACKAGE_TIMEOUT);
                        dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
                                          dma_handle);
                }
        }
        release_firmware(fw);
-       if (rc || hwrm_err)
+       if (rc)
                goto err_exit;
 
        if ((install_type & 0xffff) == 0)
@@ -2069,20 +2068,19 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
        install.install_type = cpu_to_le32(install_type);
 
        mutex_lock(&bp->hwrm_cmd_lock);
-       hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
-                                     INSTALL_PACKAGE_TIMEOUT);
-       if (hwrm_err) {
+       rc = _hwrm_send_message(bp, &install, sizeof(install),
+                               INSTALL_PACKAGE_TIMEOUT);
+       if (rc) {
                u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
 
                if (resp->error_code && error_code ==
                    NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
                        install.flags |= cpu_to_le16(
                               NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
-                       hwrm_err = _hwrm_send_message(bp, &install,
-                                                     sizeof(install),
-                                                     INSTALL_PACKAGE_TIMEOUT);
+                       rc = _hwrm_send_message(bp, &install, sizeof(install),
+                                               INSTALL_PACKAGE_TIMEOUT);
                }
-               if (hwrm_err)
+               if (rc)
                        goto flash_pkg_exit;
        }
 
@@ -2094,7 +2092,7 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
 flash_pkg_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
 err_exit:
-       if (hwrm_err == -EACCES)
+       if (rc == -EACCES)
                bnxt_print_admin_err(bp);
        return rc;
 }
index e50a153..1d678be 100644 (file)
@@ -94,12 +94,6 @@ static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
        bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
 }
 
-static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
-                                           void __iomem *d)
-{
-       return bcmgenet_readl(d + DMA_DESC_LENGTH_STATUS);
-}
-
 static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
                                    void __iomem *d,
                                    dma_addr_t addr)
@@ -508,61 +502,6 @@ static int bcmgenet_set_link_ksettings(struct net_device *dev,
        return phy_ethtool_ksettings_set(dev->phydev, cmd);
 }
 
-static void bcmgenet_set_rx_csum(struct net_device *dev,
-                                netdev_features_t wanted)
-{
-       struct bcmgenet_priv *priv = netdev_priv(dev);
-       u32 rbuf_chk_ctrl;
-       bool rx_csum_en;
-
-       rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
-
-       rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
-
-       /* enable rx checksumming */
-       if (rx_csum_en)
-               rbuf_chk_ctrl |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
-       else
-               rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
-       priv->desc_rxchk_en = rx_csum_en;
-
-       /* If UniMAC forwards CRC, we need to skip over it to get
-        * a valid CHK bit to be set in the per-packet status word
-       */
-       if (rx_csum_en && priv->crc_fwd_en)
-               rbuf_chk_ctrl |= RBUF_SKIP_FCS;
-       else
-               rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;
-
-       bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
-}
-
-static void bcmgenet_set_tx_csum(struct net_device *dev,
-                                netdev_features_t wanted)
-{
-       struct bcmgenet_priv *priv = netdev_priv(dev);
-       bool desc_64b_en;
-       u32 tbuf_ctrl, rbuf_ctrl;
-
-       tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
-       rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
-
-       desc_64b_en = !!(wanted & NETIF_F_HW_CSUM);
-
-       /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
-       if (desc_64b_en) {
-               tbuf_ctrl |= RBUF_64B_EN;
-               rbuf_ctrl |= RBUF_64B_EN;
-       } else {
-               tbuf_ctrl &= ~RBUF_64B_EN;
-               rbuf_ctrl &= ~RBUF_64B_EN;
-       }
-       priv->desc_64b_en = desc_64b_en;
-
-       bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
-       bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
-}
-
 static int bcmgenet_set_features(struct net_device *dev,
                                 netdev_features_t features)
 {
@@ -578,9 +517,6 @@ static int bcmgenet_set_features(struct net_device *dev,
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
        priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
 
-       bcmgenet_set_tx_csum(dev, features);
-       bcmgenet_set_rx_csum(dev, features);
-
        clk_disable_unprepare(priv->clk);
 
        return ret;
@@ -1475,8 +1411,8 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
 /* Reallocate the SKB to put enough headroom in front of it and insert
  * the transmit checksum offsets in the descriptors
  */
-static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
-                                           struct sk_buff *skb)
+static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
+                                       struct sk_buff *skb)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct status_64 *status = NULL;
@@ -1590,13 +1526,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
         */
        GENET_CB(skb)->bytes_sent = skb->len;
 
-       /* set the SKB transmit checksum */
-       if (priv->desc_64b_en) {
-               skb = bcmgenet_put_tx_csum(dev, skb);
-               if (!skb) {
-                       ret = NETDEV_TX_OK;
-                       goto out;
-               }
+       /* add the Transmit Status Block */
+       skb = bcmgenet_add_tsb(dev, skb);
+       if (!skb) {
+               ret = NETDEV_TX_OK;
+               goto out;
        }
 
        for (i = 0; i <= nr_frags; i++) {
@@ -1775,6 +1709,9 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
 
        while ((rxpktprocessed < rxpkttoprocess) &&
               (rxpktprocessed < budget)) {
+               struct status_64 *status;
+               __be16 rx_csum;
+
                cb = &priv->rx_cbs[ring->read_ptr];
                skb = bcmgenet_rx_refill(priv, cb);
 
@@ -1783,20 +1720,12 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
                        goto next;
                }
 
-               if (!priv->desc_64b_en) {
-                       dma_length_status =
-                               dmadesc_get_length_status(priv, cb->bd_addr);
-               } else {
-                       struct status_64 *status;
-                       __be16 rx_csum;
-
-                       status = (struct status_64 *)skb->data;
-                       dma_length_status = status->length_status;
+               status = (struct status_64 *)skb->data;
+               dma_length_status = status->length_status;
+               if (dev->features & NETIF_F_RXCSUM) {
                        rx_csum = (__force __be16)(status->rx_csum & 0xffff);
-                       if (priv->desc_rxchk_en) {
-                               skb->csum = (__force __wsum)ntohs(rx_csum);
-                               skb->ip_summed = CHECKSUM_COMPLETE;
-                       }
+                       skb->csum = (__force __wsum)ntohs(rx_csum);
+                       skb->ip_summed = CHECKSUM_COMPLETE;
                }
 
                /* DMA flags and length are still valid no matter how
@@ -1840,14 +1769,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
                } /* error packet */
 
                skb_put(skb, len);
-               if (priv->desc_64b_en) {
-                       skb_pull(skb, 64);
-                       len -= 64;
-               }
 
-               /* remove hardware 2bytes added for IP alignment */
-               skb_pull(skb, 2);
-               len -= 2;
+               /* remove RSB and hardware 2 bytes added for IP alignment */
+               skb_pull(skb, 66);
+               len -= 66;
 
                if (priv->crc_fwd_en) {
                        skb_trim(skb, len - ETH_FCS_LEN);
@@ -1965,6 +1890,8 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
        u32 reg;
 
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       if (reg & CMD_SW_RESET)
+               return;
        if (enable)
                reg |= mask;
        else
@@ -1984,11 +1911,9 @@ static void reset_umac(struct bcmgenet_priv *priv)
        bcmgenet_rbuf_ctrl_set(priv, 0);
        udelay(10);
 
-       /* disable MAC while updating its registers */
-       bcmgenet_umac_writel(priv, 0, UMAC_CMD);
-
-       /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
-       bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD);
+       /* issue soft reset and disable MAC while updating its registers */
+       bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
+       udelay(2);
 }
 
 static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
@@ -2038,11 +1963,28 @@ static void init_umac(struct bcmgenet_priv *priv)
 
        bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
 
-       /* init rx registers, enable ip header optimization */
+       /* init tx registers, enable TSB */
+       reg = bcmgenet_tbuf_ctrl_get(priv);
+       reg |= TBUF_64B_EN;
+       bcmgenet_tbuf_ctrl_set(priv, reg);
+
+       /* init rx registers, enable ip header optimization and RSB */
        reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
-       reg |= RBUF_ALIGN_2B;
+       reg |= RBUF_ALIGN_2B | RBUF_64B_EN;
        bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
 
+       /* enable rx checksumming */
+       reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
+       reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
+       /* If UniMAC forwards CRC, we need to skip over it to get
+        * a valid CHK bit to be set in the per-packet status word
+        */
+       if (priv->crc_fwd_en)
+               reg |= RBUF_SKIP_FCS;
+       else
+               reg &= ~RBUF_SKIP_FCS;
+       bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL);
+
        if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
                bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
 
index 61a6fe9..daf8fb2 100644 (file)
@@ -273,6 +273,7 @@ struct bcmgenet_mib_counters {
 #define  RBUF_FLTR_LEN_SHIFT           8
 
 #define TBUF_CTRL                      0x00
+#define  TBUF_64B_EN                   (1 << 0)
 #define TBUF_BP_MC                     0x0C
 #define TBUF_ENERGY_CTRL               0x14
 #define  TBUF_EEE_EN                   (1 << 0)
@@ -662,8 +663,6 @@ struct bcmgenet_priv {
        unsigned int irq0_stat;
 
        /* HW descriptors/checksum variables */
-       bool desc_64b_en;
-       bool desc_rxchk_en;
        bool crc_fwd_en;
 
        u32 dma_max_burst_length;
index ea20d94..c9a4369 100644 (file)
@@ -132,8 +132,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
                return -EINVAL;
        }
 
-       /* disable RX */
+       /* Can't suspend with WoL if MAC is still in reset */
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       if (reg & CMD_SW_RESET)
+               reg &= ~CMD_SW_RESET;
+
+       /* disable RX */
        reg &= ~CMD_RX_EN;
        bcmgenet_umac_writel(priv, reg, UMAC_CMD);
        mdelay(10);
index 1024494..b5930f8 100644 (file)
@@ -95,6 +95,12 @@ void bcmgenet_mii_setup(struct net_device *dev)
                               CMD_HD_EN |
                               CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
                reg |= cmd_bits;
+               if (reg & CMD_SW_RESET) {
+                       reg &= ~CMD_SW_RESET;
+                       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+                       udelay(2);
+                       reg |= CMD_TX_EN | CMD_RX_EN;
+               }
                bcmgenet_umac_writel(priv, reg, UMAC_CMD);
        } else {
                /* done if nothing has changed */
@@ -181,38 +187,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
        const char *phy_name = NULL;
        u32 id_mode_dis = 0;
        u32 port_ctrl;
-       int bmcr = -1;
-       int ret;
        u32 reg;
 
-       /* MAC clocking workaround during reset of umac state machines */
-       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
-       if (reg & CMD_SW_RESET) {
-               /* An MII PHY must be isolated to prevent TXC contention */
-               if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
-                       ret = phy_read(phydev, MII_BMCR);
-                       if (ret >= 0) {
-                               bmcr = ret;
-                               ret = phy_write(phydev, MII_BMCR,
-                                               bmcr | BMCR_ISOLATE);
-                       }
-                       if (ret) {
-                               netdev_err(dev, "failed to isolate PHY\n");
-                               return ret;
-                       }
-               }
-               /* Switch MAC clocking to RGMII generated clock */
-               bcmgenet_sys_writel(priv, PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
-               /* Ensure 5 clks with Rx disabled
-                * followed by 5 clks with Reset asserted
-                */
-               udelay(4);
-               reg &= ~(CMD_SW_RESET | CMD_LCL_LOOP_EN);
-               bcmgenet_umac_writel(priv, reg, UMAC_CMD);
-               /* Ensure 5 more clocks before Rx is enabled */
-               udelay(2);
-       }
-
        switch (priv->phy_interface) {
        case PHY_INTERFACE_MODE_INTERNAL:
                phy_name = "internal PHY";
@@ -282,10 +258,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
 
        bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
 
-       /* Restore the MII PHY after isolation */
-       if (bmcr >= 0)
-               phy_write(phydev, MII_BMCR, bmcr);
-
        priv->ext_phy = !priv->internal_phy &&
                        (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
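
The umac_enable_set(), reset_umac() and bcmgenet_mii_setup() hunks above form one hand-off: the MAC is left in CMD_SW_RESET after reset, enable requests are ignored while that bit is set, and the reset is only released (with TX/RX enabled) from the link-up path. A userspace sketch of that sequence; the register is simulated with a plain variable and the bit positions are illustrative, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define CMD_TX_EN    (1u << 0)
#define CMD_RX_EN    (1u << 1)
#define CMD_SW_RESET (1u << 13)

static uint32_t umac_cmd = CMD_SW_RESET;   /* state after reset_umac() */

static void umac_enable_set(uint32_t mask, int enable)
{
	if (umac_cmd & CMD_SW_RESET)       /* still in reset: do nothing */
		return;
	if (enable)
		umac_cmd |= mask;
	else
		umac_cmd &= ~mask;
}

static void link_up(void)
{
	/* mii_setup clears the reset, then turns the MAC on */
	umac_cmd &= ~CMD_SW_RESET;
	umac_cmd |= CMD_TX_EN | CMD_RX_EN;
}

int main(void)
{
	umac_enable_set(CMD_TX_EN | CMD_RX_EN, 1);   /* ignored while in reset */
	printf("before link up: 0x%x\n", umac_cmd);
	link_up();
	printf("after link up:  0x%x\n", umac_cmd);
	return 0;
}
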
 
index 2a2938b..fc05248 100644 (file)
@@ -902,7 +902,7 @@ void clear_all_filters(struct adapter *adapter)
                                adapter->tids.tid_tab[i];
 
                        if (f && (f->valid || f->pending))
-                               cxgb4_del_filter(dev, i, &f->fs);
+                               cxgb4_del_filter(dev, f->tid, &f->fs);
                }
 
                sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A);
@@ -910,7 +910,7 @@ void clear_all_filters(struct adapter *adapter)
                        f = (struct filter_entry *)adapter->tids.tid_tab[i];
 
                        if (f && (f->valid || f->pending))
-                               cxgb4_del_filter(dev, i, &f->fs);
+                               cxgb4_del_filter(dev, f->tid, &f->fs);
                }
        }
 }
index 649842a..97f90ed 100644 (file)
@@ -5381,12 +5381,11 @@ static inline bool is_x_10g_port(const struct link_config *lc)
 static int cfg_queues(struct adapter *adap)
 {
        u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
+       u32 i, n10g = 0, qidx = 0, n1g = 0;
+       u32 ncpus = num_online_cpus();
        u32 niqflint, neq, num_ulds;
        struct sge *s = &adap->sge;
-       u32 i, n10g = 0, qidx = 0;
-#ifndef CONFIG_CHELSIO_T4_DCB
-       int q10g = 0;
-#endif
+       u32 q10g = 0, q1g;
 
        /* Reduce memory usage in kdump environment, disable all offload. */
        if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
@@ -5424,44 +5423,50 @@ static int cfg_queues(struct adapter *adap)
                n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
 
        avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
+
+       /* We default to 1 queue per non-10G port and up to # of cores queues
+        * per 10G port.
+        */
+       if (n10g)
+               q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
+
+       n1g = adap->params.nports - n10g;
 #ifdef CONFIG_CHELSIO_T4_DCB
        /* For Data Center Bridging support we need to be able to support up
         * to 8 Traffic Priorities; each of which will be assigned to its
         * own TX Queue in order to prevent Head-Of-Line Blocking.
         */
+       q1g = 8;
        if (adap->params.nports * 8 > avail_eth_qsets) {
                dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
                        avail_eth_qsets, adap->params.nports * 8);
                return -ENOMEM;
        }
 
-       for_each_port(adap, i) {
-               struct port_info *pi = adap2pinfo(adap, i);
+       if (adap->params.nports * ncpus < avail_eth_qsets)
+               q10g = max(8U, ncpus);
+       else
+               q10g = max(8U, q10g);
 
-               pi->first_qset = qidx;
-               pi->nqsets = is_kdump_kernel() ? 1 : 8;
-               qidx += pi->nqsets;
-       }
-#else /* !CONFIG_CHELSIO_T4_DCB */
-       /* We default to 1 queue per non-10G port and up to # of cores queues
-        * per 10G port.
-        */
-       if (n10g)
-               q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
-       if (q10g > netif_get_num_default_rss_queues())
-               q10g = netif_get_num_default_rss_queues();
+       while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g))
+               q10g--;
 
-       if (is_kdump_kernel())
+#else /* !CONFIG_CHELSIO_T4_DCB */
+       q1g = 1;
+       q10g = min(q10g, ncpus);
+#endif /* !CONFIG_CHELSIO_T4_DCB */
+       if (is_kdump_kernel()) {
                q10g = 1;
+               q1g = 1;
+       }
 
        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);
 
                pi->first_qset = qidx;
-               pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
+               pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g;
                qidx += pi->nqsets;
        }
-#endif /* !CONFIG_CHELSIO_T4_DCB */
 
        s->ethqsets = qidx;
        s->max_ethqsets = qidx;   /* MSI-X may lower it later */
@@ -5473,7 +5478,7 @@ static int cfg_queues(struct adapter *adap)
                 * capped by the number of available cores.
                 */
                num_ulds = adap->num_uld + adap->num_ofld_uld;
-               i = min_t(u32, MAX_OFLD_QSETS, num_online_cpus());
+               i = min_t(u32, MAX_OFLD_QSETS, ncpus);
                avail_uld_qsets = roundup(i, adap->params.nports);
                if (avail_qsets < num_ulds * adap->params.nports) {
                        adap->params.offload = 0;
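
The reworked cfg_queues() above spreads the available Ethernet queue sets as q1g per non-10G port and q10g per 10G port, with q10g capped by the number of online CPUs in the non-DCB case. A small userspace sketch of that arithmetic; the queue budget, port counts and CPU count are made-up numbers, not values from the driver.

#include <stdio.h>

int main(void)
{
	unsigned int avail_eth_qsets = 32;  /* assumed MSI-X/queue budget */
	unsigned int nports = 4, n10g = 2, ncpus = 8;
	unsigned int n1g = nports - n10g;
	unsigned int q1g = 1;               /* non-DCB default per 1G port */
	unsigned int q10g = 0;

	if (n10g)
		q10g = (avail_eth_qsets - n1g * q1g) / n10g;
	if (q10g > ncpus)                   /* q10g = min(q10g, ncpus) */
		q10g = ncpus;

	printf("per-10G qsets: %u, per-1G qsets: %u, total: %u\n",
	       q10g, q1g, q10g * n10g + q1g * n1g);
	return 0;
}
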
index 58a039c..af1f40c 100644 (file)
@@ -246,6 +246,9 @@ static int  cxgb4_ptp_fineadjtime(struct adapter *adapter, s64 delta)
                             FW_PTP_CMD_PORTID_V(0));
        c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
        c.u.ts.sc = FW_PTP_SC_ADJ_FTIME;
+       c.u.ts.sign = (delta < 0) ? 1 : 0;
+       if (delta < 0)
+               delta = -delta;
        c.u.ts.tm = cpu_to_be64(delta);
 
        err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL);
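
The fix above reflects that the ADJ_FTIME firmware command takes an explicit sign flag plus an unsigned magnitude rather than a two's-complement delta, so a negative s64 must not be byte-swapped as-is. A trivial sketch of the sign/magnitude split with an invented value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t delta = -1500;          /* example adjustment in ns */
	unsigned int sign = delta < 0;

	if (delta < 0)
		delta = -delta;

	printf("sign=%u magnitude=%lld\n", sign, (long long)delta);
	return 0;
}
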
index 97cda50..cab3d17 100644 (file)
@@ -1307,8 +1307,9 @@ static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
 int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
                                 int maxreclaim)
 {
+       unsigned int reclaimed, hw_cidx;
        struct sge_txq *q = &eq->q;
-       unsigned int reclaimed;
+       int hw_in_use;
 
        if (!q->in_use || !__netif_tx_trylock(eq->txq))
                return 0;
@@ -1316,12 +1317,17 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
        /* Reclaim pending completed TX Descriptors. */
        reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
 
+       hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
+       hw_in_use = q->pidx - hw_cidx;
+       if (hw_in_use < 0)
+               hw_in_use += q->size;
+
        /* If the TX Queue is currently stopped and there's now more than half
         * the queue available, restart it.  Otherwise bail out since the rest
         * of what we want do here is with the possibility of shipping any
         * currently buffered Coalesced TX Work Request.
         */
-       if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) {
+       if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
                netif_tx_wake_queue(eq->txq);
                eq->q.restarts++;
        }
@@ -1486,16 +1492,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                 * has opened up.
                 */
                eth_txq_stop(q);
-
-               /* If we're using the SGE Doorbell Queue Timer facility, we
-                * don't need to ask the Firmware to send us Egress Queue CIDX
-                * Updates: the Hardware will do this automatically.  And
-                * since we send the Ingress Queue CIDX Updates to the
-                * corresponding Ethernet Response Queue, we'll get them very
-                * quickly.
-                */
-               if (!q->dbqt)
-                       wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+               wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
        }
 
        wr = (void *)&q->q.desc[q->q.pidx];
@@ -1805,16 +1802,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
                 * has opened up.
                 */
                eth_txq_stop(txq);
-
-               /* If we're using the SGE Doorbell Queue Timer facility, we
-                * don't need to ask the Firmware to send us Egress Queue CIDX
-                * Updates: the Hardware will do this automatically.  And
-                * since we send the Ingress Queue CIDX Updates to the
-                * corresponding Ethernet Response Queue, we'll get them very
-                * quickly.
-                */
-               if (!txq->dbqt)
-                       wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+               wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
        }
 
        /* Start filling in our Work Request.  Note that we do _not_ handle
@@ -3370,26 +3358,6 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq,
        }
 
        txq = &s->ethtxq[pi->first_qset + rspq->idx];
-
-       /* We've got the Hardware Consumer Index Update in the Egress Update
-        * message.  If we're using the SGE Doorbell Queue Timer mechanism,
-        * these Egress Update messages will be our sole CIDX Updates we get
-        * since we don't want to chew up PCIe bandwidth for both Ingress
-        * Messages and Status Page writes.  However, The code which manages
-        * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
-        * stored in the Status Page at the end of the TX Queue.  It's easiest
-        * to simply copy the CIDX Update value from the Egress Update message
-        * to the Status Page.  Also note that no Endian issues need to be
-        * considered here since both are Big Endian and we're just copying
-        * bytes consistently ...
-        */
-       if (txq->dbqt) {
-               struct cpl_sge_egr_update *egr;
-
-               egr = (struct cpl_sge_egr_update *)rsp;
-               WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
-       }
-
        t4_sge_eth_txq_egress_update(adapter, txq, -1);
 }
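
The restart condition in t4_sge_eth_txq_egress_update() now looks at how many descriptors the hardware still owns (producer index minus the hardware consumer index, corrected for wrap) instead of the software free-space estimate. A sketch of that ring arithmetic with made-up indices and queue size:

#include <stdio.h>

static int hw_in_use(unsigned int pidx, unsigned int hw_cidx,
		     unsigned int qsize)
{
	int in_use = (int)pidx - (int)hw_cidx;

	if (in_use < 0)                 /* producer wrapped past the end */
		in_use += qsize;
	return in_use;
}

int main(void)
{
	/* invented indices: producer wrapped to 10, hw consumer at 1000 */
	printf("in use: %d of %d\n", hw_in_use(10, 1000, 1024), 1024);
	return 0;
}
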
 
index fd93d54..ca74a68 100644 (file)
@@ -1,4 +1,5 @@
 /* Copyright 2008 - 2016 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -123,7 +124,22 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
 #define FSL_QMAN_MAX_OAL       127
 
 /* Default alignment for start of data in an Rx FD */
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+/* aligning data start to 64 avoids DMA transaction splits, unless the buffer
+ * is crossing a 4k page boundary
+ */
+#define DPAA_FD_DATA_ALIGNMENT  (fman_has_errata_a050385() ? 64 : 16)
+/* aligning to 256 avoids DMA transaction splits caused by 4k page boundary
+ * crossings; also, all SG fragments except the last must have a size multiple
+ * of 256 to avoid DMA transaction splits
+ */
+#define DPAA_A050385_ALIGN 256
+#define DPAA_FD_RX_DATA_ALIGNMENT (fman_has_errata_a050385() ? \
+                                  DPAA_A050385_ALIGN : 16)
+#else
 #define DPAA_FD_DATA_ALIGNMENT  16
+#define DPAA_FD_RX_DATA_ALIGNMENT DPAA_FD_DATA_ALIGNMENT
+#endif
 
 /* The DPAA requires 256 bytes reserved and mapped for the SGT */
 #define DPAA_SGT_SIZE 256
@@ -158,8 +174,13 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
 #define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
 #define DPAA_TIME_STAMP_SIZE 8
 #define DPAA_HASH_RESULTS_SIZE 8
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+#define DPAA_RX_PRIV_DATA_SIZE (DPAA_A050385_ALIGN - (DPAA_PARSE_RESULTS_SIZE\
+        + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE))
+#else
 #define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
                                        dpaa_rx_extra_headroom)
+#endif
 
 #define DPAA_ETH_PCD_RXQ_NUM   128
 
@@ -180,7 +201,12 @@ static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
 
 #define DPAA_BP_RAW_SIZE 4096
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+#define dpaa_bp_size(raw_size) (SKB_WITH_OVERHEAD(raw_size) & \
+                               ~(DPAA_A050385_ALIGN - 1))
+#else
 #define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
+#endif
 
 static int dpaa_max_frm;
 
@@ -1192,7 +1218,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
        buf_prefix_content.pass_prs_result = true;
        buf_prefix_content.pass_hash_result = true;
        buf_prefix_content.pass_time_stamp = true;
-       buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
+       buf_prefix_content.data_align = DPAA_FD_RX_DATA_ALIGNMENT;
 
        rx_p = &params.specific_params.rx_params;
        rx_p->err_fqid = errq->fqid;
@@ -1662,6 +1688,8 @@ static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
        return CHECKSUM_NONE;
 }
 
+#define PTR_IS_ALIGNED(x, a) (IS_ALIGNED((unsigned long)(x), (a)))
+
 /* Build a linear skb around the received buffer.
  * We are guaranteed there is enough room at the end of the data buffer to
  * accommodate the shared info area of the skb.
@@ -1733,8 +1761,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 
                sg_addr = qm_sg_addr(&sgt[i]);
                sg_vaddr = phys_to_virt(sg_addr);
-               WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
-                                   SMP_CACHE_BYTES));
+               WARN_ON(!PTR_IS_ALIGNED(sg_vaddr, SMP_CACHE_BYTES));
 
                dma_unmap_page(priv->rx_dma_dev, sg_addr,
                               DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
@@ -2022,6 +2049,75 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
        return 0;
 }
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s)
+{
+       struct dpaa_priv *priv = netdev_priv(net_dev);
+       struct sk_buff *new_skb, *skb = *s;
+       unsigned char *start, i;
+
+       /* check linear buffer alignment */
+       if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN))
+               goto workaround;
+
+       /* linear buffers just need to have an aligned start */
+       if (!skb_is_nonlinear(skb))
+               return 0;
+
+       /* linear data size for nonlinear skbs needs to be aligned */
+       if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN))
+               goto workaround;
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+               /* all fragments need to have aligned start addresses */
+               if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN))
+                       goto workaround;
+
+               /* all but last fragment need to have aligned sizes */
+               if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) &&
+                   (i < skb_shinfo(skb)->nr_frags - 1))
+                       goto workaround;
+       }
+
+       return 0;
+
+workaround:
+       /* copy all the skb content into a new linear buffer */
+       new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 +
+                                               priv->tx_headroom);
+       if (!new_skb)
+               return -ENOMEM;
+
+       /* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */
+       skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD);
+
+       /* Workaround for DPAA_A050385 requires data start to be aligned */
+       start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN);
+       if (start - new_skb->data != 0)
+               skb_reserve(new_skb, start - new_skb->data);
+
+       skb_put(new_skb, skb->len);
+       skb_copy_bits(skb, 0, new_skb->data, skb->len);
+       skb_copy_header(new_skb, skb);
+       new_skb->dev = skb->dev;
+
+       /* We move the headroom when we align it so we have to reset the
+        * network and transport header offsets relative to the new data
+        * pointer. The checksum offload relies on these offsets.
+        */
+       skb_set_network_header(new_skb, skb_network_offset(skb));
+       skb_set_transport_header(new_skb, skb_transport_offset(skb));
+
+       /* TODO: does timestamping need the result in the old skb? */
+       dev_kfree_skb(skb);
+       *s = new_skb;
+
+       return 0;
+}
+#endif
+
 static netdev_tx_t
 dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 {
@@ -2068,6 +2164,14 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
                nonlinear = skb_is_nonlinear(skb);
        }
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+       if (unlikely(fman_has_errata_a050385())) {
+               if (dpaa_a050385_wa(net_dev, &skb))
+                       goto enomem;
+               nonlinear = skb_is_nonlinear(skb);
+       }
+#endif
+
        if (nonlinear) {
                /* Just create a S/G fd based on the skb */
                err = skb_to_sg_fd(priv, skb, &fd);
@@ -2741,9 +2845,7 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
        headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
                DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
 
-       return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom,
-                                             DPAA_FD_DATA_ALIGNMENT) :
-                                       headroom;
+       return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
 }
 
 static int dpaa_eth_probe(struct platform_device *pdev)
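
dpaa_a050385_wa() above only falls back to copying the skb into a freshly aligned linear buffer when one of the erratum A050385 alignment conditions is violated. The sketch below restates those checks in userspace; the 256-byte constant mirrors DPAA_A050385_ALIGN, while the buffer address and fragment layout are invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define A050385_ALIGN 256

static bool needs_copy(uintptr_t data, size_t headlen,
		       const size_t *frag_off, const size_t *frag_len,
		       int nr_frags)
{
	if (data % A050385_ALIGN)              /* data start must be aligned */
		return true;
	if (!nr_frags)                         /* linear skb: start is enough */
		return false;
	if (headlen % A050385_ALIGN)           /* linear part size must align */
		return true;
	for (int i = 0; i < nr_frags; i++) {
		if (frag_off[i] % A050385_ALIGN)
			return true;
		/* every fragment except the last needs an aligned size too */
		if (i < nr_frags - 1 && frag_len[i] % A050385_ALIGN)
			return true;
	}
	return false;
}

int main(void)
{
	size_t off[2] = { 0, 256 }, len[2] = { 300, 100 };

	printf("copy needed: %d\n", needs_copy(0x1000, 512, off, len, 2));
	return 0;
}
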
index 4432a59..23c5fef 100644 (file)
@@ -2529,15 +2529,15 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
                return -EINVAL;
        }
 
-       cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
+       cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
        if (cycle > 0xFFFF) {
                dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
                return -EINVAL;
        }
 
-       cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
+       cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
        if (cycle > 0xFFFF) {
-               dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
+               dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
                return -EINVAL;
        }
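
The fec_enet_set_coalesce() change above validates the values the caller is requesting (ec->rx_coalesce_usecs / ec->tx_coalesce_usecs) instead of the previously programmed ones, so an out-of-range request is rejected up front. A hedged sketch of that check; the cycles-per-microsecond factor is an assumption, not what fec_enet_us_to_itr_clock() actually computes.

#include <stdio.h>

#define ITR_CLK_PER_US 64          /* assumed cycles per microsecond */

static int check_coalesce(unsigned int requested_usecs)
{
	unsigned long cycle = (unsigned long)requested_usecs * ITR_CLK_PER_US;

	if (cycle > 0xFFFF)        /* hardware field is 16 bits wide */
		return -22;        /* -EINVAL */
	return 0;
}

int main(void)
{
	printf("request 100us: %d, request 5000us: %d\n",
	       check_coalesce(100), check_coalesce(5000));
	return 0;
}
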
 
index 0139cb9..3415018 100644 (file)
@@ -8,3 +8,31 @@ config FSL_FMAN
        help
                Freescale Data-Path Acceleration Architecture Frame Manager
                (FMan) support
+
+config DPAA_ERRATUM_A050385
+       bool
+       depends on ARM64 && FSL_DPAA
+       default y
+       help
+               DPAA FMan erratum A050385 software workaround implementation:
+               align buffers, data start, SG fragment length to avoid FMan DMA
+               splits.
+               FMAN DMA reads or writes under heavy traffic load may cause an
+               FMAN internal resource leak, stopping further packet processing.
+               The FMAN internal queue can overflow when FMAN splits single
+               read or write transactions into multiple smaller transactions
+               such that more than 17 AXI transactions are in flight from FMAN
+               to interconnect. When the FMAN internal queue overflows, it can
+               stall further packet processing. The issue can occur with any
+               one of the following three conditions:
+               1. FMAN AXI transaction crosses 4K address boundary (Errata
+               A010022)
+               2. FMAN DMA address for an AXI transaction is not 16 byte
+               aligned, i.e. the last 4 bits of an address are non-zero
+               3. Scatter Gather (SG) frames have more than one SG buffer in
+               the SG list and any one of the buffers, except the last
+               buffer in the SG list has data size that is not a multiple
+               of 16 bytes, i.e., other than 16, 32, 48, 64, etc.
+               With any one of the above three conditions present, there is
+               likelihood of stalled FMAN packet processing, especially under
+               stress with multiple ports injecting line-rate traffic.
index 934111d..f151d6e 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -566,6 +567,10 @@ struct fman_cfg {
        u32 qmi_def_tnums_thresh;
 };
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+static bool fman_has_err_a050385;
+#endif
+
 static irqreturn_t fman_exceptions(struct fman *fman,
                                   enum fman_exceptions exception)
 {
@@ -2518,6 +2523,14 @@ struct fman *fman_bind(struct device *fm_dev)
 }
 EXPORT_SYMBOL(fman_bind);
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+bool fman_has_errata_a050385(void)
+{
+       return fman_has_err_a050385;
+}
+EXPORT_SYMBOL(fman_has_errata_a050385);
+#endif
+
 static irqreturn_t fman_err_irq(int irq, void *handle)
 {
        struct fman *fman = (struct fman *)handle;
@@ -2845,6 +2858,11 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
                goto fman_free;
        }
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+       fman_has_err_a050385 =
+               of_property_read_bool(fm_node, "fsl,erratum-a050385");
+#endif
+
        return fman;
 
 fman_node_put:
index 935c317..f2ede13 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -398,6 +399,10 @@ u16 fman_get_max_frm(void);
 
 int fman_get_rx_extra_headroom(void);
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+bool fman_has_errata_a050385(void);
+#endif
+
 struct fman *fman_bind(struct device *dev);
 
 #endif /* __FM_H */
index e190187..0d2b4ab 100644 (file)
@@ -782,7 +782,7 @@ int memac_adjust_link(struct fman_mac *memac, u16 speed)
        /* Set full duplex */
        tmp &= ~IF_MODE_HD;
 
-       if (memac->phy_if == PHY_INTERFACE_MODE_RGMII) {
+       if (phy_interface_mode_is_rgmii(memac->phy_if)) {
                /* Configure RGMII in manual mode */
                tmp &= ~IF_MODE_RGMII_AUTO;
                tmp &= ~IF_MODE_RGMII_SP_MASK;
index 1b03139..d87158a 100644 (file)
@@ -46,6 +46,7 @@ enum HCLGE_MBX_OPCODE {
        HCLGE_MBX_PUSH_VLAN_INFO,       /* (PF -> VF) push port base vlan */
        HCLGE_MBX_GET_MEDIA_TYPE,       /* (VF -> PF) get media type */
        HCLGE_MBX_PUSH_PROMISC_INFO,    /* (PF -> VF) push vf promisc info */
+       HCLGE_MBX_VF_UNINIT,            /* (VF -> PF) vf is uninitializing */
 
        HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
        HCLGE_MBX_PUSH_LINK_STATUS,     /* (M7 -> PF) get port link status */
index acb796c..a7f40aa 100644 (file)
@@ -1711,7 +1711,7 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
        netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
 
        return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
-               kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
+               kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP;
 }
 
 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
index 492bc94..d3b0cd7 100644 (file)
@@ -2446,10 +2446,12 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
 
 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
 {
+       struct hclge_mac *mac = &hdev->hw.mac;
        int ret;
 
        duplex = hclge_check_speed_dup(duplex, speed);
-       if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
+       if (!mac->support_autoneg && mac->speed == speed &&
+           mac->duplex == duplex)
                return 0;
 
        ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
@@ -7743,16 +7745,27 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
        struct hclge_desc desc;
        int ret;
 
-       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
-
+       /* read current vlan filter parameter */
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
        req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
        req->vlan_type = vlan_type;
-       req->vlan_fe = filter_en ? fe_type : 0;
        req->vf_id = vf_id;
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "failed to get vlan filter config, ret = %d.\n", ret);
+               return ret;
+       }
+
+       /* modify and write new config parameter */
+       hclge_cmd_reuse_desc(&desc, false);
+       req->vlan_fe = filter_en ?
+                       (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
+
+       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
-               dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
+               dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
                        ret);
 
        return ret;
@@ -8270,6 +8283,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
                        kfree(vlan);
                }
        }
+       clear_bit(vport->vport_id, hdev->vf_vlan_full);
 }
 
 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
@@ -8486,6 +8500,28 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
        }
 }
 
+static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
+{
+       struct hclge_vlan_info *vlan_info;
+       struct hclge_vport *vport;
+       int ret;
+       int vf;
+
+       /* clear port base vlan for all vf */
+       for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
+               vport = &hdev->vport[vf];
+               vlan_info = &vport->port_base_vlan_cfg.vlan_info;
+
+               ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+                                              vport->vport_id,
+                                              vlan_info->vlan_tag, true);
+               if (ret)
+                       dev_err(&hdev->pdev->dev,
+                               "failed to clear vf vlan for vf%d, ret = %d\n",
+                               vf - HCLGE_VF_VPORT_START_NUM, ret);
+       }
+}
+
 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
                          u16 vlan_id, bool is_kill)
 {
@@ -9895,6 +9931,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
        struct hclge_mac *mac = &hdev->hw.mac;
 
        hclge_reset_vf_rate(hdev);
+       hclge_clear_vf_vlan(hdev);
        hclge_misc_affinity_teardown(hdev);
        hclge_state_uninit(hdev);
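
hclge_set_vlan_filter_ctrl() above now does a read-modify-write of the firmware's VLAN filter-enable field, so enabling or disabling one filter type no longer clobbers the others. A userspace sketch of that pattern with the firmware register replaced by a plain variable and invented bit values:

#include <stdint.h>
#include <stdio.h>

static uint8_t hw_vlan_fe = 0x05;      /* pretend firmware already set bits 0,2 */

static void set_vlan_filter_bits(uint8_t fe_type, int enable)
{
	uint8_t val = hw_vlan_fe;       /* read current config from "firmware" */

	if (enable)
		val |= fe_type;
	else
		val &= ~fe_type;
	hw_vlan_fe = val;               /* write the merged value back */
}

int main(void)
{
	set_vlan_filter_bits(0x02, 1);  /* enable one more filter type */
	printf("vlan_fe is now 0x%02x\n", hw_vlan_fe);
	return 0;
}
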
 
index a3c0822..3d850f6 100644 (file)
@@ -799,6 +799,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
                        hclge_get_link_mode(vport, req);
                        break;
                case HCLGE_MBX_GET_VF_FLR_STATUS:
+               case HCLGE_MBX_VF_UNINIT:
                        hclge_rm_vport_all_mac_table(vport, true,
                                                     HCLGE_MAC_ADDR_UC);
                        hclge_rm_vport_all_mac_table(vport, true,
index d659720..0510d85 100644 (file)
@@ -2803,6 +2803,9 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
 {
        hclgevf_state_uninit(hdev);
 
+       hclgevf_send_mbx_msg(hdev, HCLGE_MBX_VF_UNINIT, 0, NULL, 0,
+                            false, NULL, 0);
+
        if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
                hclgevf_misc_irq_uninit(hdev);
                hclgevf_uninit_msi(hdev);
index eb53c15..5f2d57d 100644 (file)
@@ -389,7 +389,8 @@ static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
 
        spin_unlock_bh(&cmdq->cmdq_lock);
 
-       if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) {
+       if (!wait_for_completion_timeout(&done,
+                                        msecs_to_jiffies(CMDQ_TIMEOUT))) {
                spin_lock_bh(&cmdq->cmdq_lock);
 
                if (cmdq->errcode[curr_prod_idx] == &errcode)
@@ -623,6 +624,8 @@ static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci,
        if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info)))
                return -EBUSY;
 
+       dma_rmb();
+
        errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL);
 
        cmdq_sync_cmd_handler(cmdq, ci, errcode);
index 79b3d53..c7c75b7 100644 (file)
@@ -360,50 +360,6 @@ static int wait_for_db_state(struct hinic_hwdev *hwdev)
        return -EFAULT;
 }
 
-static int wait_for_io_stopped(struct hinic_hwdev *hwdev)
-{
-       struct hinic_cmd_io_status cmd_io_status;
-       struct hinic_hwif *hwif = hwdev->hwif;
-       struct pci_dev *pdev = hwif->pdev;
-       struct hinic_pfhwdev *pfhwdev;
-       unsigned long end;
-       u16 out_size;
-       int err;
-
-       if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
-               dev_err(&pdev->dev, "Unsupported PCI Function type\n");
-               return -EINVAL;
-       }
-
-       pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
-       cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
-       end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT);
-       do {
-               err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
-                                       HINIC_COMM_CMD_IO_STATUS_GET,
-                                       &cmd_io_status, sizeof(cmd_io_status),
-                                       &cmd_io_status, &out_size,
-                                       HINIC_MGMT_MSG_SYNC);
-               if ((err) || (out_size != sizeof(cmd_io_status))) {
-                       dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n",
-                               err);
-                       return err;
-               }
-
-               if (cmd_io_status.status == IO_STOPPED) {
-                       dev_info(&pdev->dev, "IO stopped\n");
-                       return 0;
-               }
-
-               msleep(20);
-       } while (time_before(jiffies, end));
-
-       dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n");
-       return -ETIMEDOUT;
-}
-
 /**
  * clear_io_resource - set the IO resources as not active in the NIC
  * @hwdev: the NIC HW device
@@ -423,11 +379,8 @@ static int clear_io_resources(struct hinic_hwdev *hwdev)
                return -EINVAL;
        }
 
-       err = wait_for_io_stopped(hwdev);
-       if (err) {
-               dev_err(&pdev->dev, "IO has not stopped yet\n");
-               return err;
-       }
+       /* sleep 100ms to wait for the firmware to stop I/O */
+       msleep(100);
 
        cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
 
index 79243b6..c0b6bcb 100644 (file)
@@ -188,7 +188,7 @@ static u8 eq_cons_idx_checksum_set(u32 val)
  * eq_update_ci - update the HW cons idx of event queue
  * @eq: the event queue to update the cons idx for
  **/
-static void eq_update_ci(struct hinic_eq *eq)
+static void eq_update_ci(struct hinic_eq *eq, u32 arm_state)
 {
        u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq);
 
@@ -202,7 +202,7 @@ static void eq_update_ci(struct hinic_eq *eq)
 
        val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX)    |
               HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) |
-              HINIC_EQ_CI_SET(EQ_ARMED, INT_ARMED);
+              HINIC_EQ_CI_SET(arm_state, INT_ARMED);
 
        val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
 
@@ -235,6 +235,8 @@ static void aeq_irq_handler(struct hinic_eq *eq)
                if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
                        break;
 
+               dma_rmb();
+
                event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
                if (event >= HINIC_MAX_AEQ_EVENTS) {
                        dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event);
@@ -347,7 +349,7 @@ static void eq_irq_handler(void *data)
        else if (eq->type == HINIC_CEQ)
                ceq_irq_handler(eq);
 
-       eq_update_ci(eq);
+       eq_update_ci(eq, EQ_ARMED);
 }
 
 /**
@@ -702,7 +704,7 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
        }
 
        set_eq_ctrls(eq);
-       eq_update_ci(eq);
+       eq_update_ci(eq, EQ_ARMED);
 
        err = alloc_eq_pages(eq);
        if (err) {
@@ -752,18 +754,28 @@ err_req_irq:
  **/
 static void remove_eq(struct hinic_eq *eq)
 {
-       struct msix_entry *entry = &eq->msix_entry;
-
-       free_irq(entry->vector, eq);
+       hinic_set_msix_state(eq->hwif, eq->msix_entry.entry,
+                            HINIC_MSIX_DISABLE);
+       free_irq(eq->msix_entry.vector, eq);
 
        if (eq->type == HINIC_AEQ) {
                struct hinic_eq_work *aeq_work = &eq->aeq_work;
 
                cancel_work_sync(&aeq_work->work);
+               /* clear aeq_len to keep hw from accessing host memory */
+               hinic_hwif_write_reg(eq->hwif,
+                                    HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
        } else if (eq->type == HINIC_CEQ) {
                tasklet_kill(&eq->ceq_tasklet);
+               /* clear ceq_len to keep hw from accessing host memory */
+               hinic_hwif_write_reg(eq->hwif,
+                                    HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id), 0);
        }
 
+       /* update cons_idx to avoid invalid interrupt */
+       eq->cons_idx = hinic_hwif_read_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq));
+       eq_update_ci(eq, EQ_NOT_ARMED);
+
        free_eq_pages(eq);
 }
 
index c1a6be6..8995e32 100644 (file)
@@ -43,7 +43,7 @@
 
 #define MSG_NOT_RESP                    0xFFFF
 
-#define MGMT_MSG_TIMEOUT                1000
+#define MGMT_MSG_TIMEOUT                5000
 
 #define mgmt_to_pfhwdev(pf_mgmt)        \
                container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
@@ -267,7 +267,8 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
                goto unlock_sync_msg;
        }
 
-       if (!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) {
+       if (!wait_for_completion_timeout(recv_done,
+                                        msecs_to_jiffies(MGMT_MSG_TIMEOUT))) {
                dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
                err = -ETIMEDOUT;
                goto unlock_sync_msg;
index 2695ad6..815649e 100644 (file)
@@ -350,6 +350,9 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
                if (!rq_wqe)
                        break;
 
+               /* make sure we read rx_done before packet length */
+               dma_rmb();
+
                cqe = rq->cqe[ci];
                status =  be32_to_cpu(cqe->status);
                hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
index 0e13d1c..3650164 100644 (file)
@@ -45,7 +45,7 @@
 
 #define HW_CONS_IDX(sq)                 be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))
 
-#define MIN_SKB_LEN                     17
+#define MIN_SKB_LEN                    32
 
 #define        MAX_PAYLOAD_OFFSET              221
 #define TRANSPORT_OFFSET(l4_hdr, skb)  ((u32)((l4_hdr) - (skb)->data))
@@ -622,6 +622,8 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
        do {
                hw_ci = HW_CONS_IDX(sq) & wq->mask;
 
+               dma_rmb();
+
                /* Reading a WQEBB to get real WQE size and consumer index. */
                sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
                if ((!sq_wqe) ||
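
The dma_rmb() calls added across the hinic command-queue, event-queue, RX and TX completion paths above all enforce the same rule: the completion/ownership flag must be observed before the descriptor contents the device wrote earlier are read. Below is a userspace analogue using a C11 acquire/release pair; the kernel uses dma_rmb() because the writer is the device, not another CPU thread.

#include <stdatomic.h>
#include <stdio.h>

struct desc {
	unsigned int len;          /* written by the "device" first */
	atomic_uint done;          /* then the completion flag is set */
};

static int poll_desc(struct desc *d)
{
	/* acquire: everything written before "done" is visible after this */
	if (!atomic_load_explicit(&d->done, memory_order_acquire))
		return -1;
	return (int)d->len;        /* safe to read the payload now */
}

int main(void)
{
	struct desc d = { .len = 0, .done = 0 };

	d.len = 1514;
	atomic_store_explicit(&d.done, 1, memory_order_release);
	printf("completed length: %d\n", poll_desc(&d));
	return 0;
}
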
index c75239d..4bd3324 100644 (file)
@@ -2142,6 +2142,8 @@ static void __ibmvnic_reset(struct work_struct *work)
 {
        struct ibmvnic_rwi *rwi;
        struct ibmvnic_adapter *adapter;
+       bool saved_state = false;
+       unsigned long flags;
        u32 reset_state;
        int rc = 0;
 
@@ -2153,17 +2155,25 @@ static void __ibmvnic_reset(struct work_struct *work)
                return;
        }
 
-       reset_state = adapter->state;
-
        rwi = get_next_rwi(adapter);
        while (rwi) {
+               spin_lock_irqsave(&adapter->state_lock, flags);
+
                if (adapter->state == VNIC_REMOVING ||
                    adapter->state == VNIC_REMOVED) {
+                       spin_unlock_irqrestore(&adapter->state_lock, flags);
                        kfree(rwi);
                        rc = EBUSY;
                        break;
                }
 
+               if (!saved_state) {
+                       reset_state = adapter->state;
+                       adapter->state = VNIC_RESETTING;
+                       saved_state = true;
+               }
+               spin_unlock_irqrestore(&adapter->state_lock, flags);
+
                if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
                        /* CHANGE_PARAM requestor holds rtnl_lock */
                        rc = do_change_param_reset(adapter, rwi, reset_state);
@@ -5091,6 +5101,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
                          __ibmvnic_delayed_reset);
        INIT_LIST_HEAD(&adapter->rwi_list);
        spin_lock_init(&adapter->rwi_lock);
+       spin_lock_init(&adapter->state_lock);
        mutex_init(&adapter->fw_lock);
        init_completion(&adapter->init_done);
        init_completion(&adapter->fw_done);
@@ -5163,8 +5174,17 @@ static int ibmvnic_remove(struct vio_dev *dev)
 {
        struct net_device *netdev = dev_get_drvdata(&dev->dev);
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&adapter->state_lock, flags);
+       if (adapter->state == VNIC_RESETTING) {
+               spin_unlock_irqrestore(&adapter->state_lock, flags);
+               return -EBUSY;
+       }
 
        adapter->state = VNIC_REMOVING;
+       spin_unlock_irqrestore(&adapter->state_lock, flags);
+
        rtnl_lock();
        unregister_netdevice(netdev);
 
index 60eccaf..f8416e1 100644 (file)
@@ -941,7 +941,8 @@ enum vnic_state {VNIC_PROBING = 1,
                 VNIC_CLOSING,
                 VNIC_CLOSED,
                 VNIC_REMOVING,
-                VNIC_REMOVED};
+                VNIC_REMOVED,
+                VNIC_RESETTING};
 
 enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
                           VNIC_RESET_MOBILITY,
@@ -1090,4 +1091,7 @@ struct ibmvnic_adapter {
 
        struct ibmvnic_tunables desired;
        struct ibmvnic_tunables fallback;
+
+       /* Used for serialization of state field */
+       spinlock_t state_lock;
 };
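
The new adapter->state_lock and VNIC_RESETTING state above serialize the reset worker against ibmvnic_remove(): remove backs off with -EBUSY while a reset is mid-flight, and the reset worker saves the pre-reset state under the same lock. A userspace sketch of that hand-off with a pthread mutex standing in for the spinlock:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

enum vnic_state { VNIC_OPEN, VNIC_RESETTING, VNIC_REMOVING };

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static enum vnic_state state = VNIC_OPEN;

static int try_remove(void)
{
	int ret = 0;

	pthread_mutex_lock(&state_lock);
	if (state == VNIC_RESETTING)
		ret = -EBUSY;              /* reset still in flight */
	else
		state = VNIC_REMOVING;
	pthread_mutex_unlock(&state_lock);
	return ret;
}

int main(void)
{
	pthread_mutex_lock(&state_lock);
	state = VNIC_RESETTING;            /* reset worker saved old state */
	pthread_mutex_unlock(&state_lock);

	printf("remove while resetting: %d\n", try_remove());
	return 0;
}
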
index 0b9e851..d14762d 100644 (file)
@@ -347,7 +347,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
        }
 
 
-       dev->err_interrupt = platform_get_irq(pdev, 0);
+       dev->err_interrupt = platform_get_irq_optional(pdev, 0);
        if (dev->err_interrupt > 0 &&
            resource_size(r) < MVMDIO_ERR_INT_MASK + 4) {
                dev_err(&pdev->dev,
index 98017e7..11babc7 100644 (file)
@@ -3036,11 +3036,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
        /* For the case where the last mvneta_poll did not process all
         * RX packets
         */
-       rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
-
        cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
                port->cause_rx_tx;
 
+       rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
        if (rx_queue) {
                rx_queue = rx_queue - 1;
                if (pp->bm_priv)
index 9c48182..9486cae 100644 (file)
@@ -906,59 +906,59 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
        int len = 0;
 
        mlx4_err(dev, "%s", str);
-       len += snprintf(buf + len, BUF_SIZE - len,
-                       "port = %d prio = 0x%x qp = 0x%x ",
-                       rule->port, rule->priority, rule->qpn);
+       len += scnprintf(buf + len, BUF_SIZE - len,
+                        "port = %d prio = 0x%x qp = 0x%x ",
+                        rule->port, rule->priority, rule->qpn);
 
        list_for_each_entry(cur, &rule->list, list) {
                switch (cur->id) {
                case MLX4_NET_TRANS_RULE_ID_ETH:
-                       len += snprintf(buf + len, BUF_SIZE - len,
-                                       "dmac = %pM ", &cur->eth.dst_mac);
+                       len += scnprintf(buf + len, BUF_SIZE - len,
+                                        "dmac = %pM ", &cur->eth.dst_mac);
                        if (cur->eth.ether_type)
-                               len += snprintf(buf + len, BUF_SIZE - len,
-                                               "ethertype = 0x%x ",
-                                               be16_to_cpu(cur->eth.ether_type));
+                               len += scnprintf(buf + len, BUF_SIZE - len,
+                                                "ethertype = 0x%x ",
+                                                be16_to_cpu(cur->eth.ether_type));
                        if (cur->eth.vlan_id)
-                               len += snprintf(buf + len, BUF_SIZE - len,
-                                               "vlan-id = %d ",
-                                               be16_to_cpu(cur->eth.vlan_id));
+                               len += scnprintf(buf + len, BUF_SIZE - len,
+                                                "vlan-id = %d ",
+                                                be16_to_cpu(cur->eth.vlan_id));
                        break;
 
                case MLX4_NET_TRANS_RULE_ID_IPV4:
                        if (cur->ipv4.src_ip)
-                               len += snprintf(buf + len, BUF_SIZE - len,
-                                               "src-ip = %pI4 ",
-                                               &cur->ipv4.src_ip);
+                               len += scnprintf(buf + len, BUF_SIZE - len,
+                                                "src-ip = %pI4 ",
+                                                &cur->ipv4.src_ip);
                        if (cur->ipv4.dst_ip)
-                               len += snprintf(buf + len, BUF_SIZE - len,
-                                               "dst-ip = %pI4 ",
-                                               &cur->ipv4.dst_ip);
+                               len += scnprintf(buf + len, BUF_SIZE - len,
+                                                "dst-ip = %pI4 ",
+                                                &cur->ipv4.dst_ip);
                        break;
 
                case MLX4_NET_TRANS_RULE_ID_TCP:
                case MLX4_NET_TRANS_RULE_ID_UDP:
                        if (cur->tcp_udp.src_port)
-                               len += snprintf(buf + len, BUF_SIZE - len,
-                                               "src-port = %d ",
-                                               be16_to_cpu(cur->tcp_udp.src_port));
+                               len += scnprintf(buf + len, BUF_SIZE - len,
+                                                "src-port = %d ",
+                                                be16_to_cpu(cur->tcp_udp.src_port));
                        if (cur->tcp_udp.dst_port)
-                               len += snprintf(buf + len, BUF_SIZE - len,
-                                               "dst-port = %d ",
-                                               be16_to_cpu(cur->tcp_udp.dst_port));
+                               len += scnprintf(buf + len, BUF_SIZE - len,
+                                                "dst-port = %d ",
+                                                be16_to_cpu(cur->tcp_udp.dst_port));
                        break;
 
                case MLX4_NET_TRANS_RULE_ID_IB:
-                       len += snprintf(buf + len, BUF_SIZE - len,
-                                       "dst-gid = %pI6\n", cur->ib.dst_gid);
-                       len += snprintf(buf + len, BUF_SIZE - len,
-                                       "dst-gid-mask = %pI6\n",
-                                       cur->ib.dst_gid_msk);
+                       len += scnprintf(buf + len, BUF_SIZE - len,
+                                        "dst-gid = %pI6\n", cur->ib.dst_gid);
+                       len += scnprintf(buf + len, BUF_SIZE - len,
+                                        "dst-gid-mask = %pI6\n",
+                                        cur->ib.dst_gid_msk);
                        break;
 
                case MLX4_NET_TRANS_RULE_ID_VXLAN:
-                       len += snprintf(buf + len, BUF_SIZE - len,
-                                       "VNID = %d ", be32_to_cpu(cur->vxlan.vni));
+                       len += scnprintf(buf + len, BUF_SIZE - len,
+                                        "VNID = %d ", be32_to_cpu(cur->vxlan.vni));
                        break;
                case MLX4_NET_TRANS_RULE_ID_IPV6:
                        break;
@@ -967,7 +967,7 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
                        break;
                }
        }
-       len += snprintf(buf + len, BUF_SIZE - len, "\n");
+       len += scnprintf(buf + len, BUF_SIZE - len, "\n");
        mlx4_err(dev, "%s", buf);
 
        if (len >= BUF_SIZE)
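
The mlx4 rule dump above moves from snprintf() to scnprintf() because snprintf() returns the length it would have written, so accumulating that into an offset can push "buf + len" past the buffer and make "BUF_SIZE - len" wrap. scnprintf() (kernel-only) returns the bytes actually stored; the helper below emulates that behaviour in userspace to show the difference.

#include <stdarg.h>
#include <stdio.h>

static int scnprintf_like(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	if (n < 0)
		return 0;
	/* report only what was really written (excluding the NUL) */
	return (size_t)n >= size ? (int)size - 1 : n;
}

int main(void)
{
	char buf[16];
	int would = snprintf(buf, sizeof(buf), "%s",
			     "a string longer than 16 bytes");
	int wrote = scnprintf_like(buf, sizeof(buf), "%s",
				   "a string longer than 16 bytes");

	printf("snprintf says %d, scnprintf-style says %d\n", would, wrote);
	return 0;
}
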
index 220ef9f..c9606b8 100644 (file)
@@ -371,6 +371,7 @@ enum {
 
 struct mlx5e_sq_wqe_info {
        u8  opcode;
+       u8 num_wqebbs;
 
        /* Auxiliary data for different opcodes. */
        union {
@@ -1059,6 +1060,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
 void mlx5e_activate_rq(struct mlx5e_rq *rq);
 void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
 void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
+void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
 void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
 void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
 
index d3693fa..e54f70d 100644 (file)
@@ -10,8 +10,7 @@
 
 static inline bool cqe_syndrome_needs_recover(u8 syndrome)
 {
-       return syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR ||
-              syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
+       return syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
               syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR ||
               syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
 }
index 6c72b59..a01e2de 100644 (file)
@@ -90,7 +90,7 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
                goto out;
 
        mlx5e_reset_icosq_cc_pc(icosq);
-       mlx5e_free_rx_descs(rq);
+       mlx5e_free_rx_in_progress_descs(rq);
        clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
        mlx5e_activate_icosq(icosq);
        mlx5e_activate_rq(rq);
index a226277..f07b139 100644 (file)
@@ -181,10 +181,12 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
 
 static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
 {
-       if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+       if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
                mlx5_wq_ll_reset(&rq->mpwqe.wq);
-       else
+               rq->mpwqe.actual_wq_head = 0;
+       } else {
                mlx5_wq_cyc_reset(&rq->wqe.wq);
+       }
 }
 
 /* SW parser related functions */
index a3efa29..63116be 100644 (file)
@@ -38,8 +38,8 @@ enum {
 
 enum {
        MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START     = 0,
-       MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 1,
-       MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING  = 2,
+       MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING  = 1,
+       MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 2,
 };
 
 struct mlx5e_ktls_offload_context_tx {
index f260dd9..52a5662 100644 (file)
@@ -218,7 +218,7 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
         *    this packet was already acknowledged and its record info
         *    was released.
         */
-       ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
+       ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));
 
        if (unlikely(tls_record_is_start_marker(record))) {
                ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
index 21de476..4ef3dc7 100644 (file)
@@ -813,6 +813,29 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
        return -ETIMEDOUT;
 }
 
+void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
+{
+       struct mlx5_wq_ll *wq;
+       u16 head;
+       int i;
+
+       if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+               return;
+
+       wq = &rq->mpwqe.wq;
+       head = wq->head;
+
+       /* Outstanding UMR WQEs (in progress) start at wq->head */
+       for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
+               rq->dealloc_wqe(rq, head);
+               head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
+       }
+
+       rq->mpwqe.actual_wq_head = wq->head;
+       rq->mpwqe.umr_in_progress = 0;
+       rq->mpwqe.umr_completed = 0;
+}
+
 void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
 {
        __be16 wqe_ix_be;
@@ -820,14 +843,8 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
 
        if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
                struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
-               u16 head = wq->head;
-               int i;
 
-               /* Outstanding UMR WQEs (in progress) start at wq->head */
-               for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
-                       rq->dealloc_wqe(rq, head);
-                       head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
-               }
+               mlx5e_free_rx_in_progress_descs(rq);
 
                while (!mlx5_wq_ll_is_empty(wq)) {
                        struct mlx5e_rx_wqe_ll *wqe;
index 1c3ab69..312d469 100644 (file)
@@ -477,6 +477,7 @@ static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
        /* fill sq frag edge with nops to avoid wqe wrapping two pages */
        for (; wi < edge_wi; wi++) {
                wi->opcode = MLX5_OPCODE_NOP;
+               wi->num_wqebbs = 1;
                mlx5e_post_nop(wq, sq->sqn, &sq->pc);
        }
 }
@@ -525,6 +526,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
        umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
 
        sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
+       sq->db.ico_wqe[pi].num_wqebbs = MLX5E_UMR_WQEBBS;
        sq->db.ico_wqe[pi].umr.rq = rq;
        sq->pc += MLX5E_UMR_WQEBBS;
 
@@ -621,6 +623,7 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 
                        ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
                        wi = &sq->db.ico_wqe[ci];
+                       sqcc += wi->num_wqebbs;
 
                        if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
                                netdev_WARN_ONCE(cq->channel->netdev,
@@ -631,16 +634,12 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
                                break;
                        }
 
-                       if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
-                               sqcc += MLX5E_UMR_WQEBBS;
+                       if (likely(wi->opcode == MLX5_OPCODE_UMR))
                                wi->umr.rq->mpwqe.umr_completed++;
-                       } else if (likely(wi->opcode == MLX5_OPCODE_NOP)) {
-                               sqcc++;
-                       } else {
+                       else if (unlikely(wi->opcode != MLX5_OPCODE_NOP))
                                netdev_WARN_ONCE(cq->channel->netdev,
                                                 "Bad OPCODE in ICOSQ WQE info: 0x%x\n",
                                                 wi->opcode);
-                       }
 
                } while (!last_wqe);
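
The completion loop above now advances the consumer counter by the size each WQE recorded for itself when it was posted, instead of hard-coding one increment per opcode. A minimal standalone sketch of that bookkeeping, with illustrative names rather than the driver's:

#include <stdio.h>

#define RING_SIZE 16 /* power of two, so "& (RING_SIZE - 1)" wraps the index */

struct wqe_info {
        int opcode;     /* e.g. 0 = NOP, 1 = UMR */
        int num_wqebbs; /* basic blocks this WQE occupies */
};

int main(void)
{
        struct wqe_info db[RING_SIZE] = {
                [0] = { .opcode = 1, .num_wqebbs = 4 }, /* UMR */
                [4] = { .opcode = 0, .num_wqebbs = 1 }, /* NOP */
                [5] = { .opcode = 1, .num_wqebbs = 4 }, /* UMR */
        };
        unsigned int sqcc = 0; /* consumer counter */

        for (int n = 0; n < 3; n++) {
                struct wqe_info *wi = &db[sqcc & (RING_SIZE - 1)];

                sqcc += wi->num_wqebbs; /* size-aware advance */
                printf("opcode=%d consumed, sqcc=%u\n", wi->opcode, sqcc);
        }
        return 0;
}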
 
index 74091f7..ec5fc52 100644 (file)
@@ -2476,10 +2476,11 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
                        continue;
 
                if (f->field_bsize == 32) {
-                       mask_be32 = *(__be32 *)&mask;
+                       mask_be32 = (__be32)mask;
                        mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
                } else if (f->field_bsize == 16) {
-                       mask_be16 = *(__be16 *)&mask;
+                       mask_be32 = (__be32)mask;
+                       mask_be16 = *(__be16 *)&mask_be32;
                        mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
                }
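
The hunk above stops re-reading the unsigned long through a narrower big-endian pointer and casts the value instead. A hedged userspace sketch of the underlying pitfall; the interesting difference only shows up on a 64-bit big-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned long mask = 0xfffful; /* a 16-bit field mask held in a long */
        uint32_t via_ptr, via_cast;

        memcpy(&via_ptr, &mask, sizeof(via_ptr)); /* like *(__be32 *)&mask    */
        via_cast = (uint32_t)mask;                /* like the fixed code path */

        /* little-endian: both print 0x0000ffff;
         * 64-bit big-endian: via_ptr is 0x00000000 (the high, zero bytes),
         * while the value cast still yields 0x0000ffff. */
        printf("via_ptr=0x%08x via_cast=0x%08x\n", via_ptr, via_cast);
        return 0;
}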
 
index 257a7c9..800d34e 100644 (file)
@@ -78,6 +78,7 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
        u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
 
        sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
+       sq->db.ico_wqe[pi].num_wqebbs = 1;
        nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
        mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
 }
index 8e19f6a..93052b0 100644 (file)
@@ -615,8 +615,10 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
                        break;
 
        if (i == MLX5_MAX_PORTS) {
-               if (ldev->nb.notifier_call)
+               if (ldev->nb.notifier_call) {
                        unregister_netdevice_notifier_net(&init_net, &ldev->nb);
+                       ldev->nb.notifier_call = NULL;
+               }
                mlx5_lag_mp_cleanup(ldev);
                cancel_delayed_work_sync(&ldev->bond_work);
                mlx5_lag_dev_free(ldev);
index 6dec2a5..2d93228 100644 (file)
@@ -933,7 +933,6 @@ static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn,
 
        action->rewrite.data = (void *)ops;
        action->rewrite.num_of_actions = i;
-       action->rewrite.chunk->byte_size = i * sizeof(*ops);
 
        ret = mlx5dr_send_postsend_action(dmn, action);
        if (ret) {
index c7f10d4..095ec7b 100644 (file)
@@ -558,7 +558,8 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
        int ret;
 
        send_info.write.addr = (uintptr_t)action->rewrite.data;
-       send_info.write.length = action->rewrite.chunk->byte_size;
+       send_info.write.length = action->rewrite.num_of_actions *
+                                DR_MODIFY_ACTION_SIZE;
        send_info.write.lkey = 0;
        send_info.remote_addr = action->rewrite.chunk->mr_addr;
        send_info.rkey = action->rewrite.chunk->rkey;
index 1faac31..23f879d 100644 (file)
@@ -1071,6 +1071,9 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
                MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
        if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID)
                MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
+       MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
+       MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select,
+                req->cap_mask1_perm);
        err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
 ex:
        kfree(in);
index 914c33e..e9ded1a 100644 (file)
@@ -1322,36 +1322,64 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
                            mbox->mapaddr);
 }
 
-static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
-                             const struct pci_device_id *id)
+static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
+                                   const struct pci_device_id *id,
+                                   u32 *p_sys_status)
 {
        unsigned long end;
-       char mrsr_pl[MLXSW_REG_MRSR_LEN];
-       int err;
+       u32 val;
 
-       mlxsw_reg_mrsr_pack(mrsr_pl);
-       err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
-       if (err)
-               return err;
        if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
                msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
                return 0;
        }
 
-       /* We must wait for the HW to become responsive once again. */
+       /* We must wait for the HW to become responsive. */
        msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
 
        end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
        do {
-               u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
-
+               val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
                if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
                        return 0;
                cond_resched();
        } while (time_before(jiffies, end));
+
+       *p_sys_status = val & MLXSW_PCI_FW_READY_MASK;
+
        return -EBUSY;
 }
 
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
+                             const struct pci_device_id *id)
+{
+       struct pci_dev *pdev = mlxsw_pci->pdev;
+       char mrsr_pl[MLXSW_REG_MRSR_LEN];
+       u32 sys_status;
+       int err;
+
+       err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
+                       sys_status);
+               return err;
+       }
+
+       mlxsw_reg_mrsr_pack(mrsr_pl);
+       err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
+       if (err)
+               return err;
+
+       err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
+                       sys_status);
+               return err;
+       }
+
+       return 0;
+}
+
 static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
 {
        int err;
index dd66851..e05d1d1 100644 (file)
@@ -3572,7 +3572,7 @@ MLXSW_ITEM32(reg, qeec, mase, 0x10, 31, 1);
  * When in bytes mode, value is specified in units of 1000bps.
  * Access: RW
  */
-MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 28);
+MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 31);
 
 /* reg_qeec_de
  * DWRR configuration enable. Enables configuration of the dwrr and
index 5427562..336e5ec 100644 (file)
@@ -637,12 +637,12 @@ static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
        return 0;
 
 err_erif_unresolve:
-       list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list,
-                                        vif_node)
+       list_for_each_entry_continue_reverse(erve, &mr_vif->route_evif_list,
+                                            vif_node)
                mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
 err_irif_unresolve:
-       list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list,
-                                        vif_node)
+       list_for_each_entry_continue_reverse(irve, &mr_vif->route_ivif_list,
+                                            vif_node)
                mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
        mr_vif->rif = NULL;
        return err;
index 58579ba..45cc840 100644 (file)
@@ -157,6 +157,50 @@ static int msg_enable;
  */
 
 /**
+ * ks_check_endian - Check whether the endianness of the bus is correct
+ * @ks   : The chip information
+ *
+ * The KS8851-16MLL EESK pin allows selecting the endianness of the 16bit
+ * bus. To maintain optimum performance, the bus endianness should be set
+ * such that it matches the endianness of the CPU.
+ */
+
+static int ks_check_endian(struct ks_net *ks)
+{
+       u16 cider;
+
+       /*
+        * Read the CIDER register first, but read it the "wrong" way around.
+        * If the endian strap on the KS8851-16MLL is incorrect and the chip
+        * is operating in a different endianness than the CPU, then the meaning
+        * of the BE[3:0] byte-enable bits is also swapped such that:
+        *    BE[3,2,1,0] becomes BE[1,0,3,2]
+        *
+        * Luckily for us, the byte-enable bits are the top four MSbits of
+        * the address register and the CIDER register is at offset 0xc0.
+        * Hence, by reading address 0xc0c0, which is not impacted by endian
+        * swapping, we assert either BE[3:2] or BE[1:0] while reading the
+        * CIDER register.
+        *
+        * If the bus configuration is correct, reading 0xc0c0 asserts
+        * BE[3:2] and this read returns 0x0000, because to read a register
+        * with the bottom two LSbits of the address set to 0, BE[1:0] must be
+        * asserted.
+        *
+        * If the bus configuration is NOT correct, reading 0xc0c0 asserts
+        * BE[1:0] and this read returns the non-zero value 0x8872.
+        */
+       iowrite16(BE3 | BE2 | KS_CIDER, ks->hw_addr_cmd);
+       cider = ioread16(ks->hw_addr);
+       if (!cider)
+               return 0;
+
+       netdev_err(ks->netdev, "incorrect EESK endian strap setting\n");
+
+       return -EINVAL;
+}
+
+/**
  * ks_rdreg16 - read 16 bit register from device
  * @ks   : The chip information
  * @offset: The register address
@@ -166,7 +210,7 @@ static int msg_enable;
 
 static u16 ks_rdreg16(struct ks_net *ks, int offset)
 {
-       ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02));
+       ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
        iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
        return ioread16(ks->hw_addr);
 }
@@ -181,7 +225,7 @@ static u16 ks_rdreg16(struct ks_net *ks, int offset)
 
 static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
 {
-       ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02));
+       ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
        iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
        iowrite16(value, ks->hw_addr);
 }
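
Taking the in-code comment's "top four MSbits" remark and the 0xc0c0 example at face value, i.e. assuming BE0..BE3 sit in bits 12..15 of the command register, the new byte-enable arithmetic works out as in this small sketch:

#include <stdint.h>
#include <stdio.h>

#define BE0 0x1000
#define BE1 0x2000
#define BE2 0x4000
#define BE3 0x8000

/* New formula from ks_rdreg16()/ks_wrreg16(). */
static uint16_t cmd_for(uint16_t offset)
{
        return offset | ((BE1 | BE0) << (offset & 0x02));
}

int main(void)
{
        printf("0x%04x\n", cmd_for(0xc0)); /* low word:  0x30c0, asserts BE[1:0] */
        printf("0x%04x\n", cmd_for(0xc2)); /* high word: 0xc0c2, asserts BE[3:2] */
        return 0;
}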
@@ -197,7 +241,7 @@ static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
 {
        len >>= 1;
        while (len--)
-               *wptr++ = be16_to_cpu(ioread16(ks->hw_addr));
+               *wptr++ = (u16)ioread16(ks->hw_addr);
 }
 
 /**
@@ -211,7 +255,7 @@ static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
 {
        len >>= 1;
        while (len--)
-               iowrite16(cpu_to_be16(*wptr++), ks->hw_addr);
+               iowrite16(*wptr++, ks->hw_addr);
 }
 
 static void ks_disable_int(struct ks_net *ks)
@@ -1218,6 +1262,10 @@ static int ks8851_probe(struct platform_device *pdev)
                goto err_free;
        }
 
+       err = ks_check_endian(ks);
+       if (err)
+               goto err_free;
+
        netdev->irq = platform_get_irq(pdev, 0);
 
        if ((int)netdev->irq < 0) {
index 86d543a..d3b7373 100644 (file)
@@ -2176,24 +2176,29 @@ static int ocelot_init_timestamp(struct ocelot *ocelot)
        return 0;
 }
 
-static void ocelot_port_set_mtu(struct ocelot *ocelot, int port, size_t mtu)
+/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
+ * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
+ */
+static void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
 {
        struct ocelot_port *ocelot_port = ocelot->ports[port];
+       int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;
        int atop_wm;
 
-       ocelot_port_writel(ocelot_port, mtu, DEV_MAC_MAXLEN_CFG);
+       ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG);
 
        /* Set Pause WM hysteresis
-        * 152 = 6 * mtu / OCELOT_BUFFER_CELL_SZ
-        * 101 = 4 * mtu / OCELOT_BUFFER_CELL_SZ
+        * 152 = 6 * maxlen / OCELOT_BUFFER_CELL_SZ
+        * 101 = 4 * maxlen / OCELOT_BUFFER_CELL_SZ
         */
        ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA |
                         SYS_PAUSE_CFG_PAUSE_STOP(101) |
                         SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, port);
 
        /* Tail dropping watermark */
-       atop_wm = (ocelot->shared_queue_sz - 9 * mtu) / OCELOT_BUFFER_CELL_SZ;
-       ocelot_write_rix(ocelot, ocelot_wm_enc(9 * mtu),
+       atop_wm = (ocelot->shared_queue_sz - 9 * maxlen) /
+                  OCELOT_BUFFER_CELL_SZ;
+       ocelot_write_rix(ocelot, ocelot_wm_enc(9 * maxlen),
                         SYS_ATOP, port);
        ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
 }
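
Worked example of the length accounting that replaces the old "mtu" naming, using the standard kernel constants ETH_HLEN = 14, ETH_FCS_LEN = 4 and ETH_DATA_LEN = 1500; VLAN tags stay out of the sum because the hardware accounts for them via DEV_MAC_TAGS_CFG:

#include <stdio.h>

#define ETH_HLEN     14   /* destination + source MAC + EtherType */
#define ETH_FCS_LEN   4   /* frame check sequence */
#define ETH_DATA_LEN 1500 /* default L2 payload */

int main(void)
{
        int sdu = ETH_DATA_LEN;
        int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;

        printf("maxlen = %d\n", maxlen); /* 1518, the classic maximum frame */
        return 0;
}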
@@ -2222,9 +2227,10 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
                           DEV_MAC_HDX_CFG);
 
        /* Set Max Length and maximum tags allowed */
-       ocelot_port_set_mtu(ocelot, port, VLAN_ETH_FRAME_LEN);
+       ocelot_port_set_maxlen(ocelot, port, ETH_DATA_LEN);
        ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
                           DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
+                          DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA |
                           DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
                           DEV_MAC_TAGS_CFG);
 
@@ -2310,18 +2316,18 @@ void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
         * Only one port can be an NPI at the same time.
         */
        if (cpu < ocelot->num_phys_ports) {
-               int mtu = VLAN_ETH_FRAME_LEN + OCELOT_TAG_LEN;
+               int sdu = ETH_DATA_LEN + OCELOT_TAG_LEN;
 
                ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
                             QSYS_EXT_CPU_CFG_EXT_CPU_PORT(cpu),
                             QSYS_EXT_CPU_CFG);
 
                if (injection == OCELOT_TAG_PREFIX_SHORT)
-                       mtu += OCELOT_SHORT_PREFIX_LEN;
+                       sdu += OCELOT_SHORT_PREFIX_LEN;
                else if (injection == OCELOT_TAG_PREFIX_LONG)
-                       mtu += OCELOT_LONG_PREFIX_LEN;
+                       sdu += OCELOT_LONG_PREFIX_LEN;
 
-               ocelot_port_set_mtu(ocelot, cpu, mtu);
+               ocelot_port_set_maxlen(ocelot, cpu, sdu);
        }
 
        /* CPU port Injection/Extraction configuration */
index e678ba3..628fa9b 100644 (file)
@@ -2045,7 +2045,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
        if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) ||  \
            (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
                if ((mask & VXGE_DEBUG_MASK) == mask)                          \
-                       printk(fmt "\n", __VA_ARGS__);                         \
+                       printk(fmt "\n", ##__VA_ARGS__);                       \
 } while (0)
 #else
 #define vxge_debug_ll(level, mask, fmt, ...)
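
The change from __VA_ARGS__ to ##__VA_ARGS__, here and in the vxge debug macros that follow, matters when a caller passes no variadic arguments: plain __VA_ARGS__ leaves a dangling comma after the format string, which does not compile. A minimal standalone demonstration of the GNU extension the kernel relies on:

#include <stdio.h>

/* #define dbg_bad(fmt, ...)  printf(fmt "\n", __VA_ARGS__)
 *    dbg_bad("hi") expands to printf("hi" "\n", );  ->  compile error */
#define dbg_good(fmt, ...) printf(fmt "\n", ##__VA_ARGS__)

int main(void)
{
        dbg_good("no arguments");         /* trailing comma removed by ## */
        dbg_good("one argument: %d", 42); /* behaves as before */
        return 0;
}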
index 59a57ff..9c86f4f 100644 (file)
@@ -452,49 +452,49 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
 
 #if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK)
 #define vxge_debug_ll_config(level, fmt, ...) \
-       vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__)
+       vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__)
 #else
 #define vxge_debug_ll_config(level, fmt, ...)
 #endif
 
 #if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
 #define vxge_debug_init(level, fmt, ...) \
-       vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__)
+       vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__)
 #else
 #define vxge_debug_init(level, fmt, ...)
 #endif
 
 #if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK)
 #define vxge_debug_tx(level, fmt, ...) \
-       vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__)
+       vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__)
 #else
 #define vxge_debug_tx(level, fmt, ...)
 #endif
 
 #if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK)
 #define vxge_debug_rx(level, fmt, ...) \
-       vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__)
+       vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__)
 #else
 #define vxge_debug_rx(level, fmt, ...)
 #endif
 
 #if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK)
 #define vxge_debug_mem(level, fmt, ...) \
-       vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__)
+       vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__)
 #else
 #define vxge_debug_mem(level, fmt, ...)
 #endif
 
 #if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)
 #define vxge_debug_entryexit(level, fmt, ...) \
-       vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__)
+       vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__)
 #else
 #define vxge_debug_entryexit(level, fmt, ...)
 #endif
 
 #if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK)
 #define vxge_debug_intr(level, fmt, ...) \
-       vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__)
+       vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__)
 #else
 #define vxge_debug_intr(level, fmt, ...)
 #endif
index b454db2..684e4e0 100644 (file)
@@ -616,7 +616,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
        if (bar->iomem) {
                int pf;
 
-               msg += snprintf(msg, end - msg, "0.0: General/MSI-X SRAM, ");
+               msg += scnprintf(msg, end - msg, "0.0: General/MSI-X SRAM, ");
                atomic_inc(&bar->refcnt);
                bars_free--;
 
@@ -661,7 +661,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
 
        /* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */
        bar = &nfp->bar[1];
-       msg += snprintf(msg, end - msg, "0.1: PCIe XPB/MSI-X PBA, ");
+       msg += scnprintf(msg, end - msg, "0.1: PCIe XPB/MSI-X PBA, ");
        atomic_inc(&bar->refcnt);
        bars_free--;
 
@@ -680,8 +680,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
                bar->iomem = ioremap(nfp_bar_resource_start(bar),
                                             nfp_bar_resource_len(bar));
                if (bar->iomem) {
-                       msg += snprintf(msg, end - msg,
-                                       "0.%d: Explicit%d, ", 4 + i, i);
+                       msg += scnprintf(msg, end - msg,
+                                        "0.%d: Explicit%d, ", 4 + i, i);
                        atomic_inc(&bar->refcnt);
                        bars_free--;
 
index 54547d5..51adf50 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-2-Clause */
+/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
 /* Copyright (c) 2017-2019 Pensando Systems, Inc.  All rights reserved. */
 
 #ifndef _IONIC_IF_H_
index 191271f..938e19e 100644 (file)
@@ -948,18 +948,18 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
        int i;
 #define REMAIN(__x) (sizeof(buf) - (__x))
 
-       i = snprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
-                    lif->rx_mode, rx_mode);
+       i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
+                     lif->rx_mode, rx_mode);
        if (rx_mode & IONIC_RX_MODE_F_UNICAST)
-               i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
        if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
-               i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
        if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
-               i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
        if (rx_mode & IONIC_RX_MODE_F_PROMISC)
-               i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
        if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
-               i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
        netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
 
        err = ionic_adminq_post_wait(lif, &ctx);
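
These snprintf() to scnprintf() conversions, here and in the nfp, netdevsim and sfc hunks, close the same hole: snprintf() returns the length the output would have had, so a chained "i += snprintf(...)" can push i past the buffer and make REMAIN(i) wrap, while scnprintf() returns only what was actually written. A standalone sketch with a small stand-in for the kernel helper:

#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in with the kernel scnprintf() contract: never returns more
 * than what fits in the buffer (excluding the terminating NUL). */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int n;

        va_start(args, fmt);
        n = vsnprintf(buf, size, fmt, args);
        va_end(args);

        if (n < 0)
                return 0;
        if ((size_t)n >= size)
                return size ? (int)size - 1 : 0;
        return n;
}

int main(void)
{
        char buf[8];

        printf("snprintf:  %d\n", snprintf(buf, sizeof(buf), "0123456789"));     /* 10 */
        printf("scnprintf: %d\n", my_scnprintf(buf, sizeof(buf), "0123456789")); /*  7 */
        return 0;
}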
@@ -1688,7 +1688,7 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
        if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
                return -EINVAL;
 
-       down_read(&ionic->vf_op_lock);
+       down_write(&ionic->vf_op_lock);
 
        if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
                ret = -EINVAL;
@@ -1698,7 +1698,7 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
                        ether_addr_copy(ionic->vfs[vf].macaddr, mac);
        }
 
-       up_read(&ionic->vf_op_lock);
+       up_write(&ionic->vf_op_lock);
        return ret;
 }
 
@@ -1719,7 +1719,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
        if (proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;
 
-       down_read(&ionic->vf_op_lock);
+       down_write(&ionic->vf_op_lock);
 
        if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
                ret = -EINVAL;
@@ -1730,7 +1730,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
                        ionic->vfs[vf].vlanid = vlan;
        }
 
-       up_read(&ionic->vf_op_lock);
+       up_write(&ionic->vf_op_lock);
        return ret;
 }
 
index 03ee5a3..2e174f4 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-2-Clause */
+/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
 /* Copyright (c) 2018-2019 Pensando Systems, Inc.  All rights reserved. */
 
 #ifndef IONIC_REGS_H
index 07f9067..cda5b0a 100644 (file)
@@ -1720,7 +1720,7 @@ static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_d
 
        ahw->reset.seq_error = 0;
        ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL);
-       if (p_dev->ahw->reset.buff == NULL)
+       if (ahw->reset.buff == NULL)
                return -ENOMEM;
 
        p_buff = p_dev->ahw->reset.buff;
index a2168a1..791d99b 100644 (file)
@@ -5194,7 +5194,7 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
                RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
                rtl_lock_config_regs(tp);
                /* fall through */
-       case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24:
+       case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
                flags = PCI_IRQ_LEGACY;
                break;
        default:
@@ -5285,6 +5285,13 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
        if (!tp->phydev) {
                mdiobus_unregister(new_bus);
                return -ENODEV;
+       } else if (!tp->phydev->drv) {
+               /* Most chip versions fail with the genphy driver.
+                * Therefore ensure that the dedicated PHY driver is loaded.
+                */
+               dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
+               mdiobus_unregister(new_bus);
+               return -EUNATCH;
        }
 
        /* PHY will be woken up in rtl_open() */
@@ -5446,15 +5453,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        int chipset, region;
        int jumbo_max, rc;
 
-       /* Some tools for creating an initramfs don't consider softdeps, then
-        * r8169.ko may be in initramfs, but realtek.ko not. Then the generic
-        * PHY driver is used that doesn't work with most chip versions.
-        */
-       if (!driver_find("RTL8201CP Ethernet", &mdio_bus_type)) {
-               dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
-               return -ENOENT;
-       }
-
        dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
        if (!dev)
                return -ENOMEM;
index c705743..2cc8184 100644 (file)
@@ -2277,7 +2277,7 @@ static int __init sxgbe_cmdline_opt(char *str)
        if (!str || !*str)
                return -EINVAL;
        while ((opt = strsep(&str, ",")) != NULL) {
-               if (!strncmp(opt, "eee_timer:", 6)) {
+               if (!strncmp(opt, "eee_timer:", 10)) {
                        if (kstrtoint(opt + 10, 0, &eee_timer))
                                goto err;
                }
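
The old length of 6 only compared "eee_ti", so a mistyped option sharing that stem would slip through and kstrtoint() would then parse from the wrong offset; comparing the full 10-character prefix closes that. A tiny standalone check:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *good = "eee_timer:100";
        const char *bad  = "eee_tiXYZ:100";

        printf("%d %d\n", !strncmp(good, "eee_timer:", 10),  /* 1 */
                          !strncmp(bad,  "eee_timer:", 10)); /* 0 */
        printf("%d\n",    !strncmp(bad,  "eee_timer:", 6));  /* 1 - the old bug */
        return 0;
}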
index 52113b7..3f16bd8 100644 (file)
@@ -2853,11 +2853,24 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
        }
 
        /* Transmit timestamps are only available for 8XXX series. They result
-        * in three events per packet. These occur in order, and are:
-        *  - the normal completion event
+        * in up to three events per packet. These occur in order, and are:
+        *  - the normal completion event (may be omitted)
         *  - the low part of the timestamp
         *  - the high part of the timestamp
         *
+        * It's possible for multiple completion events to appear before the
+        * corresponding timestamps. So we can for example get:
+        *  COMP N
+        *  COMP N+1
+        *  TS_LO N
+        *  TS_HI N
+        *  TS_LO N+1
+        *  TS_HI N+1
+        *
+        * It's also possible for adjacent completions to be
+        * merged, so we may not see COMP N above. As such, the completion
+        * events are not very useful here.
+        *
         * Each part of the timestamp is itself split across two 16 bit
         * fields in the event.
         */
@@ -2865,17 +2878,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 
        switch (tx_ev_type) {
        case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
-               /* In case of Queue flush or FLR, we might have received
-                * the previous TX completion event but not the Timestamp
-                * events.
-                */
-               if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask)
-                       efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
-
-               tx_ev_desc_ptr = EFX_QWORD_FIELD(*event,
-                                                ESF_DZ_TX_DESCR_INDX);
-               tx_queue->completed_desc_ptr =
-                                       tx_ev_desc_ptr & tx_queue->ptr_mask;
+               /* Ignore this event - see above. */
                break;
 
        case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
@@ -2887,8 +2890,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
                ts_part = efx_ef10_extract_event_ts(event);
                tx_queue->completed_timestamp_major = ts_part;
 
-               efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
-               tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
+               efx_xmit_done_single(tx_queue);
                break;
 
        default:
index f1bdb04..95395d6 100644 (file)
@@ -20,6 +20,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev);
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
 int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
                 void *type_data);
 extern unsigned int efx_piobuf_size;
index aeb5e8a..73d4e39 100644 (file)
@@ -583,6 +583,7 @@ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
                if (tx_queue->channel)
                        tx_queue->channel = channel;
                tx_queue->buffer = NULL;
+               tx_queue->cb_page = NULL;
                memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
        }
 
index 2713300..15c731d 100644 (file)
@@ -212,12 +212,14 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
                 * progress on a NIC at any one time.  So no need for locking.
                 */
                for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
-                       bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
-                                         " %08x", le32_to_cpu(hdr[i].u32[0]));
+                       bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+                                          " %08x",
+                                          le32_to_cpu(hdr[i].u32[0]));
 
                for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
-                       bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
-                                         " %08x", le32_to_cpu(inbuf[i].u32[0]));
+                       bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+                                          " %08x",
+                                          le32_to_cpu(inbuf[i].u32[0]));
 
                netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
        }
@@ -302,15 +304,15 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
                 */
                for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
                        efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
-                       bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
-                                         " %08x", le32_to_cpu(hdr.u32[0]));
+                       bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+                                          " %08x", le32_to_cpu(hdr.u32[0]));
                }
 
                for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
                        efx->type->mcdi_read_response(efx, &hdr,
                                        mcdi->resp_hdr_len + (i * 4), 4);
-                       bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
-                                         " %08x", le32_to_cpu(hdr.u32[0]));
+                       bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+                                          " %08x", le32_to_cpu(hdr.u32[0]));
                }
 
                netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
@@ -1417,9 +1419,11 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
        }
 
        ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
-       offset = snprintf(buf, len, "%u.%u.%u.%u",
-                         le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
-                         le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
+       offset = scnprintf(buf, len, "%u.%u.%u.%u",
+                          le16_to_cpu(ver_words[0]),
+                          le16_to_cpu(ver_words[1]),
+                          le16_to_cpu(ver_words[2]),
+                          le16_to_cpu(ver_words[3]));
 
        /* EF10 may have multiple datapath firmware variants within a
         * single version.  Report which variants are running.
@@ -1427,9 +1431,9 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
        if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
                struct efx_ef10_nic_data *nic_data = efx->nic_data;
 
-               offset += snprintf(buf + offset, len - offset, " rx%x tx%x",
-                                  nic_data->rx_dpcpu_fw_id,
-                                  nic_data->tx_dpcpu_fw_id);
+               offset += scnprintf(buf + offset, len - offset, " rx%x tx%x",
+                                   nic_data->rx_dpcpu_fw_id,
+                                   nic_data->tx_dpcpu_fw_id);
 
                /* It's theoretically possible for the string to exceed 31
                 * characters, though in practice the first three version
index 9f9886f..8164f0e 100644 (file)
@@ -208,8 +208,6 @@ struct efx_tx_buffer {
  *     avoid cache-line ping-pong between the xmit path and the
  *     completion path.
  * @merge_events: Number of TX merged completion events
- * @completed_desc_ptr: Most recent completed pointer - only used with
- *      timestamping.
  * @completed_timestamp_major: Top part of the most recent tx timestamp.
  * @completed_timestamp_minor: Low part of the most recent tx timestamp.
  * @insert_count: Current insert pointer
@@ -269,7 +267,6 @@ struct efx_tx_queue {
        unsigned int merge_events;
        unsigned int bytes_compl;
        unsigned int pkts_compl;
-       unsigned int completed_desc_ptr;
        u32 completed_timestamp_major;
        u32 completed_timestamp_minor;
 
index 04d7f41..8aafc54 100644 (file)
@@ -535,6 +535,44 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
        return efx_enqueue_skb(tx_queue, skb);
 }
 
+void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
+{
+       unsigned int pkts_compl = 0, bytes_compl = 0;
+       unsigned int read_ptr;
+       bool finished = false;
+
+       read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+
+       while (!finished) {
+               struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+
+               if (!efx_tx_buffer_in_use(buffer)) {
+                       struct efx_nic *efx = tx_queue->efx;
+
+                       netif_err(efx, hw, efx->net_dev,
+                                 "TX queue %d spurious single TX completion\n",
+                                 tx_queue->queue);
+                       efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+                       return;
+               }
+
+               /* Need to check the flag before dequeueing. */
+               if (buffer->flags & EFX_TX_BUF_SKB)
+                       finished = true;
+               efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+               ++tx_queue->read_count;
+               read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+       }
+
+       tx_queue->pkts_compl += pkts_compl;
+       tx_queue->bytes_compl += bytes_compl;
+
+       EFX_WARN_ON_PARANOID(pkts_compl != 1);
+
+       efx_xmit_done_check_empty(tx_queue);
+}
+
 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
 {
        struct efx_nic *efx = tx_queue->efx;
index b1571e9..70876df 100644 (file)
@@ -80,7 +80,6 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
        tx_queue->xmit_more_available = false;
        tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
                                  tx_queue->channel == efx_ptp_channel(efx));
-       tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
        tx_queue->completed_timestamp_major = 0;
        tx_queue->completed_timestamp_minor = 0;
 
@@ -210,10 +209,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
        while (read_ptr != stop_index) {
                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
 
-               if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
-                   unlikely(buffer->len == 0)) {
+               if (!efx_tx_buffer_in_use(buffer)) {
                        netif_err(efx, tx_err, efx->net_dev,
-                                 "TX queue %d spurious TX completion id %x\n",
+                                 "TX queue %d spurious TX completion id %d\n",
                                  tx_queue->queue, read_ptr);
                        efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
                        return;
@@ -226,6 +224,19 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
        }
 }
 
+void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
+{
+       if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
+               tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
+               if (tx_queue->read_count == tx_queue->old_write_count) {
+                       /* Ensure that read_count is flushed. */
+                       smp_mb();
+                       tx_queue->empty_read_count =
+                               tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
+               }
+       }
+}
+
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
        unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
@@ -256,15 +267,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
                        netif_tx_wake_queue(tx_queue->core_txq);
        }
 
-       /* Check whether the hardware queue is now empty */
-       if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
-               tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
-               if (tx_queue->read_count == tx_queue->old_write_count) {
-                       smp_mb();
-                       tx_queue->empty_read_count =
-                               tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
-               }
-       }
+       efx_xmit_done_check_empty(tx_queue);
 }
 
 /* Remove buffers put into a tx_queue for the current packet.
index f92f1fe..99cf7ce 100644 (file)
@@ -21,6 +21,12 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                        unsigned int *pkts_compl,
                        unsigned int *bytes_compl);
 
+static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
+{
+       return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION);
+}
+
+void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
 
 void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
index dc50ba1..2d5573b 100644 (file)
@@ -1411,7 +1411,7 @@ static int rk_gmac_probe(struct platform_device *pdev)
 
        ret = rk_gmac_clk_init(plat_dat);
        if (ret)
-               return ret;
+               goto err_remove_config_dt;
 
        ret = rk_gmac_powerup(plat_dat->bsp_priv);
        if (ret)
index d0356fb..5427843 100644 (file)
@@ -24,6 +24,7 @@
 static void dwmac1000_core_init(struct mac_device_info *hw,
                                struct net_device *dev)
 {
+       struct stmmac_priv *priv = netdev_priv(dev);
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + GMAC_CONTROL);
        int mtu = dev->mtu;
@@ -35,7 +36,7 @@ static void dwmac1000_core_init(struct mac_device_info *hw,
         * Broadcom tags can look like invalid LLC/SNAP packets and cause the
         * hardware to truncate packets on reception.
         */
-       if (netdev_uses_dsa(dev))
+       if (netdev_uses_dsa(dev) || !priv->plat->enh_desc)
                value &= ~GMAC_CONTROL_ACS;
 
        if (mtu > 1500)
index d10ac54..13fafd9 100644 (file)
@@ -663,16 +663,22 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
         * If the wake-up interrupt is not passed from the platform,
         * the driver will continue to use the mac irq (ndev->irq)
         */
-       stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+       stmmac_res->wol_irq =
+               platform_get_irq_byname_optional(pdev, "eth_wake_irq");
        if (stmmac_res->wol_irq < 0) {
                if (stmmac_res->wol_irq == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
+               dev_info(&pdev->dev, "IRQ eth_wake_irq not found\n");
                stmmac_res->wol_irq = stmmac_res->irq;
        }
 
-       stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
-       if (stmmac_res->lpi_irq == -EPROBE_DEFER)
-               return -EPROBE_DEFER;
+       stmmac_res->lpi_irq =
+               platform_get_irq_byname_optional(pdev, "eth_lpi");
+       if (stmmac_res->lpi_irq < 0) {
+               if (stmmac_res->lpi_irq == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+               dev_info(&pdev->dev, "IRQ eth_lpi not found\n");
+       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);
index 75757e9..09f279c 100644 (file)
@@ -1845,8 +1845,6 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
                if (!net_eq(dev_net(geneve->dev), net))
                        unregister_netdevice_queue(geneve->dev, head);
        }
-
-       WARN_ON_ONCE(!list_empty(&gn->sock_list));
 }
 
 static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
@@ -1861,6 +1859,12 @@ static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
        /* unregister the devices gathered above */
        unregister_netdevice_many(&list);
        rtnl_unlock();
+
+       list_for_each_entry(net, net_list, exit_list) {
+               const struct geneve_net *gn = net_generic(net, geneve_net_id);
+
+               WARN_ON_ONCE(!list_empty(&gn->sock_list));
+       }
 }
 
 static struct pernet_operations geneve_net_ops = {
index 242b9b0..7fe306e 100644 (file)
@@ -75,7 +75,7 @@ static void ifb_ri_tasklet(unsigned long _txp)
        }
 
        while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
-               skb->tc_redirected = 0;
+               skb->redirected = 0;
                skb->tc_skip_classify = 1;
 
                u64_stats_update_begin(&txp->tsync);
@@ -96,7 +96,7 @@ static void ifb_ri_tasklet(unsigned long _txp)
                rcu_read_unlock();
                skb->skb_iif = txp->dev->ifindex;
 
-               if (!skb->tc_from_ingress) {
+               if (!skb->from_ingress) {
                        dev_queue_xmit(skb);
                } else {
                        skb_pull_rcsum(skb, skb->mac_len);
@@ -243,7 +243,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
        txp->rx_bytes += skb->len;
        u64_stats_update_end(&txp->rsync);
 
-       if (!skb->tc_redirected || !skb->skb_iif) {
+       if (!skb->redirected || !skb->skb_iif) {
                dev_kfree_skb(skb);
                dev->stats.rx_dropped++;
                return NETDEV_TX_OK;
index 30cd0c4..8801d09 100644 (file)
@@ -293,6 +293,7 @@ void ipvlan_process_multicast(struct work_struct *work)
                }
                if (dev)
                        dev_put(dev);
+               cond_resched();
        }
 }
 
@@ -498,19 +499,21 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
        struct ethhdr *ethh = eth_hdr(skb);
        int ret = NET_XMIT_DROP;
 
-       /* In this mode we dont care about multicast and broadcast traffic */
-       if (is_multicast_ether_addr(ethh->h_dest)) {
-               pr_debug_ratelimited("Dropped {multi|broad}cast of type=[%x]\n",
-                                    ntohs(skb->protocol));
-               kfree_skb(skb);
-               goto out;
-       }
-
        /* The ipvlan is a pseudo-L2 device, so the packets that we receive
         * will have an L2 header, which needs to be discarded and processed
         * further in the net-ns of the main-device.
         */
        if (skb_mac_header_was_set(skb)) {
+               /* In this mode we don't care about
+                * multicast and broadcast traffic */
+               if (is_multicast_ether_addr(ethh->h_dest)) {
+                       pr_debug_ratelimited(
+                               "Dropped {multi|broad}cast of type=[%x]\n",
+                               ntohs(skb->protocol));
+                       kfree_skb(skb);
+                       goto out;
+               }
+
                skb_pull(skb, sizeof(*ethh));
                skb->mac_header = (typeof(skb->mac_header))~0U;
                skb_reset_network_header(skb);
index a706622..f195f27 100644 (file)
@@ -164,7 +164,6 @@ static void ipvlan_uninit(struct net_device *dev)
 static int ipvlan_open(struct net_device *dev)
 {
        struct ipvl_dev *ipvlan = netdev_priv(dev);
-       struct net_device *phy_dev = ipvlan->phy_dev;
        struct ipvl_addr *addr;
 
        if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
@@ -178,7 +177,7 @@ static int ipvlan_open(struct net_device *dev)
                ipvlan_ht_addr_add(ipvlan, addr);
        rcu_read_unlock();
 
-       return dev_uc_add(phy_dev, phy_dev->dev_addr);
+       return 0;
 }
 
 static int ipvlan_stop(struct net_device *dev)
@@ -190,8 +189,6 @@ static int ipvlan_stop(struct net_device *dev)
        dev_uc_unsync(phy_dev, dev);
        dev_mc_unsync(phy_dev, dev);
 
-       dev_uc_del(phy_dev, phy_dev->dev_addr);
-
        rcu_read_lock();
        list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
                ipvlan_ht_addr_del(addr);
index 45bfd99..92bc2b2 100644 (file)
@@ -19,6 +19,7 @@
 #include <net/gro_cells.h>
 #include <net/macsec.h>
 #include <linux/phy.h>
+#include <linux/if_arp.h>
 
 #include <uapi/linux/if_macsec.h>
 
@@ -424,6 +425,11 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
        return (struct macsec_eth_header *)skb_mac_header(skb);
 }
 
+static sci_t dev_to_sci(struct net_device *dev, __be16 port)
+{
+       return make_sci(dev->dev_addr, port);
+}
+
 static void __macsec_pn_wrapped(struct macsec_secy *secy,
                                struct macsec_tx_sa *tx_sa)
 {
@@ -3268,6 +3274,20 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
 
 out:
        ether_addr_copy(dev->dev_addr, addr->sa_data);
+       macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
+
+       /* If h/w offloading is available, propagate to the device */
+       if (macsec_is_offloaded(macsec)) {
+               const struct macsec_ops *ops;
+               struct macsec_context ctx;
+
+               ops = macsec_get_ops(macsec, &ctx);
+               if (ops) {
+                       ctx.secy = &macsec->secy;
+                       macsec_offload(ops->mdo_upd_secy, &ctx);
+               }
+       }
+
        return 0;
 }
 
@@ -3342,6 +3362,7 @@ static const struct device_type macsec_type = {
 
 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
        [IFLA_MACSEC_SCI] = { .type = NLA_U64 },
+       [IFLA_MACSEC_PORT] = { .type = NLA_U16 },
        [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
        [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
        [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
@@ -3592,11 +3613,6 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
        return false;
 }
 
-static sci_t dev_to_sci(struct net_device *dev, __be16 port)
-{
-       return make_sci(dev->dev_addr, port);
-}
-
 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
 {
        struct macsec_dev *macsec = macsec_priv(dev);
@@ -3650,6 +3666,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
        real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
        if (!real_dev)
                return -ENODEV;
+       if (real_dev->type != ARPHRD_ETHER)
+               return -EINVAL;
 
        dev->priv_flags |= IFF_MACSEC;
 
index 81aa7ad..e7289d6 100644 (file)
@@ -334,6 +334,8 @@ static void macvlan_process_broadcast(struct work_struct *w)
                if (src)
                        dev_put(src->dev);
                consume_skb(skb);
+
+               cond_resched();
        }
 }
 
index e27fc1a..3811f1b 100644 (file)
@@ -29,9 +29,9 @@ static ssize_t nsim_dbg_netdev_ops_read(struct file *filp,
                return -ENOMEM;
 
        p = buf;
-       p += snprintf(p, bufsize - (p - buf),
-                     "SA count=%u tx=%u\n",
-                     ipsec->count, ipsec->tx);
+       p += scnprintf(p, bufsize - (p - buf),
+                      "SA count=%u tx=%u\n",
+                      ipsec->count, ipsec->tx);
 
        for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) {
                struct nsim_sa *sap = &ipsec->sa[i];
@@ -39,18 +39,18 @@ static ssize_t nsim_dbg_netdev_ops_read(struct file *filp,
                if (!sap->used)
                        continue;
 
-               p += snprintf(p, bufsize - (p - buf),
-                             "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n",
-                             i, (sap->rx ? 'r' : 't'), sap->ipaddr[0],
-                             sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]);
-               p += snprintf(p, bufsize - (p - buf),
-                             "sa[%i]    spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n",
-                             i, be32_to_cpu(sap->xs->id.spi),
-                             sap->xs->id.proto, sap->salt, sap->crypt);
-               p += snprintf(p, bufsize - (p - buf),
-                             "sa[%i]    key=0x%08x %08x %08x %08x\n",
-                             i, sap->key[0], sap->key[1],
-                             sap->key[2], sap->key[3]);
+               p += scnprintf(p, bufsize - (p - buf),
+                              "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n",
+                              i, (sap->rx ? 'r' : 't'), sap->ipaddr[0],
+                              sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]);
+               p += scnprintf(p, bufsize - (p - buf),
+                              "sa[%i]    spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n",
+                              i, be32_to_cpu(sap->xs->id.spi),
+                              sap->xs->id.proto, sap->salt, sap->crypt);
+               p += scnprintf(p, bufsize - (p - buf),
+                              "sa[%i]    key=0x%08x %08x %08x %08x\n",
+                              i, sap->key[0], sap->key[1],
+                              sap->key[2], sap->key[3]);
        }
 
        len = simple_read_from_buffer(buffer, count, ppos, buf, p - buf);
index 23f1958..459fb20 100644 (file)
@@ -73,6 +73,7 @@ static struct phy_driver bcm63xx_driver[] = {
        /* same phy as above, with just a different OUI */
        .phy_id         = 0x002bdc00,
        .phy_id_mask    = 0xfffffc00,
+       .name           = "Broadcom BCM63XX (2)",
        /* PHY_BASIC_FEATURES */
        .flags          = PHY_IS_INTERNAL,
        .config_init    = bcm63xx_config_init,
index 967f57e..9a07ad1 100644 (file)
@@ -28,7 +28,8 @@
 #define DP83867_CTRL           0x1f
 
 /* Extended Registers */
-#define DP83867_CFG4            0x0031
+#define DP83867_FLD_THR_CFG    0x002e
+#define DP83867_CFG4           0x0031
 #define DP83867_CFG4_SGMII_ANEG_MASK (BIT(5) | BIT(6))
 #define DP83867_CFG4_SGMII_ANEG_TIMER_11MS   (3 << 5)
 #define DP83867_CFG4_SGMII_ANEG_TIMER_800US  (2 << 5)
@@ -91,6 +92,7 @@
 #define DP83867_STRAP_STS2_CLK_SKEW_RX_MASK    GENMASK(2, 0)
 #define DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT   0
 #define DP83867_STRAP_STS2_CLK_SKEW_NONE       BIT(2)
+#define DP83867_STRAP_STS2_STRAP_FLD           BIT(10)
 
 /* PHY CTRL bits */
 #define DP83867_PHYCR_TX_FIFO_DEPTH_SHIFT      14
 /* CFG4 bits */
 #define DP83867_CFG4_PORT_MIRROR_EN              BIT(0)
 
+/* FLD_THR_CFG */
+#define DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK       0x7
+
 enum {
        DP83867_PORT_MIRROING_KEEP,
        DP83867_PORT_MIRROING_EN,
@@ -476,6 +481,20 @@ static int dp83867_config_init(struct phy_device *phydev)
                phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
                                   BIT(7));
 
+       bs = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS2);
+       if (bs & DP83867_STRAP_STS2_STRAP_FLD) {
+               /* When using strap to enable FLD, the ENERGY_LOST_FLD_THR will
+                * be set to 0x2. This may cause the PHY link to be unstable -
+                * the default value 0x1 needs to be restored.
+                */
+               ret = phy_modify_mmd(phydev, DP83867_DEVADDR,
+                                    DP83867_FLD_THR_CFG,
+                                    DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK,
+                                    0x1);
+               if (ret)
+                       return ret;
+       }
+
        if (phy_interface_is_rgmii(phydev) ||
            phydev->interface == PHY_INTERFACE_MODE_SGMII) {
                val = phy_read(phydev, MII_DP83867_PHYCTRL);
index 4a28fb2..fbd3689 100644 (file)
@@ -242,11 +242,9 @@ static int unimac_mdio_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
-       priv->clk = devm_clk_get(&pdev->dev, NULL);
-       if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
+       priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
+       if (IS_ERR(priv->clk))
                return PTR_ERR(priv->clk);
-       else
-               priv->clk = NULL;
 
        ret = clk_prepare_enable(priv->clk);
        if (ret)
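
A note on the idiom this hunk switches to, kept as a hedged kernel-style sketch rather than a full driver excerpt: devm_clk_get_optional() hands back NULL, not an error, when no clock is described for the device, and the common clk API treats a NULL clk as a no-op, so a single IS_ERR() check covers the genuine failures such as -EPROBE_DEFER.

/* Sketch of the optional-clock pattern assumed above. */
priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(priv->clk))
        return PTR_ERR(priv->clk);   /* real error, including probe deferral */

ret = clk_prepare_enable(priv->clk); /* NULL clk: returns 0 and does nothing */
if (ret)
        return ret;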
index 88d409e..aad6809 100644 (file)
@@ -288,8 +288,13 @@ static int mdio_mux_iproc_suspend(struct device *dev)
 static int mdio_mux_iproc_resume(struct device *dev)
 {
        struct iproc_mdiomux_desc *md = dev_get_drvdata(dev);
+       int rc;
 
-       clk_prepare_enable(md->core_clk);
+       rc = clk_prepare_enable(md->core_clk);
+       if (rc) {
+               dev_err(md->dev, "failed to enable core clk\n");
+               return rc;
+       }
        mdio_mux_iproc_config(md);
 
        return 0;
index d76e038..355bfde 100644 (file)
@@ -727,7 +727,8 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
                phy_trigger_machine(phydev);
        }
 
-       if (phy_clear_interrupt(phydev))
+       /* did_interrupt() may have cleared the interrupt already */
+       if (!phydev->drv->did_interrupt && phy_clear_interrupt(phydev))
                goto phy_err;
        return IRQ_HANDLED;
 
index c8b0c34..28e3c5c 100644 (file)
@@ -286,6 +286,8 @@ static int mdio_bus_phy_suspend(struct device *dev)
        if (!mdio_bus_phy_may_suspend(phydev))
                return 0;
 
+       phydev->suspended_by_mdio_bus = 1;
+
        return phy_suspend(phydev);
 }
 
@@ -294,9 +296,11 @@ static int mdio_bus_phy_resume(struct device *dev)
        struct phy_device *phydev = to_phy_device(dev);
        int ret;
 
-       if (!mdio_bus_phy_may_suspend(phydev))
+       if (!phydev->suspended_by_mdio_bus)
                goto no_resume;
 
+       phydev->suspended_by_mdio_bus = 0;
+
        ret = phy_resume(phydev);
        if (ret < 0)
                return ret;
index 70b9a14..6e66b8e 100644 (file)
@@ -761,8 +761,14 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
                config.interface = interface;
 
        ret = phylink_validate(pl, supported, &config);
-       if (ret)
+       if (ret) {
+               phylink_warn(pl, "validation of %s with support %*pb and advertisement %*pb failed: %d\n",
+                            phy_modes(config.interface),
+                            __ETHTOOL_LINK_MODE_MASK_NBITS, phy->supported,
+                            __ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising,
+                            ret);
                return ret;
+       }
 
        phy->phylink = pl;
        phy->phy_link_change = phylink_phy_change;
index d949ea7..6900c68 100644 (file)
@@ -572,13 +572,15 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
  * the sfp_bus structure, incrementing its reference count.  This must
  * be put via sfp_bus_put() when done.
  *
- * Returns: on success, a pointer to the sfp_bus structure,
- *         %NULL if no SFP is specified,
- *         on failure, an error pointer value:
- *             corresponding to the errors detailed for
- *             fwnode_property_get_reference_args().
- *             %-ENOMEM if we failed to allocate the bus.
- *             an error from the upstream's connect_phy() method.
+ * Returns:
+ *         - on success, a pointer to the sfp_bus structure,
+ *         - %NULL if no SFP is specified,
+ *         - on failure, an error pointer value:
+ *
+ *           - corresponding to the errors detailed for
+ *             fwnode_property_get_reference_args().
+ *           - %-ENOMEM if we failed to allocate the bus.
+ *           - an error from the upstream's connect_phy() method.
  */
 struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
 {
@@ -612,13 +614,15 @@ EXPORT_SYMBOL_GPL(sfp_bus_find_fwnode);
  * the SFP bus using sfp_register_upstream().  This takes a reference on the
  * bus, so it is safe to put the bus after this call.
  *
- * Returns: on success, a pointer to the sfp_bus structure,
- *         %NULL if no SFP is specified,
- *         on failure, an error pointer value:
- *             corresponding to the errors detailed for
- *             fwnode_property_get_reference_args().
- *             %-ENOMEM if we failed to allocate the bus.
- *             an error from the upstream's connect_phy() method.
+ * Returns:
+ *         - on success, a pointer to the sfp_bus structure,
+ *         - %NULL if no SFP is specified,
+ *         - on failure, an error pointer value:
+ *
+ *           - corresponding to the errors detailed for
+ *             fwnode_property_get_reference_args().
+ *           - %-ENOMEM if we failed to allocate the bus.
+ *           - an error from the upstream's connect_phy() method.
  */
 int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
                         const struct sfp_upstream_ops *ops)
index 58a69f8..f78ceba 100644 (file)
@@ -232,7 +232,7 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
        struct cstate *cs = lcs->next;
        unsigned long deltaS, deltaA;
        short changes = 0;
-       int hlen;
+       int nlen, hlen;
        unsigned char new_seq[16];
        unsigned char *cp = new_seq;
        struct iphdr *ip;
@@ -248,6 +248,8 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
                return isize;
 
        ip = (struct iphdr *) icp;
+       if (ip->version != 4 || ip->ihl < 5)
+               return isize;
 
        /* Bail if this packet isn't TCP, or is an IP fragment */
        if (ip->protocol != IPPROTO_TCP || (ntohs(ip->frag_off) & 0x3fff)) {
@@ -258,10 +260,14 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
                        comp->sls_o_tcp++;
                return isize;
        }
-       /* Extract TCP header */
+       nlen = ip->ihl * 4;
+       if (isize < nlen + sizeof(*th))
+               return isize;
 
-       th = (struct tcphdr *)(((unsigned char *)ip) + ip->ihl*4);
-       hlen = ip->ihl*4 + th->doff*4;
+       th = (struct tcphdr *)(icp + nlen);
+       if (th->doff < sizeof(struct tcphdr) / 4)
+               return isize;
+       hlen = nlen + th->doff * 4;
 
        /*  Bail if the TCP packet isn't `compressible' (i.e., ACK isn't set or
         *  some other control bit is set). Also uncompressible if
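
The checks added above ensure the compressor never trusts header fields from a short or malformed buffer: the IPv4 version and IHL are validated first, then the buffer must hold at least a minimal TCP header past the IP options, and only then is th->doff read and used. For illustration only (not part of the patch), a self-contained user-space sketch of the same validation order, using raw byte offsets from the RFC 791/793 header layouts:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Validate in the same order as the patch: version/IHL, room for a
 * minimal TCP header, then the TCP data offset.
 */
static bool headers_fit(const uint8_t *pkt, size_t len)
{
	if (len < 20)                        /* minimal IPv4 header */
		return false;
	if ((pkt[0] >> 4) != 4)              /* IP version must be 4 */
		return false;
	size_t ihl = pkt[0] & 0x0f;          /* IHL in 32-bit words */
	if (ihl < 5)
		return false;
	size_t nlen = ihl * 4;
	if (len < nlen + 20)                 /* minimal TCP header */
		return false;
	size_t doff = pkt[nlen + 12] >> 4;   /* TCP data offset, 32-bit words */
	if (doff < 5)
		return false;
	return len >= nlen + doff * 4;       /* both headers must fit */
}

int main(void)
{
	uint8_t truncated[10] = { 0x45 };    /* claims IPv4, IHL=5, but too short */

	printf("headers fit: %s\n",
	       headers_fit(truncated, sizeof(truncated)) ? "yes" : "no");
	return 0;
}
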
index ca70a1d..4004f98 100644 (file)
@@ -2240,6 +2240,8 @@ team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
        [TEAM_ATTR_OPTION_CHANGED]              = { .type = NLA_FLAG },
        [TEAM_ATTR_OPTION_TYPE]                 = { .type = NLA_U8 },
        [TEAM_ATTR_OPTION_DATA]                 = { .type = NLA_BINARY },
+       [TEAM_ATTR_OPTION_PORT_IFINDEX]         = { .type = NLA_U32 },
+       [TEAM_ATTR_OPTION_ARRAY_INDEX]          = { .type = NLA_U32 },
 };
 
 static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
index 5754bb6..6c738a2 100644 (file)
@@ -1210,6 +1210,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1435, 0xd182, 5)},    /* Wistron NeWeb D18 */
        {QMI_FIXED_INTF(0x1435, 0xd191, 4)},    /* Wistron NeWeb D19Q1 */
        {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
+       {QMI_FIXED_INTF(0x1690, 0x7588, 4)},    /* ASKEY WWHC050 */
        {QMI_FIXED_INTF(0x16d8, 0x6003, 0)},    /* CMOTech 6003 */
        {QMI_FIXED_INTF(0x16d8, 0x6007, 0)},    /* CMOTech CHE-628S */
        {QMI_FIXED_INTF(0x16d8, 0x6008, 0)},    /* CMOTech CMU-301 */
index 78ddbaf..95b19ce 100644 (file)
@@ -3221,6 +3221,8 @@ static u16 r8153_phy_status(struct r8152 *tp, u16 desired)
                }
 
                msleep(20);
+               if (test_bit(RTL8152_UNPLUG, &tp->flags))
+                       break;
        }
 
        return data;
@@ -5402,7 +5404,10 @@ static void r8153_init(struct r8152 *tp)
                if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
                    AUTOLOAD_DONE)
                        break;
+
                msleep(20);
+               if (test_bit(RTL8152_UNPLUG, &tp->flags))
+                       break;
        }
 
        data = r8153_phy_status(tp, 0);
@@ -5539,7 +5544,10 @@ static void r8153b_init(struct r8152 *tp)
                if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
                    AUTOLOAD_DONE)
                        break;
+
                msleep(20);
+               if (test_bit(RTL8152_UNPLUG, &tp->flags))
+                       break;
        }
 
        data = r8153_phy_status(tp, 0);
index 8cdc441..d4cbb9e 100644 (file)
@@ -328,7 +328,7 @@ static void veth_get_stats64(struct net_device *dev,
        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        if (peer) {
-               tot->rx_dropped += veth_stats_tx(peer, &packets, &bytes);
+               veth_stats_tx(peer, &packets, &bytes);
                tot->rx_bytes += bytes;
                tot->rx_packets += packets;
 
index d3b08b7..45308b3 100644 (file)
@@ -2779,10 +2779,19 @@ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
 /* Setup stats when device is created */
 static int vxlan_init(struct net_device *dev)
 {
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       int err;
+
        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;
 
+       err = gro_cells_init(&vxlan->gro_cells, dev);
+       if (err) {
+               free_percpu(dev->tstats);
+               return err;
+       }
+
        return 0;
 }
 
@@ -3043,8 +3052,6 @@ static void vxlan_setup(struct net_device *dev)
 
        vxlan->dev = dev;
 
-       gro_cells_init(&vxlan->gro_cells, dev);
-
        for (h = 0; h < FDB_HASH_SIZE; ++h) {
                spin_lock_init(&vxlan->hash_lock[h]);
                INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
index cdc9696..3ac3f85 100644 (file)
@@ -122,7 +122,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
        u32 mtu;
        int ret;
 
-       if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol)) {
+       if (unlikely(!wg_check_packet_protocol(skb))) {
                ret = -EPROTONOSUPPORT;
                net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name);
                goto err;
index bda2640..802099c 100644 (file)
@@ -411,11 +411,7 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
 
                peer = wg_peer_create(wg, public_key, preshared_key);
                if (IS_ERR(peer)) {
-                       /* Similar to the above, if the key is invalid, we skip
-                        * it without fanfare, so that services don't need to
-                        * worry about doing key validation themselves.
-                        */
-                       ret = PTR_ERR(peer) == -EKEYREJECTED ? 0 : PTR_ERR(peer);
+                       ret = PTR_ERR(peer);
                        peer = NULL;
                        goto out;
                }
@@ -569,7 +565,7 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
                                                         private_key);
                list_for_each_entry_safe(peer, temp, &wg->peer_list,
                                         peer_list) {
-                       BUG_ON(!wg_noise_precompute_static_static(peer));
+                       wg_noise_precompute_static_static(peer);
                        wg_noise_expire_current_peer_keypairs(peer);
                }
                wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
index 919d9d8..708dc61 100644 (file)
@@ -44,32 +44,23 @@ void __init wg_noise_init(void)
 }
 
 /* Must hold peer->handshake.static_identity->lock */
-bool wg_noise_precompute_static_static(struct wg_peer *peer)
+void wg_noise_precompute_static_static(struct wg_peer *peer)
 {
-       bool ret;
-
        down_write(&peer->handshake.lock);
-       if (peer->handshake.static_identity->has_identity) {
-               ret = curve25519(
-                       peer->handshake.precomputed_static_static,
+       if (!peer->handshake.static_identity->has_identity ||
+           !curve25519(peer->handshake.precomputed_static_static,
                        peer->handshake.static_identity->static_private,
-                       peer->handshake.remote_static);
-       } else {
-               u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 };
-
-               ret = curve25519(empty, empty, peer->handshake.remote_static);
+                       peer->handshake.remote_static))
                memset(peer->handshake.precomputed_static_static, 0,
                       NOISE_PUBLIC_KEY_LEN);
-       }
        up_write(&peer->handshake.lock);
-       return ret;
 }
 
-bool wg_noise_handshake_init(struct noise_handshake *handshake,
-                          struct noise_static_identity *static_identity,
-                          const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
-                          const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
-                          struct wg_peer *peer)
+void wg_noise_handshake_init(struct noise_handshake *handshake,
+                            struct noise_static_identity *static_identity,
+                            const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
+                            const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
+                            struct wg_peer *peer)
 {
        memset(handshake, 0, sizeof(*handshake));
        init_rwsem(&handshake->lock);
@@ -81,7 +72,7 @@ bool wg_noise_handshake_init(struct noise_handshake *handshake,
                       NOISE_SYMMETRIC_KEY_LEN);
        handshake->static_identity = static_identity;
        handshake->state = HANDSHAKE_ZEROED;
-       return wg_noise_precompute_static_static(peer);
+       wg_noise_precompute_static_static(peer);
 }
 
 static void handshake_zero(struct noise_handshake *handshake)
@@ -403,6 +394,19 @@ static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN],
        return true;
 }
 
+static bool __must_check mix_precomputed_dh(u8 chaining_key[NOISE_HASH_LEN],
+                                           u8 key[NOISE_SYMMETRIC_KEY_LEN],
+                                           const u8 precomputed[NOISE_PUBLIC_KEY_LEN])
+{
+       static u8 zero_point[NOISE_PUBLIC_KEY_LEN];
+       if (unlikely(!crypto_memneq(precomputed, zero_point, NOISE_PUBLIC_KEY_LEN)))
+               return false;
+       kdf(chaining_key, key, NULL, precomputed, NOISE_HASH_LEN,
+           NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN,
+           chaining_key);
+       return true;
+}
+
 static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len)
 {
        struct blake2s_state blake;
@@ -531,10 +535,9 @@ wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst,
                        NOISE_PUBLIC_KEY_LEN, key, handshake->hash);
 
        /* ss */
-       kdf(handshake->chaining_key, key, NULL,
-           handshake->precomputed_static_static, NOISE_HASH_LEN,
-           NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN,
-           handshake->chaining_key);
+       if (!mix_precomputed_dh(handshake->chaining_key, key,
+                               handshake->precomputed_static_static))
+               goto out;
 
        /* {t} */
        tai64n_now(timestamp);
@@ -595,9 +598,9 @@ wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src,
        handshake = &peer->handshake;
 
        /* ss */
-       kdf(chaining_key, key, NULL, handshake->precomputed_static_static,
-           NOISE_HASH_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN,
-           chaining_key);
+       if (!mix_precomputed_dh(chaining_key, key,
+                               handshake->precomputed_static_static))
+               goto out;
 
        /* {t} */
        if (!message_decrypt(t, src->encrypted_timestamp,
index 138a07b..f532d59 100644 (file)
@@ -94,11 +94,11 @@ struct noise_handshake {
 struct wg_device;
 
 void wg_noise_init(void);
-bool wg_noise_handshake_init(struct noise_handshake *handshake,
-                          struct noise_static_identity *static_identity,
-                          const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
-                          const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
-                          struct wg_peer *peer);
+void wg_noise_handshake_init(struct noise_handshake *handshake,
+                            struct noise_static_identity *static_identity,
+                            const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
+                            const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
+                            struct wg_peer *peer);
 void wg_noise_handshake_clear(struct noise_handshake *handshake);
 static inline void wg_noise_reset_last_sent_handshake(atomic64_t *handshake_ns)
 {
@@ -116,7 +116,7 @@ void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer);
 void wg_noise_set_static_identity_private_key(
        struct noise_static_identity *static_identity,
        const u8 private_key[NOISE_PUBLIC_KEY_LEN]);
-bool wg_noise_precompute_static_static(struct wg_peer *peer);
+void wg_noise_precompute_static_static(struct wg_peer *peer);
 
 bool
 wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst,
index 071eedf..1d634bd 100644 (file)
@@ -34,11 +34,8 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
                return ERR_PTR(ret);
        peer->device = wg;
 
-       if (!wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
-                                    public_key, preshared_key, peer)) {
-               ret = -EKEYREJECTED;
-               goto err_1;
-       }
+       wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
+                               public_key, preshared_key, peer);
        if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
                goto err_1;
        if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false,
index fecb559..3432232 100644 (file)
@@ -66,7 +66,7 @@ struct packet_cb {
 #define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
 
 /* Returns either the correct skb->protocol value, or 0 if invalid. */
-static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
+static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb)
 {
        if (skb_network_header(skb) >= skb->head &&
            (skb_network_header(skb) + sizeof(struct iphdr)) <=
@@ -81,6 +81,12 @@ static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
        return 0;
 }
 
+static inline bool wg_check_packet_protocol(struct sk_buff *skb)
+{
+       __be16 real_protocol = wg_examine_packet_protocol(skb);
+       return real_protocol && skb->protocol == real_protocol;
+}
+
 static inline void wg_reset_packet(struct sk_buff *skb)
 {
        skb_scrub_packet(skb, true);
@@ -94,8 +100,8 @@ static inline void wg_reset_packet(struct sk_buff *skb)
        skb->dev = NULL;
 #ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
-       skb_reset_tc(skb);
 #endif
+       skb_reset_redirect(skb);
        skb->hdr_len = skb_headroom(skb);
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
index 4a15389..da3b782 100644 (file)
@@ -56,7 +56,7 @@ static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg)
        size_t data_offset, data_len, header_len;
        struct udphdr *udp;
 
-       if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol ||
+       if (unlikely(!wg_check_packet_protocol(skb) ||
                     skb_transport_header(skb) < skb->head ||
                     (skb_transport_header(skb) + sizeof(struct udphdr)) >
                             skb_tail_pointer(skb)))
@@ -388,7 +388,7 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
         */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb->csum_level = ~0; /* All levels */
-       skb->protocol = wg_skb_examine_untrusted_ip_hdr(skb);
+       skb->protocol = wg_examine_packet_protocol(skb);
        if (skb->protocol == htons(ETH_P_IP)) {
                len = ntohs(ip_hdr(skb)->tot_len);
                if (unlikely(len < sizeof(struct iphdr)))
@@ -587,8 +587,7 @@ void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
                wg_packet_consume_data(wg, skb);
                break;
        default:
-               net_dbg_skb_ratelimited("%s: Invalid packet from %pISpfsc\n",
-                                       wg->dev->name, skb);
+               WARN(1, "Non-exhaustive parsing of packet header led to unknown packet type!\n");
                goto err;
        }
        return;
index a22a830..355af47 100644 (file)
@@ -283,6 +283,7 @@ const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0 = {
         * HT size; mac80211 would otherwise pick the HE max (256) by default.
         */
        .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+       .tx_with_siso_diversity = true,
        .num_rbds = IWL_NUM_RBDS_22000_HE,
 };
 
@@ -309,6 +310,7 @@ const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
         * HT size; mac80211 would otherwise pick the HE max (256) by default.
         */
        .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+       .tx_with_siso_diversity = true,
        .num_rbds = IWL_NUM_RBDS_22000_HE,
 };
 
index 48d375a..ba2aff3 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2017        Intel Deutschland GmbH
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2017        Intel Deutschland GmbH
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -491,13 +491,13 @@ int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
 }
 IWL_EXPORT_SYMBOL(iwl_validate_sar_geo_profile);
 
-void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
-                     struct iwl_per_chain_offset_group *table)
+int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
+                    struct iwl_per_chain_offset_group *table)
 {
        int ret, i, j;
 
        if (!iwl_sar_geo_support(fwrt))
-               return;
+               return -EOPNOTSUPP;
 
        ret = iwl_sar_get_wgds_table(fwrt);
        if (ret < 0) {
@@ -505,7 +505,7 @@ void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
                                "Geo SAR BIOS table invalid or unavailable. (%d)\n",
                                ret);
                /* we don't fail if the table is not available */
-               return;
+               return -ENOENT;
        }
 
        BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
@@ -530,5 +530,7 @@ void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
                                        i, j, value[1], value[2], value[0]);
                }
        }
+
+       return 0;
 }
 IWL_EXPORT_SYMBOL(iwl_sar_geo_init);
index 4a6e826..5590e5c 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2017        Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019        Intel Corporation
+ * Copyright(c) 2018 - 2020        Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2017        Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019       Intel Corporation
+ * Copyright(c) 2018 - 2020       Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -171,8 +171,9 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
 int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
                                 struct iwl_host_cmd *cmd);
 
-void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
-                     struct iwl_per_chain_offset_group *table);
+int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
+                    struct iwl_per_chain_offset_group *table);
+
 #else /* CONFIG_ACPI */
 
 static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
@@ -243,9 +244,10 @@ static inline int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
        return -ENOENT;
 }
 
-static inline void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
-                                   struct iwl_per_chain_offset_group *table)
+static inline int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
+                                  struct iwl_per_chain_offset_group *table)
 {
+       return -ENOENT;
 }
 
 #endif /* CONFIG_ACPI */
index 91df1ee..8796ab8 100644 (file)
@@ -8,7 +8,7 @@
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1409,11 +1409,7 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
                goto out;
        }
 
-       /*
-        * region register have absolute value so apply rxf offset after
-        * reading the registers
-        */
-       offs += rxf_data.offset;
+       offs = rxf_data.offset;
 
        /* Lock fence */
        iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1);
@@ -2494,10 +2490,7 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
                goto out;
        }
 
-       if (iwl_fw_dbg_stop_restart_recording(fwrt, &params, true)) {
-               IWL_ERR(fwrt, "Failed to stop DBGC recording, aborting dump\n");
-               goto out;
-       }
+       iwl_fw_dbg_stop_restart_recording(fwrt, &params, true);
 
        IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n");
        if (iwl_trans_dbg_ini_valid(fwrt->trans))
@@ -2662,14 +2655,14 @@ static int iwl_fw_dbg_restart_recording(struct iwl_trans *trans,
        return 0;
 }
 
-int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
-                                     struct iwl_fw_dbg_params *params,
-                                     bool stop)
+void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
+                                      struct iwl_fw_dbg_params *params,
+                                      bool stop)
 {
        int ret = 0;
 
        if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
-               return 0;
+               return;
 
        if (fw_has_capa(&fwrt->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP))
@@ -2686,7 +2679,5 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
                        iwl_fw_set_dbg_rec_on(fwrt);
        }
 #endif
-
-       return ret;
 }
 IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_restart_recording);
index 179f290..9d35132 100644 (file)
@@ -239,9 +239,9 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
        _iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev),         \
                                        iwl_fw_dbg_get_trigger((fwrt)->fw,\
                                                               (trig)))
-int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
-                                     struct iwl_fw_dbg_params *params,
-                                     bool stop);
+void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
+                                      struct iwl_fw_dbg_params *params,
+                                      bool stop);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 static inline void iwl_fw_set_dbg_rec_on(struct iwl_fw_runtime *fwrt)
index 2d1cb46..0481796 100644 (file)
@@ -1467,7 +1467,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                                kmemdup(pieces->dbg_conf_tlv[i],
                                        pieces->dbg_conf_tlv_len[i],
                                        GFP_KERNEL);
-                       if (!pieces->dbg_conf_tlv_len[i])
+                       if (!pieces->dbg_conf_tlv[i])
                                goto out_free_fw;
                }
        }
index 54c094e..98263cd 100644 (file)
@@ -762,10 +762,17 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
        u16 cmd_wide_id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
        union geo_tx_power_profiles_cmd cmd;
        u16 len;
+       int ret;
 
        cmd.geo_cmd.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
 
-       iwl_sar_geo_init(&mvm->fwrt, cmd.geo_cmd.table);
+       ret = iwl_sar_geo_init(&mvm->fwrt, cmd.geo_cmd.table);
+       /*
+        * It is a valid scenario to not support SAR or to be missing the WGDS table,
+        * but in that case there is no need to send the command.
+        */
+       if (ret)
+               return 0;
 
        cmd.geo_cmd.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
 
index 70b29bf..60296a7 100644 (file)
@@ -308,7 +308,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
                }
 
                /* PHY_SKU section is mandatory in B0 */
-               if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
+               if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT &&
+                   !mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
                        IWL_ERR(mvm,
                                "Can't parse phy_sku in B0, empty sections\n");
                        return NULL;
index e2cf9e0..ca99a9c 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2017        Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2017        Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -147,7 +147,11 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
             (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
                flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
 
-       /* consider our LDPC support in case of HE */
+       /* consider LDPC support in case of HE */
+       if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[1] &
+           IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
+               flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
        if (sband->iftype_data && sband->iftype_data->he_cap.has_he &&
            !(sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &
             IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
@@ -191,11 +195,13 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
 {
        u16 supp;
        int i, highest_mcs;
+       u8 nss = sta->rx_nss;
 
-       for (i = 0; i < sta->rx_nss; i++) {
-               if (i == IWL_TLC_NSS_MAX)
-                       break;
+       /* the station supports only a single receive chain */
+       if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+               nss = 1;
 
+       for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
                highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
                if (!highest_mcs)
                        continue;
@@ -241,8 +247,13 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
        u16 tx_mcs_160 =
                le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160);
        int i;
+       u8 nss = sta->rx_nss;
+
+       /* the station supports only a single receive chain */
+       if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+               nss = 1;
 
-       for (i = 0; i < sta->rx_nss && i < IWL_TLC_NSS_MAX; i++) {
+       for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
                u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
                u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
                u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3;
@@ -303,8 +314,14 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
                cmd->mode = IWL_TLC_MNG_MODE_HT;
                cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_HT_BW_NONE_160] =
                        cpu_to_le16(ht_cap->mcs.rx_mask[0]);
-               cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] =
-                       cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+
+               /* the station supports only a single receive chain */
+               if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+                       cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] =
+                               0;
+               else
+                       cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] =
+                               cpu_to_le16(ht_cap->mcs.rx_mask[1]);
        }
 }
 
index c0b420f..1babc4b 100644 (file)
@@ -785,7 +785,9 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
                if (!le32_to_cpu(notif->status)) {
                        iwl_mvm_te_check_disconnect(mvm, vif,
                                                    "Session protection failure");
+                       spin_lock_bh(&mvm->time_event_lock);
                        iwl_mvm_te_clear_data(mvm, te_data);
+                       spin_unlock_bh(&mvm->time_event_lock);
                }
 
                if (le32_to_cpu(notif->start)) {
@@ -801,7 +803,9 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
                         */
                        iwl_mvm_te_check_disconnect(mvm, vif,
                                                    "No beacon heard and the session protection is over already...");
+                       spin_lock_bh(&mvm->time_event_lock);
                        iwl_mvm_te_clear_data(mvm, te_data);
+                       spin_unlock_bh(&mvm->time_event_lock);
                }
 
                goto out_unlock;
index 97f227f..f441b20 100644 (file)
@@ -981,6 +981,9 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
        IWL_DEV_INFO(0x2526, 0x0014, iwl9260_2ac_160_cfg, iwl9260_160_name),
        IWL_DEV_INFO(0x2526, 0x0018, iwl9260_2ac_160_cfg, iwl9260_160_name),
        IWL_DEV_INFO(0x2526, 0x001C, iwl9260_2ac_160_cfg, iwl9260_160_name),
+       IWL_DEV_INFO(0x2526, 0x4010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+       IWL_DEV_INFO(0x2526, 0x4018, iwl9260_2ac_160_cfg, iwl9260_160_name),
+       IWL_DEV_INFO(0x2526, 0x401C, iwl9260_2ac_160_cfg, iwl9260_160_name),
        IWL_DEV_INFO(0x2526, 0x6010, iwl9260_2ac_160_cfg, iwl9260_160_name),
        IWL_DEV_INFO(0x2526, 0x6014, iwl9260_2ac_160_cfg, iwl9260_160_name),
        IWL_DEV_INFO(0x2526, 0x8014, iwl9260_2ac_160_cfg, iwl9260_160_name),
index 6173c80..1847f55 100644 (file)
@@ -447,10 +447,13 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
        struct page *page = virt_to_head_page(data);
        int offset = data - page_address(page);
        struct sk_buff *skb = q->rx_head;
+       struct skb_shared_info *shinfo = skb_shinfo(skb);
 
-       offset += q->buf_offset;
-       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
-                       q->buf_size);
+       if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
+               offset += q->buf_offset;
+               skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
+                               q->buf_size);
+       }
 
        if (more)
                return;
index 9177298..e17f70b 100644 (file)
@@ -561,6 +561,7 @@ static inline void clear_pci_tx_desc_content(__le32 *__pdesc, int _size)
         rxmcs == DESC92C_RATE11M)
 
 struct phy_status_rpt {
+       u8      padding[2];
        u8      ch_corr[2];
        u8      cck_sig_qual_ofdm_pwdb_all;
        u8      cck_agc_rpt_ofdm_cfosho_a;
index ed049c9..f140f7d 100644 (file)
@@ -6274,7 +6274,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
        wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
                                WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
                                WIPHY_FLAG_HAS_CHANNEL_SWITCH |
-+                              WIPHY_FLAG_IBSS_RSN;
+                               WIPHY_FLAG_IBSS_RSN;
 
        wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
 
index 0cc9ac8..ed21231 100644 (file)
@@ -184,7 +184,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
        const struct firmware *fw;
        struct sk_buff *skb;
        unsigned long len;
-       u8 max_size, payload_size;
+       int max_size, payload_size;
        int rc = 0;
 
        if ((type == NCI_PATCH_TYPE_OTP && !info->otp_patch) ||
@@ -207,8 +207,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
 
        while (len) {
 
-               payload_size = min_t(unsigned long, (unsigned long) max_size,
-                                    len);
+               payload_size = min_t(unsigned long, max_size, len);
 
                skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + payload_size),
                                    GFP_KERNEL);
index 3e85c5c..0fe08c4 100644 (file)
@@ -850,9 +850,11 @@ out_free_tagset:
        if (new)
                blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
 out_free_async_qe:
-       nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
-               sizeof(struct nvme_command), DMA_TO_DEVICE);
-       ctrl->async_event_sqe.data = NULL;
+       if (ctrl->async_event_sqe.data) {
+               nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+                       sizeof(struct nvme_command), DMA_TO_DEVICE);
+               ctrl->async_event_sqe.data = NULL;
+       }
 out_free_queue:
        nvme_rdma_free_queue(&ctrl->queues[0]);
        return error;
index af674fc..5bb5342 100644 (file)
@@ -515,7 +515,7 @@ static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
        return 1;
 }
 
-static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
+static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 {
        struct nvmet_tcp_queue *queue = cmd->queue;
        int ret;
@@ -523,9 +523,15 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
        while (cmd->cur_sg) {
                struct page *page = sg_page(cmd->cur_sg);
                u32 left = cmd->cur_sg->length - cmd->offset;
+               int flags = MSG_DONTWAIT;
+
+               if ((!last_in_batch && cmd->queue->send_list_len) ||
+                   cmd->wbytes_done + left < cmd->req.transfer_len ||
+                   queue->data_digest || !queue->nvme_sq.sqhd_disabled)
+                       flags |= MSG_MORE;
 
                ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
-                                       left, MSG_DONTWAIT | MSG_MORE);
+                                       left, flags);
                if (ret <= 0)
                        return ret;
 
@@ -660,7 +666,7 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
        }
 
        if (cmd->state == NVMET_TCP_SEND_DATA) {
-               ret = nvmet_try_send_data(cmd);
+               ret = nvmet_try_send_data(cmd, last_in_batch);
                if (ret <= 0)
                        goto done_send;
        }
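
For illustration only (not part of the patch): the change above sets MSG_MORE on a data send only when more data for this command or another queued send is known to follow (or a digest/completion still has to go out), so the final fragment is pushed out immediately instead of waiting to be coalesced. A small self-contained user-space sketch of where the flag is applied; it uses an AF_UNIX socketpair purely so it runs anywhere (MSG_MORE influences coalescing on TCP/UDP sockets and is harmlessly ignored here):

#define _GNU_SOURCE          /* MSG_MORE on some libcs */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	const char *chunks[] = { "header ", "payload ", "trailer\n" };
	char buf[64] = { 0 };
	int sv[2];
	ssize_t n;

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
		return 1;

	/* MSG_MORE on every chunk except the last, mirroring the patch's intent */
	for (size_t i = 0; i < 3; i++) {
		int flags = (i < 2) ? MSG_MORE : 0;

		if (send(sv[0], chunks[i], strlen(chunks[i]), flags) < 0)
			return 1;
	}

	n = read(sv[1], buf, sizeof(buf) - 1);
	printf("received %zd bytes: %s", n, buf);
	close(sv[0]);
	close(sv[1]);
	return 0;
}
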
index 8270bbf..9f982c0 100644 (file)
@@ -306,6 +306,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
                                rc = of_mdiobus_register_phy(mdio, child, addr);
                                if (rc && rc != -ENODEV)
                                        goto unregister;
+                               break;
                        }
                }
        }
index 7b6409e..dce2626 100644 (file)
@@ -1073,13 +1073,26 @@ static int madera_pin_probe(struct platform_device *pdev)
                return ret;
        }
 
+       platform_set_drvdata(pdev, priv);
+
        dev_dbg(priv->dev, "pinctrl probed ok\n");
 
        return 0;
 }
 
+static int madera_pin_remove(struct platform_device *pdev)
+{
+       struct madera_pin_private *priv = platform_get_drvdata(pdev);
+
+       if (priv->madera->pdata.gpio_configs)
+               pinctrl_unregister_mappings(priv->madera->pdata.gpio_configs);
+
+       return 0;
+}
+
 static struct platform_driver madera_pin_driver = {
        .probe = madera_pin_probe,
+       .remove = madera_pin_remove,
        .driver = {
                .name = "madera-pinctrl",
        },
index 446d84f..f23c55e 100644 (file)
@@ -2021,7 +2021,6 @@ static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev)
                return PTR_ERR(pctldev->p);
        }
 
-       kref_get(&pctldev->p->users);
        pctldev->hog_default =
                pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
        if (IS_ERR(pctldev->hog_default)) {
index 73bf1d9..23cf04b 100644 (file)
@@ -23,12 +23,12 @@ struct imx_sc_msg_req_pad_set {
        struct imx_sc_rpc_msg hdr;
        u32 val;
        u16 pad;
-} __packed;
+} __packed __aligned(4);
 
 struct imx_sc_msg_req_pad_get {
        struct imx_sc_rpc_msg hdr;
        u16 pad;
-} __packed;
+} __packed __aligned(4);
 
 struct imx_sc_msg_resp_pad_get {
        struct imx_sc_rpc_msg hdr;
index 1b6e864..2ac921c 100644 (file)
@@ -147,8 +147,8 @@ static const unsigned int sdio_d0_pins[]    = { GPIOX_0 };
 static const unsigned int sdio_d1_pins[]       = { GPIOX_1 };
 static const unsigned int sdio_d2_pins[]       = { GPIOX_2 };
 static const unsigned int sdio_d3_pins[]       = { GPIOX_3 };
-static const unsigned int sdio_cmd_pins[]      = { GPIOX_4 };
-static const unsigned int sdio_clk_pins[]      = { GPIOX_5 };
+static const unsigned int sdio_clk_pins[]      = { GPIOX_4 };
+static const unsigned int sdio_cmd_pins[]      = { GPIOX_5 };
 static const unsigned int sdio_irq_pins[]      = { GPIOX_7 };
 
 static const unsigned int nand_ce0_pins[]      = { BOOT_8 };
index a454f57..62c02b9 100644 (file)
@@ -451,7 +451,7 @@ static int pinctrl_falcon_probe(struct platform_device *pdev)
                falcon_info.clk[*bank] = clk_get(&ppdev->dev, NULL);
                if (IS_ERR(falcon_info.clk[*bank])) {
                        dev_err(&ppdev->dev, "failed to get clock\n");
-                       of_node_put(np)
+                       of_node_put(np);
                        return PTR_ERR(falcon_info.clk[*bank]);
                }
                falcon_info.membase[*bank] = devm_ioremap_resource(&pdev->dev,
index 9a8daa2..1a948c3 100644 (file)
@@ -1104,7 +1104,6 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
        pctrl->irq_chip.irq_mask = msm_gpio_irq_mask;
        pctrl->irq_chip.irq_unmask = msm_gpio_irq_unmask;
        pctrl->irq_chip.irq_ack = msm_gpio_irq_ack;
-       pctrl->irq_chip.irq_eoi = irq_chip_eoi_parent;
        pctrl->irq_chip.irq_set_type = msm_gpio_irq_set_type;
        pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake;
        pctrl->irq_chip.irq_request_resources = msm_gpio_irq_reqres;
@@ -1118,7 +1117,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
                if (!chip->irq.parent_domain)
                        return -EPROBE_DEFER;
                chip->irq.child_to_parent_hwirq = msm_gpio_wakeirq;
-
+               pctrl->irq_chip.irq_eoi = irq_chip_eoi_parent;
                /*
                 * Let's skip handling the GPIOs, if the parent irqchip
                 * is handling the direct connect IRQ of the GPIO.
index fba1d41..338a15d 100644 (file)
@@ -794,7 +794,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
        girq->fwnode = of_node_to_fwnode(pctrl->dev->of_node);
        girq->parent_domain = parent_domain;
        girq->child_to_parent_hwirq = pm8xxx_child_to_parent_hwirq;
-       girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
+       girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_twocell;
        girq->child_offset_to_irq = pm8xxx_child_offset_to_irq;
        girq->child_irq_domain_ops.translate = pm8xxx_domain_translate;
 
index 34c8b6c..8e50388 100644 (file)
@@ -327,6 +327,7 @@ config RTC_DRV_MAX6900
 config RTC_DRV_MAX8907
        tristate "Maxim MAX8907"
        depends on MFD_MAX8907 || COMPILE_TEST
+       select REGMAP_IRQ
        help
          If you say yes here you will get support for the
          RTC of Maxim MAX8907 PMIC.
index 6cca727..cf87eb2 100644 (file)
@@ -178,6 +178,8 @@ struct dasd_block *dasd_alloc_block(void)
                     (unsigned long) block);
        INIT_LIST_HEAD(&block->ccw_queue);
        spin_lock_init(&block->queue_lock);
+       INIT_LIST_HEAD(&block->format_list);
+       spin_lock_init(&block->format_lock);
        timer_setup(&block->timer, dasd_block_timeout, 0);
        spin_lock_init(&block->profile.lock);
 
@@ -1779,20 +1781,26 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 
        if (dasd_ese_needs_format(cqr->block, irb)) {
                if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
-                       device->discipline->ese_read(cqr);
+                       device->discipline->ese_read(cqr, irb);
                        cqr->status = DASD_CQR_SUCCESS;
                        cqr->stopclk = now;
                        dasd_device_clear_timer(device);
                        dasd_schedule_device_bh(device);
                        return;
                }
-               fcqr = device->discipline->ese_format(device, cqr);
+               fcqr = device->discipline->ese_format(device, cqr, irb);
                if (IS_ERR(fcqr)) {
+                       if (PTR_ERR(fcqr) == -EINVAL) {
+                               cqr->status = DASD_CQR_ERROR;
+                               return;
+                       }
                        /*
                         * If we can't format now, let the request go
                         * one extra round. Maybe we can format later.
                         */
                        cqr->status = DASD_CQR_QUEUED;
+                       dasd_schedule_device_bh(device);
+                       return;
                } else {
                        fcqr->status = DASD_CQR_QUEUED;
                        cqr->status = DASD_CQR_QUEUED;
@@ -2748,11 +2756,13 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
 {
        struct request *req;
        blk_status_t error = BLK_STS_OK;
+       unsigned int proc_bytes;
        int status;
 
        req = (struct request *) cqr->callback_data;
        dasd_profile_end(cqr->block, cqr, req);
 
+       proc_bytes = cqr->proc_bytes;
        status = cqr->block->base->discipline->free_cp(cqr, req);
        if (status < 0)
                error = errno_to_blk_status(status);
@@ -2783,7 +2793,18 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
                blk_mq_end_request(req, error);
                blk_mq_run_hw_queues(req->q, true);
        } else {
-               blk_mq_complete_request(req);
+               /*
+                * Partially completed requests can happen with ESE devices.
+                * During a read we might have gotten an NRF error and have to
+                * complete a request partially.
+                */
+               if (proc_bytes) {
+                       blk_update_request(req, BLK_STS_OK,
+                                          blk_rq_bytes(req) - proc_bytes);
+                       blk_mq_requeue_request(req, true);
+               } else {
+                       blk_mq_complete_request(req);
+               }
        }
 }
 
index a28b9ff..ad44d22 100644 (file)
@@ -207,6 +207,45 @@ static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
        geo->head |= head;
 }
 
+/*
+ * calculate the failing track from the sense data, depending on whether
+ * it is an EAV device or not
+ */
+static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
+                                   sector_t *track)
+{
+       struct dasd_eckd_private *private = device->private;
+       u8 *sense = NULL;
+       u32 cyl;
+       u8 head;
+
+       sense = dasd_get_sense(irb);
+       if (!sense) {
+               DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+                             "ESE error no sense data\n");
+               return -EINVAL;
+       }
+       if (!(sense[27] & DASD_SENSE_BIT_2)) {
+               DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+                             "ESE error no valid track data\n");
+               return -EINVAL;
+       }
+
+       if (sense[27] & DASD_SENSE_BIT_3) {
+               /* enhanced addressing */
+               cyl = sense[30] << 20;
+               cyl |= (sense[31] & 0xF0) << 12;
+               cyl |= sense[28] << 8;
+               cyl |= sense[29];
+       } else {
+               cyl = sense[29] << 8;
+               cyl |= sense[30];
+       }
+       head = sense[31] & 0x0F;
+       *track = cyl * private->rdc_data.trk_per_cyl + head;
+       return 0;
+}
+
 static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
                     struct dasd_device *device)
 {
@@ -2986,6 +3025,37 @@ static int dasd_eckd_format_device(struct dasd_device *base,
                                             0, NULL);
 }
 
+static bool test_and_set_format_track(struct dasd_format_entry *to_format,
+                                     struct dasd_block *block)
+{
+       struct dasd_format_entry *format;
+       unsigned long flags;
+       bool rc = false;
+
+       spin_lock_irqsave(&block->format_lock, flags);
+       list_for_each_entry(format, &block->format_list, list) {
+               if (format->track == to_format->track) {
+                       rc = true;
+                       goto out;
+               }
+       }
+       list_add_tail(&to_format->list, &block->format_list);
+
+out:
+       spin_unlock_irqrestore(&block->format_lock, flags);
+       return rc;
+}
+
+static void clear_format_track(struct dasd_format_entry *format,
+                             struct dasd_block *block)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&block->format_lock, flags);
+       list_del_init(&format->list);
+       spin_unlock_irqrestore(&block->format_lock, flags);
+}
+
 /*
  * Callback function to free ESE format requests.
  */
@@ -2993,15 +3063,19 @@ static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
 {
        struct dasd_device *device = cqr->startdev;
        struct dasd_eckd_private *private = device->private;
+       struct dasd_format_entry *format = data;
 
+       clear_format_track(format, cqr->basedev->block);
        private->count--;
        dasd_ffree_request(cqr, device);
 }
 
 static struct dasd_ccw_req *
-dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
+dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
+                    struct irb *irb)
 {
        struct dasd_eckd_private *private;
+       struct dasd_format_entry *format;
        struct format_data_t fdata;
        unsigned int recs_per_trk;
        struct dasd_ccw_req *fcqr;
@@ -3011,23 +3085,39 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
        struct request *req;
        sector_t first_trk;
        sector_t last_trk;
+       sector_t curr_trk;
        int rc;
 
        req = cqr->callback_data;
-       base = cqr->block->base;
+       block = cqr->block;
+       base = block->base;
        private = base->private;
-       block = base->block;
        blksize = block->bp_block;
        recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+       format = &startdev->format_entry;
 
        first_trk = blk_rq_pos(req) >> block->s2b_shift;
        sector_div(first_trk, recs_per_trk);
        last_trk =
                (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
        sector_div(last_trk, recs_per_trk);
+       rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
+       if (rc)
+               return ERR_PTR(rc);
 
-       fdata.start_unit = first_trk;
-       fdata.stop_unit = last_trk;
+       if (curr_trk < first_trk || curr_trk > last_trk) {
+               DBF_DEV_EVENT(DBF_WARNING, startdev,
+                             "ESE error track %llu not within range %llu - %llu\n",
+                             curr_trk, first_trk, last_trk);
+               return ERR_PTR(-EINVAL);
+       }
+       format->track = curr_trk;
+       /* test if the track is already being formatted by another thread */
+       if (test_and_set_format_track(format, block))
+               return ERR_PTR(-EEXIST);
+
+       fdata.start_unit = curr_trk;
+       fdata.stop_unit = curr_trk;
        fdata.blksize = blksize;
        fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
 
@@ -3044,6 +3134,7 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
                return fcqr;
 
        fcqr->callback = dasd_eckd_ese_format_cb;
+       fcqr->callback_data = (void *) format;
 
        return fcqr;
 }
@@ -3051,29 +3142,87 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
 /*
  * When data is read from an unformatted area of an ESE volume, this function
  * returns zeroed data and thereby mimics a read of zero data.
+ *
+ * The first unformatted track is the one that got the NRF error; its address is
+ * encoded in the sense data.
+ *
+ * All tracks before have returned valid data and should not be touched.
+ * All tracks after the unformatted track might be formatted or not. This is
+ * currently not known, so remember the processed data and return the remainder
+ * of the request to the block layer in __dasd_cleanup_cqr().
  */
-static void dasd_eckd_ese_read(struct dasd_ccw_req *cqr)
+static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
 {
+       struct dasd_eckd_private *private;
+       sector_t first_trk, last_trk;
+       sector_t first_blk, last_blk;
        unsigned int blksize, off;
+       unsigned int recs_per_trk;
        struct dasd_device *base;
        struct req_iterator iter;
+       struct dasd_block *block;
+       unsigned int skip_block;
+       unsigned int blk_count;
        struct request *req;
        struct bio_vec bv;
+       sector_t curr_trk;
+       sector_t end_blk;
        char *dst;
+       int rc;
 
        req = (struct request *) cqr->callback_data;
        base = cqr->block->base;
        blksize = base->block->bp_block;
+       block =  cqr->block;
+       private = base->private;
+       skip_block = 0;
+       blk_count = 0;
+
+       recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+       first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
+       sector_div(first_trk, recs_per_trk);
+       last_trk = last_blk =
+               (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
+       sector_div(last_trk, recs_per_trk);
+       rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
+       if (rc)
+               return rc;
+
+       /* sanity check if the current track from sense data is valid */
+       if (curr_trk < first_trk || curr_trk > last_trk) {
+               DBF_DEV_EVENT(DBF_WARNING, base,
+                             "ESE error track %llu not within range %llu - %llu\n",
+                             curr_trk, first_trk, last_trk);
+               return -EINVAL;
+       }
+
+       /*
+        * if the track that got the NRF error is not the first track, we have
+        * to skip over the valid blocks before it
+        */
+       if (curr_trk != first_trk)
+               skip_block = curr_trk * recs_per_trk - first_blk;
+
+       /* we have no information beyond the current track */
+       end_blk = (curr_trk + 1) * recs_per_trk;
 
        rq_for_each_segment(bv, req, iter) {
                dst = page_address(bv.bv_page) + bv.bv_offset;
                for (off = 0; off < bv.bv_len; off += blksize) {
-                       if (dst && rq_data_dir(req) == READ) {
+                       if (first_blk + blk_count >= end_blk) {
+                               cqr->proc_bytes = blk_count * blksize;
+                               return 0;
+                       }
+                       if (dst && !skip_block) {
                                dst += off;
                                memset(dst, 0, blksize);
+                       } else {
+                               skip_block--;
                        }
+                       blk_count++;
                }
        }
+       return 0;
 }
 
 /*
index 91c9f95..fa552f9 100644 (file)
@@ -187,6 +187,7 @@ struct dasd_ccw_req {
 
        void (*callback)(struct dasd_ccw_req *, void *data);
        void *callback_data;
+       unsigned int proc_bytes;        /* bytes for partial completion */
 };
 
 /*
@@ -387,8 +388,9 @@ struct dasd_discipline {
        int (*ext_pool_warn_thrshld)(struct dasd_device *);
        int (*ext_pool_oos)(struct dasd_device *);
        int (*ext_pool_exhaust)(struct dasd_device *, struct dasd_ccw_req *);
-       struct dasd_ccw_req *(*ese_format)(struct dasd_device *, struct dasd_ccw_req *);
-       void (*ese_read)(struct dasd_ccw_req *);
+       struct dasd_ccw_req *(*ese_format)(struct dasd_device *,
+                                          struct dasd_ccw_req *, struct irb *);
+       int (*ese_read)(struct dasd_ccw_req *, struct irb *);
 };
 
 extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -474,6 +476,11 @@ struct dasd_profile {
        spinlock_t lock;
 };
 
+struct dasd_format_entry {
+       struct list_head list;
+       sector_t track;
+};
+
 struct dasd_device {
        /* Block device stuff. */
        struct dasd_block *block;
@@ -539,6 +546,7 @@ struct dasd_device {
        struct dentry *debugfs_dentry;
        struct dentry *hosts_dentry;
        struct dasd_profile profile;
+       struct dasd_format_entry format_entry;
 };
 
 struct dasd_block {
@@ -564,6 +572,9 @@ struct dasd_block {
 
        struct dentry *debugfs_dentry;
        struct dasd_profile profile;
+
+       struct list_head format_list;
+       spinlock_t format_lock;
 };
 
 struct dasd_attention_data {
index 9575a62..468cada 100644 (file)
@@ -369,7 +369,7 @@ enum qeth_qdio_info_states {
 struct qeth_buffer_pool_entry {
        struct list_head list;
        struct list_head init_list;
-       void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
+       struct page *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
 };
 
 struct qeth_qdio_buffer_pool {
@@ -983,7 +983,7 @@ extern const struct attribute_group qeth_device_blkt_group;
 extern const struct device_type qeth_generic_devtype;
 
 const char *qeth_get_cardname_short(struct qeth_card *);
-int qeth_realloc_buffer_pool(struct qeth_card *, int);
+int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count);
 int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
 void qeth_core_free_discipline(struct qeth_card *);
 
index 8ca85c8..6d3f2f1 100644 (file)
@@ -65,7 +65,6 @@ static struct lock_class_key qdio_out_skb_queue_key;
 static void qeth_issue_next_read_cb(struct qeth_card *card,
                                    struct qeth_cmd_buffer *iob,
                                    unsigned int data_length);
-static void qeth_free_buffer_pool(struct qeth_card *);
 static int qeth_qdio_establish(struct qeth_card *);
 static void qeth_free_qdio_queues(struct qeth_card *card);
 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
@@ -212,49 +211,121 @@ void qeth_clear_working_pool_list(struct qeth_card *card)
 }
 EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
 
+static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
+               if (entry->elements[i])
+                       __free_page(entry->elements[i]);
+       }
+
+       kfree(entry);
+}
+
+static void qeth_free_buffer_pool(struct qeth_card *card)
+{
+       struct qeth_buffer_pool_entry *entry, *tmp;
+
+       list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
+                                init_list) {
+               list_del(&entry->init_list);
+               qeth_free_pool_entry(entry);
+       }
+}
+
+static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
+{
+       struct qeth_buffer_pool_entry *entry;
+       unsigned int i;
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return NULL;
+
+       for (i = 0; i < pages; i++) {
+               entry->elements[i] = alloc_page(GFP_KERNEL);
+
+               if (!entry->elements[i]) {
+                       qeth_free_pool_entry(entry);
+                       return NULL;
+               }
+       }
+
+       return entry;
+}
+
 static int qeth_alloc_buffer_pool(struct qeth_card *card)
 {
-       struct qeth_buffer_pool_entry *pool_entry;
-       void *ptr;
-       int i, j;
+       unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+       unsigned int i;
 
        QETH_CARD_TEXT(card, 5, "alocpool");
        for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
-               pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
-               if (!pool_entry) {
+               struct qeth_buffer_pool_entry *entry;
+
+               entry = qeth_alloc_pool_entry(buf_elements);
+               if (!entry) {
                        qeth_free_buffer_pool(card);
                        return -ENOMEM;
                }
-               for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
-                       ptr = (void *) __get_free_page(GFP_KERNEL);
-                       if (!ptr) {
-                               while (j > 0)
-                                       free_page((unsigned long)
-                                                 pool_entry->elements[--j]);
-                               kfree(pool_entry);
-                               qeth_free_buffer_pool(card);
-                               return -ENOMEM;
-                       }
-                       pool_entry->elements[j] = ptr;
-               }
-               list_add(&pool_entry->init_list,
-                        &card->qdio.init_pool.entry_list);
+
+               list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
        }
        return 0;
 }
 
-int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
+int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
 {
+       unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+       struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
+       struct qeth_buffer_pool_entry *entry, *tmp;
+       int delta = count - pool->buf_count;
+       LIST_HEAD(entries);
+
        QETH_CARD_TEXT(card, 2, "realcbp");
 
-       /* TODO: steel/add buffers from/to a running card's buffer pool (?) */
-       qeth_clear_working_pool_list(card);
-       qeth_free_buffer_pool(card);
-       card->qdio.in_buf_pool.buf_count = bufcnt;
-       card->qdio.init_pool.buf_count = bufcnt;
-       return qeth_alloc_buffer_pool(card);
+       /* Defer until queue is allocated: */
+       if (!card->qdio.in_q)
+               goto out;
+
+       /* Remove entries from the pool: */
+       while (delta < 0) {
+               entry = list_first_entry(&pool->entry_list,
+                                        struct qeth_buffer_pool_entry,
+                                        init_list);
+               list_del(&entry->init_list);
+               qeth_free_pool_entry(entry);
+
+               delta++;
+       }
+
+       /* Allocate additional entries: */
+       while (delta > 0) {
+               entry = qeth_alloc_pool_entry(buf_elements);
+               if (!entry) {
+                       list_for_each_entry_safe(entry, tmp, &entries,
+                                                init_list) {
+                               list_del(&entry->init_list);
+                               qeth_free_pool_entry(entry);
+                       }
+
+                       return -ENOMEM;
+               }
+
+               list_add(&entry->init_list, &entries);
+
+               delta--;
+       }
+
+       list_splice(&entries, &pool->entry_list);
+
+out:
+       card->qdio.in_buf_pool.buf_count = count;
+       pool->buf_count = count;
+       return 0;
 }
-EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
+EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);
 
 static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
 {
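qeth_resize_buffer_pool() above adjusts the pool by the signed delta between the old and the requested count: surplus entries are freed one by one, while new entries are first collected on a local list and only spliced into the real pool once every allocation has succeeded, so a mid-way -ENOMEM leaves the existing pool untouched. A self-contained sketch of that stage-then-splice pattern (the entry type and function names are hypothetical stand-ins; locking is omitted):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>

struct pool_entry {			/* stand-in for qeth_buffer_pool_entry */
	struct list_head list;
};

static int pool_resize(struct list_head *pool, unsigned int *cur, unsigned int want)
{
	struct pool_entry *e, *tmp;
	int delta = want - *cur;
	LIST_HEAD(staged);

	while (delta < 0) {		/* shrink: free surplus entries */
		e = list_first_entry(pool, struct pool_entry, list);
		list_del(&e->list);
		kfree(e);
		delta++;
	}

	while (delta > 0) {		/* grow: stage new entries first */
		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e) {
			list_for_each_entry_safe(e, tmp, &staged, list) {
				list_del(&e->list);
				kfree(e);
			}
			return -ENOMEM;	/* the old pool is untouched */
		}
		list_add(&e->list, &staged);
		delta--;
	}

	list_splice(&staged, pool);	/* commit only on full success */
	*cur = want;
	return 0;
}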
@@ -1170,19 +1241,6 @@ void qeth_drain_output_queues(struct qeth_card *card)
 }
 EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
 
-static void qeth_free_buffer_pool(struct qeth_card *card)
-{
-       struct qeth_buffer_pool_entry *pool_entry, *tmp;
-       int i = 0;
-       list_for_each_entry_safe(pool_entry, tmp,
-                                &card->qdio.init_pool.entry_list, init_list){
-               for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
-                       free_page((unsigned long)pool_entry->elements[i]);
-               list_del(&pool_entry->init_list);
-               kfree(pool_entry);
-       }
-}
-
 static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
 {
        unsigned int count = single ? 1 : card->dev->num_tx_queues;
@@ -1204,7 +1262,6 @@ static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
        if (count == 1)
                dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
 
-       card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
        card->qdio.no_out_queues = count;
        return 0;
 }
@@ -2393,7 +2450,6 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
                return;
 
        qeth_free_cq(card);
-       cancel_delayed_work_sync(&card->buffer_reclaim_work);
        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
                if (card->qdio.in_q->bufs[j].rx_skb)
                        dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
@@ -2575,7 +2631,6 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
        struct list_head *plh;
        struct qeth_buffer_pool_entry *entry;
        int i, free;
-       struct page *page;
 
        if (list_empty(&card->qdio.in_buf_pool.entry_list))
                return NULL;
@@ -2584,7 +2639,7 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
                entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
                free = 1;
                for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
-                       if (page_count(virt_to_page(entry->elements[i])) > 1) {
+                       if (page_count(entry->elements[i]) > 1) {
                                free = 0;
                                break;
                        }
@@ -2599,15 +2654,15 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
        entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
                        struct qeth_buffer_pool_entry, list);
        for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
-               if (page_count(virt_to_page(entry->elements[i])) > 1) {
-                       page = alloc_page(GFP_ATOMIC);
-                       if (!page) {
+               if (page_count(entry->elements[i]) > 1) {
+                       struct page *page = alloc_page(GFP_ATOMIC);
+
+                       if (!page)
                                return NULL;
-                       } else {
-                               free_page((unsigned long)entry->elements[i]);
-                               entry->elements[i] = page_address(page);
-                               QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
-                       }
+
+                       __free_page(entry->elements[i]);
+                       entry->elements[i] = page;
+                       QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
                }
        }
        list_del_init(&entry->list);
@@ -2625,12 +2680,12 @@ static int qeth_init_input_buffer(struct qeth_card *card,
                                               ETH_HLEN +
                                               sizeof(struct ipv6hdr));
                if (!buf->rx_skb)
-                       return 1;
+                       return -ENOMEM;
        }
 
        pool_entry = qeth_find_free_buffer_pool_entry(card);
        if (!pool_entry)
-               return 1;
+               return -ENOBUFS;
 
        /*
         * since the buffer is accessed only from the input_tasklet
@@ -2643,7 +2698,7 @@ static int qeth_init_input_buffer(struct qeth_card *card,
        for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
                buf->buffer->element[i].length = PAGE_SIZE;
                buf->buffer->element[i].addr =
-                       virt_to_phys(pool_entry->elements[i]);
+                       page_to_phys(pool_entry->elements[i]);
                if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
                        buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
                else
@@ -2675,10 +2730,15 @@ static int qeth_init_qdio_queues(struct qeth_card *card)
        /* inbound queue */
        qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
        memset(&card->rx, 0, sizeof(struct qeth_rx));
+
        qeth_initialize_working_pool_list(card);
        /*give only as many buffers to hardware as we have buffer pool entries*/
-       for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
-               qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
+       for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) {
+               rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
+               if (rc)
+                       return rc;
+       }
+
        card->qdio.in_q->next_buf_to_init =
                card->qdio.in_buf_pool.buf_count - 1;
        rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
index 2bd9993..78cae61 100644 (file)
@@ -247,8 +247,8 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct qeth_card *card = dev_get_drvdata(dev);
+       unsigned int cnt;
        char *tmp;
-       int cnt, old_cnt;
        int rc = 0;
 
        mutex_lock(&card->conf_mutex);
@@ -257,13 +257,12 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
                goto out;
        }
 
-       old_cnt = card->qdio.in_buf_pool.buf_count;
        cnt = simple_strtoul(buf, &tmp, 10);
        cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
                ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
-       if (old_cnt != cnt) {
-               rc = qeth_realloc_buffer_pool(card, cnt);
-       }
+
+       rc = qeth_resize_buffer_pool(card, cnt);
+
 out:
        mutex_unlock(&card->conf_mutex);
        return rc ? rc : count;
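The bufcnt store handler above no longer compares against the previous count before calling qeth_resize_buffer_pool(), since resizing to the current size now degenerates to a no-op, and it clamps the requested value into [QETH_IN_BUF_COUNT_MIN, QETH_IN_BUF_COUNT_MAX]. A hedged sketch of the same parse-and-clamp step, written with kstrtouint() and clamp_t() rather than the driver's simple_strtoul() ternaries (an alternative formulation, not the driver's code; the conf_mutex handling is omitted):

#include <linux/kernel.h>

/* assumes the driver's qeth_core.h definitions are in scope */
static ssize_t bufcnt_store_sketch(struct qeth_card *card,
				   const char *buf, size_t count)
{
	unsigned int cnt;
	int rc;

	rc = kstrtouint(buf, 10, &cnt);		/* rejects trailing garbage */
	if (rc)
		return rc;

	cnt = clamp_t(unsigned int, cnt,
		      QETH_IN_BUF_COUNT_MIN, QETH_IN_BUF_COUNT_MAX);

	rc = qeth_resize_buffer_pool(card, cnt);
	return rc ? rc : count;
}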
index 9972d96..8fb2937 100644 (file)
@@ -284,6 +284,7 @@ static void qeth_l2_stop_card(struct qeth_card *card)
        if (card->state == CARD_STATE_SOFTSETUP) {
                qeth_clear_ipacmd_list(card);
                qeth_drain_output_queues(card);
+               cancel_delayed_work_sync(&card->buffer_reclaim_work);
                card->state = CARD_STATE_DOWN;
        }
 
index 317d566..82f800d 100644 (file)
@@ -1178,6 +1178,7 @@ static void qeth_l3_stop_card(struct qeth_card *card)
                qeth_l3_clear_ip_htable(card, 1);
                qeth_clear_ipacmd_list(card);
                qeth_drain_output_queues(card);
+               cancel_delayed_work_sync(&card->buffer_reclaim_work);
                card->state = CARD_STATE_DOWN;
        }
 
index 29f2517..a3d1c3b 100644 (file)
@@ -206,12 +206,11 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
                qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
                if (card->ssqd.qdioac2 & CHSC_AC2_SNIFFER_AVAILABLE) {
                        card->options.sniffer = i;
-                       if (card->qdio.init_pool.buf_count !=
-                                       QETH_IN_BUF_COUNT_MAX)
-                               qeth_realloc_buffer_pool(card,
-                                       QETH_IN_BUF_COUNT_MAX);
-               } else
+                       qeth_resize_buffer_pool(card, QETH_IN_BUF_COUNT_MAX);
+               } else {
                        rc = -EPERM;
+               }
+
                break;
        default:
                rc = -EINVAL;
index a7881f8..1b6eaf8 100644 (file)
@@ -989,6 +989,7 @@ config SCSI_SYM53C8XX_MMIO
 config SCSI_IPR
        tristate "IBM Power Linux RAID adapter support"
        depends on PCI && SCSI && ATA
+       select SATA_HOST
        select FW_LOADER
        select IRQ_POLL
        select SGL_ALLOC
index ae45cbe..cd8db13 100644 (file)
@@ -9950,6 +9950,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
        ioa_cfg->max_devs_supported = ipr_max_devs;
 
        if (ioa_cfg->sis64) {
+               host->max_channel = IPR_MAX_SIS64_BUSES;
                host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
                host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
                if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
@@ -9958,6 +9959,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
                                           + ((sizeof(struct ipr_config_table_entry64)
                                               * ioa_cfg->max_devs_supported)));
        } else {
+               host->max_channel = IPR_VSET_BUS;
                host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
                host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
                if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
@@ -9967,7 +9969,6 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
                                               * ioa_cfg->max_devs_supported)));
        }
 
-       host->max_channel = IPR_VSET_BUS;
        host->unique_id = host->host_no;
        host->max_cmd_len = IPR_MAX_CDB_LEN;
        host->can_queue = ioa_cfg->max_cmds;
index a67baeb..b97aa9a 100644 (file)
@@ -1300,6 +1300,7 @@ struct ipr_resource_entry {
 #define IPR_ARRAY_VIRTUAL_BUS                  0x1
 #define IPR_VSET_VIRTUAL_BUS                   0x2
 #define IPR_IOAFP_VIRTUAL_BUS                  0x3
+#define IPR_MAX_SIS64_BUSES                    0x4
 
 #define IPR_GET_RES_PHYS_LOC(res) \
        (((res)->bus << 24) | ((res)->target << 8) | (res)->lun)
index 5c6a5ef..052ee3a 100644 (file)
@@ -19,6 +19,7 @@ config SCSI_SAS_ATA
        bool "ATA support for libsas (requires libata)"
        depends on SCSI_SAS_LIBSAS
        depends on ATA = y || ATA = SCSI_SAS_LIBSAS
+       select SATA_HOST
        help
                Builds in ATA support into libsas.  Will necessitate
                the loading of libata along with libsas.
index b520a98..7a94e11 100644 (file)
@@ -864,7 +864,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
                goto qc24_fail_command;
        }
 
-       if (atomic_read(&fcport->state) != FCS_ONLINE) {
+       if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
                if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
                        atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
                        ql_dbg(ql_dbg_io, vha, 0x3005,
@@ -946,7 +946,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
                goto qc24_fail_command;
        }
 
-       if (atomic_read(&fcport->state) != FCS_ONLINE) {
+       if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
                if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
                        atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
                        ql_dbg(ql_dbg_io, vha, 0x3077,
index 707f47c..a793cb0 100644 (file)
@@ -3169,9 +3169,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
        if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
                q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
                rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
-       } else
+       } else {
+               q->limits.io_opt = 0;
                rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
                                      (sector_t)BLK_DEF_MAX_SECTORS);
+       }
 
        /* Do not exceed controller limit */
        rw_max = min(rw_max, queue_max_hw_sectors(q));
index abd0e6b..2d70569 100644 (file)
@@ -3884,18 +3884,25 @@ EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
 {
        unsigned long flags;
+       bool update = false;
 
-       if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT))
+       if (!ufshcd_is_auto_hibern8_supported(hba))
                return;
 
        spin_lock_irqsave(hba->host->host_lock, flags);
-       if (hba->ahit == ahit)
-               goto out_unlock;
-       hba->ahit = ahit;
-       if (!pm_runtime_suspended(hba->dev))
-               ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
-out_unlock:
+       if (hba->ahit != ahit) {
+               hba->ahit = ahit;
+               update = true;
+       }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       if (update && !pm_runtime_suspended(hba->dev)) {
+               pm_runtime_get_sync(hba->dev);
+               ufshcd_hold(hba, false);
+               ufshcd_auto_hibern8_enable(hba);
+               ufshcd_release(hba);
+               pm_runtime_put(hba->dev);
+       }
 }
 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
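The ufshcd_auto_hibern8_update() rework above is an instance of a common pattern: take the spinlock only long enough to compare and record the new value, then perform the work that may sleep (runtime-PM resume, ufshcd_hold()/ufshcd_release(), register programming) outside the lock. A minimal sketch of that shape under stated assumptions; cfg_lock, cfg_val and hw_apply() are hypothetical:

#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(cfg_lock);
static u32 cfg_val;

static void hw_apply(struct device *dev, u32 val)
{
	/* hypothetical register write; may sleep */
}

static void cfg_update(struct device *dev, u32 new_val)
{
	unsigned long flags;
	bool update = false;

	spin_lock_irqsave(&cfg_lock, flags);
	if (cfg_val != new_val) {
		cfg_val = new_val;
		update = true;			/* record the decision only */
	}
	spin_unlock_irqrestore(&cfg_lock, flags);

	if (update && !pm_runtime_suspended(dev)) {
		pm_runtime_get_sync(dev);	/* may sleep: not allowed under the lock */
		hw_apply(dev, new_val);
		pm_runtime_put(dev);
	}
}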
 
index e3f5ebc..fc2575f 100644 (file)
@@ -1320,6 +1320,9 @@ static const struct of_device_id qcom_slim_ngd_dt_match[] = {
        {
                .compatible = "qcom,slim-ngd-v1.5.0",
                .data = &ngd_v1_5_offset_info,
+       },{
+               .compatible = "qcom,slim-ngd-v2.1.0",
+               .data = &ngd_v1_5_offset_info,
        },
        {}
 };
index 70014ec..7b642c3 100644 (file)
@@ -233,10 +233,6 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
                goto err_allocate_irqs;
        }
 
-       err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
-       if (err)
-               goto err_register_dpio_irq;
-
        priv->io = dpaa2_io_create(&desc, dev);
        if (!priv->io) {
                dev_err(dev, "dpaa2_io_create failed\n");
@@ -244,6 +240,10 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
                goto err_dpaa2_io_create;
        }
 
+       err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
+       if (err)
+               goto err_register_dpio_irq;
+
        dev_info(dev, "probed\n");
        dev_dbg(dev, "   receives_notifications = %d\n",
                desc.receives_notifications);
index 2dad496..8d4d050 100644 (file)
@@ -59,7 +59,7 @@ static int __init exynos_chipid_early_init(void)
        syscon = of_find_compatible_node(NULL, NULL,
                                         "samsung,exynos4210-chipid");
        if (!syscon)
-               return ENODEV;
+               return -ENODEV;
 
        regmap = device_node_to_regmap(syscon);
        of_node_put(syscon);
index ba6f905..69c6dce 100644 (file)
@@ -19,6 +19,7 @@
 #include <signal.h>
 
 #define MAX_NUM_DEVICES 10
+#define MAX_SYSFS_PREFIX 0x80
 #define MAX_SYSFS_PATH 0x200
 #define CSV_MAX_LINE   0x1000
 #define SYSFS_MAX_INT  0x20
@@ -67,7 +68,7 @@ struct loopback_results {
 };
 
 struct loopback_device {
-       char name[MAX_SYSFS_PATH];
+       char name[MAX_STR_LEN];
        char sysfs_entry[MAX_SYSFS_PATH];
        char debugfs_entry[MAX_SYSFS_PATH];
        struct loopback_results results;
@@ -93,8 +94,8 @@ struct loopback_test {
        int stop_all;
        int poll_count;
        char test_name[MAX_STR_LEN];
-       char sysfs_prefix[MAX_SYSFS_PATH];
-       char debugfs_prefix[MAX_SYSFS_PATH];
+       char sysfs_prefix[MAX_SYSFS_PREFIX];
+       char debugfs_prefix[MAX_SYSFS_PREFIX];
        struct timespec poll_timeout;
        struct loopback_device devices[MAX_NUM_DEVICES];
        struct loopback_results aggregate_results;
@@ -637,7 +638,7 @@ baddir:
 static int open_poll_files(struct loopback_test *t)
 {
        struct loopback_device *dev;
-       char buf[MAX_STR_LEN];
+       char buf[MAX_SYSFS_PATH + MAX_STR_LEN];
        char dummy;
        int fds_idx = 0;
        int i;
@@ -655,7 +656,7 @@ static int open_poll_files(struct loopback_test *t)
                        goto err;
                }
                read(t->fds[fds_idx].fd, &dummy, 1);
-               t->fds[fds_idx].events = EPOLLERR|EPOLLPRI;
+               t->fds[fds_idx].events = POLLERR | POLLPRI;
                t->fds[fds_idx].revents = 0;
                fds_idx++;
        }
@@ -748,7 +749,7 @@ static int wait_for_complete(struct loopback_test *t)
                }
 
                for (i = 0; i < t->poll_count; i++) {
-                       if (t->fds[i].revents & EPOLLPRI) {
+                       if (t->fds[i].revents & POLLPRI) {
                                /* Dummy read to clear the event */
                                read(t->fds[i].fd, &dummy, 1);
                                number_of_events++;
@@ -907,10 +908,10 @@ int main(int argc, char *argv[])
                        t.iteration_max = atoi(optarg);
                        break;
                case 'S':
-                       snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", optarg);
+                       snprintf(t.sysfs_prefix, MAX_SYSFS_PREFIX, "%s", optarg);
                        break;
                case 'D':
-                       snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", optarg);
+                       snprintf(t.debugfs_prefix, MAX_SYSFS_PREFIX, "%s", optarg);
                        break;
                case 'm':
                        t.mask = atol(optarg);
@@ -961,10 +962,10 @@ int main(int argc, char *argv[])
        }
 
        if (!strcmp(t.sysfs_prefix, ""))
-               snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", sysfs_prefix);
+               snprintf(t.sysfs_prefix, MAX_SYSFS_PREFIX, "%s", sysfs_prefix);
 
        if (!strcmp(t.debugfs_prefix, ""))
-               snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", debugfs_prefix);
+               snprintf(t.debugfs_prefix, MAX_SYSFS_PREFIX, "%s", debugfs_prefix);
 
        ret = find_loopback_devices(&t);
        if (ret)
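The loopback_test fixes above tighten the userspace tool: the prefix buffers shrink and the path buffer grows so snprintf() concatenation cannot truncate, and the event masks become POLLERR | POLLPRI, the constants that belong to poll(2) (the tool never uses epoll). Waiting on a sysfs attribute also needs a priming read before poll() and another read to clear the event, as in this small sketch (the path handling is illustrative):

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

/* Wait for a sysfs attribute to signal a change via sysfs_notify(). */
static int wait_sysfs_change(const char *path, int timeout_ms)
{
	struct pollfd pfd;
	char dummy;

	pfd.fd = open(path, O_RDONLY);
	if (pfd.fd < 0)
		return -1;

	read(pfd.fd, &dummy, 1);		/* prime: poll reports only new events */
	pfd.events = POLLERR | POLLPRI;		/* poll(2) flags, not EPOLL* */
	pfd.revents = 0;

	if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLPRI)) {
		lseek(pfd.fd, 0, SEEK_SET);
		read(pfd.fd, &dummy, 1);	/* dummy read to clear the event */
		close(pfd.fd);
		return 1;
	}

	close(pfd.fd);
	return 0;
}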
index b5d42f4..845c881 100644 (file)
@@ -38,6 +38,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
        {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
        {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
        {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */
+       {USB_DEVICE(0x2C4E, 0x0102)}, /* MERCUSYS MW150US v2 */
        {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
        {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
        {}      /* Terminating entry */
index 488f253..81ecfd1 100644 (file)
@@ -561,7 +561,7 @@ static u_long get_word(struct vc_data *vc)
                return 0;
        } else if (tmpx < vc->vc_cols - 2 &&
                   (ch == SPACE || ch == 0 || (ch < 0x100 && IS_WDLM(ch))) &&
-                  get_char(vc, (u_short *)&tmp_pos + 1, &temp) > SPACE) {
+                  get_char(vc, (u_short *)tmp_pos + 1, &temp) > SPACE) {
                tmp_pos += 2;
                tmpx++;
        } else {
index 2428363..77bca43 100644 (file)
@@ -140,6 +140,7 @@ int hif_shutdown(struct wfx_dev *wdev)
        else
                control_reg_write(wdev, 0);
        mutex_unlock(&wdev->hif_cmd.lock);
+       mutex_unlock(&wdev->hif_cmd.key_renew_lock);
        kfree(hif);
        return ret;
 }
@@ -289,7 +290,7 @@ int hif_stop_scan(struct wfx_vif *wvif)
 }
 
 int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
-            const struct ieee80211_channel *channel, const u8 *ssidie)
+            struct ieee80211_channel *channel, const u8 *ssid, int ssidlen)
 {
        int ret;
        struct hif_msg *hif;
@@ -307,9 +308,9 @@ int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
        body->basic_rate_set =
                cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
        memcpy(body->bssid, conf->bssid, sizeof(body->bssid));
-       if (!conf->ibss_joined && ssidie) {
-               body->ssid_length = cpu_to_le32(ssidie[1]);
-               memcpy(body->ssid, &ssidie[2], ssidie[1]);
+       if (!conf->ibss_joined && ssid) {
+               body->ssid_length = cpu_to_le32(ssidlen);
+               memcpy(body->ssid, ssid, ssidlen);
        }
        wfx_fill_header(hif, wvif->id, HIF_REQ_ID_JOIN, sizeof(*body));
        ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
@@ -427,9 +428,9 @@ int hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
        struct hif_msg *hif;
        struct hif_req_start *body = wfx_alloc_hif(sizeof(*body), &hif);
 
-       body->dtim_period = conf->dtim_period,
-       body->short_preamble = conf->use_short_preamble,
-       body->channel_number = cpu_to_le16(channel->hw_value),
+       body->dtim_period = conf->dtim_period;
+       body->short_preamble = conf->use_short_preamble;
+       body->channel_number = cpu_to_le16(channel->hw_value);
        body->beacon_interval = cpu_to_le32(conf->beacon_int);
        body->basic_rate_set =
                cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
index 20977e4..f8520a1 100644 (file)
@@ -46,7 +46,7 @@ int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211,
             int chan_start, int chan_num);
 int hif_stop_scan(struct wfx_vif *wvif);
 int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
-            const struct ieee80211_channel *channel, const u8 *ssidie);
+            struct ieee80211_channel *channel, const u8 *ssid, int ssidlen);
 int hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout);
 int hif_set_bss_params(struct wfx_vif *wvif,
                       const struct hif_req_set_bss_params *arg);
index bf3769c..26b1406 100644 (file)
@@ -191,10 +191,10 @@ static inline int hif_set_block_ack_policy(struct wfx_vif *wvif,
 }
 
 static inline int hif_set_association_mode(struct wfx_vif *wvif,
-                                          struct ieee80211_bss_conf *info,
-                                          struct ieee80211_sta_ht_cap *ht_cap)
+                                          struct ieee80211_bss_conf *info)
 {
        int basic_rates = wfx_rate_mask_to_hw(wvif->wdev, info->basic_rates);
+       struct ieee80211_sta *sta = NULL;
        struct hif_mib_set_association_mode val = {
                .preambtype_use = 1,
                .mode = 1,
@@ -204,12 +204,17 @@ static inline int hif_set_association_mode(struct wfx_vif *wvif,
                .basic_rate_set = cpu_to_le32(basic_rates)
        };
 
+       rcu_read_lock(); // protect sta
+       if (info->bssid && !info->ibss_joined)
+               sta = ieee80211_find_sta(wvif->vif, info->bssid);
+
        // FIXME: it is strange to not retrieve all information from bss_info
-       if (ht_cap && ht_cap->ht_supported) {
-               val.mpdu_start_spacing = ht_cap->ampdu_density;
+       if (sta && sta->ht_cap.ht_supported) {
+               val.mpdu_start_spacing = sta->ht_cap.ampdu_density;
                if (!(info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT))
-                       val.greenfield = !!(ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD);
+                       val.greenfield = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
        }
+       rcu_read_unlock();
 
        return hif_write_mib(wvif->wdev, wvif->id,
                             HIF_MIB_ID_SET_ASSOCIATION_MODE, &val, sizeof(val));
index 03d0f22..af4f4bb 100644 (file)
@@ -491,9 +491,11 @@ static void wfx_set_mfp(struct wfx_vif *wvif,
 static void wfx_do_join(struct wfx_vif *wvif)
 {
        int ret;
-       const u8 *ssidie;
        struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
        struct cfg80211_bss *bss = NULL;
+       u8 ssid[IEEE80211_MAX_SSID_LEN];
+       const u8 *ssidie = NULL;
+       int ssidlen = 0;
 
        wfx_tx_lock_flush(wvif->wdev);
 
@@ -514,11 +516,14 @@ static void wfx_do_join(struct wfx_vif *wvif)
        if (!wvif->beacon_int)
                wvif->beacon_int = 1;
 
-       rcu_read_lock();
+       rcu_read_lock(); // protect ssidie
        if (!conf->ibss_joined)
                ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
-       else
-               ssidie = NULL;
+       if (ssidie) {
+               ssidlen = ssidie[1];
+               memcpy(ssid, &ssidie[2], ssidie[1]);
+       }
+       rcu_read_unlock();
 
        wfx_tx_flush(wvif->wdev);
 
@@ -527,10 +532,8 @@ static void wfx_do_join(struct wfx_vif *wvif)
 
        wfx_set_mfp(wvif, bss);
 
-       /* Perform actual join */
        wvif->wdev->tx_burst_idx = -1;
-       ret = hif_join(wvif, conf, wvif->channel, ssidie);
-       rcu_read_unlock();
+       ret = hif_join(wvif, conf, wvif->channel, ssid, ssidlen);
        if (ret) {
                ieee80211_connection_loss(wvif->vif);
                wvif->join_complete_status = -1;
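The wfx_do_join() change above copies the SSID out of the RCU-protected information element into a stack buffer so rcu_read_unlock() can be issued before hif_join() runs; hif_join() waits for the device and may sleep, which is not allowed inside an RCU read-side critical section. The copy-then-unlock shape, condensed (the min_t() clamp is an extra safety added for this sketch; the driver's hif_tx.h and data_tx.h definitions are assumed to be in scope):

#include <linux/ieee80211.h>
#include <net/cfg80211.h>

static int join_with_copied_ssid(struct wfx_vif *wvif,
				 struct ieee80211_bss_conf *conf,
				 struct cfg80211_bss *bss)
{
	u8 ssid[IEEE80211_MAX_SSID_LEN];
	const u8 *ie;
	int ssidlen = 0;

	rcu_read_lock();			/* the IE lives in RCU-managed bss data */
	ie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
	if (ie) {
		ssidlen = min_t(int, ie[1], IEEE80211_MAX_SSID_LEN);
		memcpy(ssid, &ie[2], ssidlen);	/* copy out before dropping the lock */
	}
	rcu_read_unlock();

	return hif_join(wvif, conf, wvif->channel, ssid, ssidlen);	/* may sleep */
}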
@@ -605,7 +608,9 @@ int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        int i;
 
        for (i = 0; i < ARRAY_SIZE(sta_priv->buffered); i++)
-               WARN(sta_priv->buffered[i], "release station while Tx is in progress");
+               if (sta_priv->buffered[i])
+                       dev_warn(wvif->wdev->dev, "release station while %d frames are still pending on queue %d",
+                                sta_priv->buffered[i], i);
        // FIXME: see note in wfx_sta_add()
        if (vif->type == NL80211_IFTYPE_STATION)
                return 0;
@@ -689,6 +694,7 @@ static void wfx_join_finalize(struct wfx_vif *wvif,
                        wfx_rate_mask_to_hw(wvif->wdev, sta->supp_rates[wvif->channel->band]);
        else
                wvif->bss_params.operational_rate_set = -1;
+       rcu_read_unlock();
        if (sta &&
            info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
                hif_dual_cts_protection(wvif, true);
@@ -701,8 +707,7 @@ static void wfx_join_finalize(struct wfx_vif *wvif,
        wvif->bss_params.beacon_lost_count = 20;
        wvif->bss_params.aid = info->aid;
 
-       hif_set_association_mode(wvif, info, sta ? &sta->ht_cap : NULL);
-       rcu_read_unlock();
+       hif_set_association_mode(wvif, info);
 
        if (!info->ibss_joined) {
                hif_keep_alive_period(wvif, 30 /* sec */);
index 0026eb6..27b4cd7 100644 (file)
@@ -139,6 +139,9 @@ static struct amdtee_session *find_session(struct amdtee_context_data *ctxdata,
        u32 index = get_session_index(session);
        struct amdtee_session *sess;
 
+       if (index >= TEE_NUM_SESSIONS)
+               return NULL;
+
        list_for_each_entry(sess, &ctxdata->sess_list, list_node)
                if (ta_handle == sess->ta_handle &&
                    test_bit(index, sess->sess_mask))
index 7d6ecc3..a2ce990 100644 (file)
@@ -954,7 +954,7 @@ static bool tb_port_is_width_supported(struct tb_port *port, int width)
        ret = tb_port_read(port, &phy, TB_CFG_PORT,
                           port->cap_phy + LANE_ADP_CS_0, 1);
        if (ret)
-               return ret;
+               return false;
 
        widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
                LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
index a1453fe..5a6f36b 100644 (file)
@@ -1589,9 +1589,7 @@ void tty_kclose(struct tty_struct *tty)
        tty_debug_hangup(tty, "freeing structure\n");
        /*
         * The release_tty function takes care of the details of clearing
-        * the slots and preserving the termios structure. The tty_unlock_pair
-        * should be safe as we keep a kref while the tty is locked (so the
-        * unlock never unlocks a freed tty).
+        * the slots and preserving the termios structure.
         */
        mutex_lock(&tty_mutex);
        tty_port_set_kopened(tty->port, 0);
@@ -1621,9 +1619,7 @@ void tty_release_struct(struct tty_struct *tty, int idx)
        tty_debug_hangup(tty, "freeing structure\n");
        /*
         * The release_tty function takes care of the details of clearing
-        * the slots and preserving the termios structure. The tty_unlock_pair
-        * should be safe as we keep a kref while the tty is locked (so the
-        * unlock never unlocks a freed tty).
+        * the slots and preserving the termios structure.
         */
        mutex_lock(&tty_mutex);
        release_tty(tty, idx);
@@ -2734,9 +2730,11 @@ static int compat_tty_tiocgserial(struct tty_struct *tty,
        struct serial_struct32 v32;
        struct serial_struct v;
        int err;
-       memset(&v, 0, sizeof(struct serial_struct));
 
-       if (!tty->ops->set_serial)
+       memset(&v, 0, sizeof(v));
+       memset(&v32, 0, sizeof(v32));
+
+       if (!tty->ops->get_serial)
                return -ENOTTY;
        err = tty->ops->get_serial(tty, &v);
        if (!err) {
index ffaf46f..4c4ac30 100644 (file)
@@ -1530,18 +1530,19 @@ static const struct usb_ep_ops usb_ep_ops = {
 static void ci_hdrc_gadget_connect(struct usb_gadget *_gadget, int is_active)
 {
        struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
-       unsigned long flags;
 
        if (is_active) {
                pm_runtime_get_sync(&_gadget->dev);
                hw_device_reset(ci);
-               spin_lock_irqsave(&ci->lock, flags);
+               spin_lock_irq(&ci->lock);
                if (ci->driver) {
                        hw_device_state(ci, ci->ep0out->qh.dma);
                        usb_gadget_set_state(_gadget, USB_STATE_POWERED);
+                       spin_unlock_irq(&ci->lock);
                        usb_udc_vbus_handler(_gadget, true);
+               } else {
+                       spin_unlock_irq(&ci->lock);
                }
-               spin_unlock_irqrestore(&ci->lock, flags);
        } else {
                usb_udc_vbus_handler(_gadget, false);
                if (ci->driver)
index 62f4fb9..47f09a6 100644 (file)
@@ -896,10 +896,10 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
 
        ss->xmit_fifo_size = acm->writesize;
        ss->baud_base = le32_to_cpu(acm->line.dwDTERate);
-       ss->close_delay = acm->port.close_delay / 10;
+       ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
        ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
                                ASYNC_CLOSING_WAIT_NONE :
-                               acm->port.closing_wait / 10;
+                               jiffies_to_msecs(acm->port.closing_wait) / 10;
        return 0;
 }
 
@@ -907,24 +907,32 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
 {
        struct acm *acm = tty->driver_data;
        unsigned int closing_wait, close_delay;
+       unsigned int old_closing_wait, old_close_delay;
        int retval = 0;
 
-       close_delay = ss->close_delay * 10;
+       close_delay = msecs_to_jiffies(ss->close_delay * 10);
        closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
-                       ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
+                       ASYNC_CLOSING_WAIT_NONE :
+                       msecs_to_jiffies(ss->closing_wait * 10);
+
+       /* we must redo the rounding here, so that the values match */
+       old_close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
+       old_closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+                               ASYNC_CLOSING_WAIT_NONE :
+                               jiffies_to_msecs(acm->port.closing_wait) / 10;
 
        mutex_lock(&acm->port.mutex);
 
-       if (!capable(CAP_SYS_ADMIN)) {
-               if ((close_delay != acm->port.close_delay) ||
-                   (closing_wait != acm->port.closing_wait))
+       if ((ss->close_delay != old_close_delay) ||
+            (ss->closing_wait != old_closing_wait)) {
+               if (!capable(CAP_SYS_ADMIN))
                        retval = -EPERM;
-               else
-                       retval = -EOPNOTSUPP;
-       } else {
-               acm->port.close_delay  = close_delay;
-               acm->port.closing_wait = closing_wait;
-       }
+               else {
+                       acm->port.close_delay  = close_delay;
+                       acm->port.closing_wait = closing_wait;
+               }
+       } else
+               retval = -EOPNOTSUPP;
 
        mutex_unlock(&acm->port.mutex);
        return retval;
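In the cdc-acm change above, the tty_port stores close_delay/closing_wait in jiffies while the TIOCGSERIAL/TIOCSSERIAL ABI expresses them in hundredths of a second, so both directions now convert explicitly, and the permission check re-rounds the stored values into user units before comparing, so that a value read back via TIOCGSERIAL and written unchanged compares equal even when HZ makes the conversion lossy. A small sketch of the round-trip:

#include <linux/jiffies.h>

/* Userspace units are centiseconds; the tty_port stores jiffies. */
static unsigned long close_delay_to_jiffies(unsigned int user_cs)
{
	return msecs_to_jiffies(user_cs * 10);
}

static unsigned int close_delay_to_user(unsigned long stored_jiffies)
{
	return jiffies_to_msecs(stored_jiffies) / 10;
}

/* Compare in user units, with the same rounding applied to both sides. */
static bool close_delay_changed(unsigned long stored_jiffies, unsigned int user_cs)
{
	return close_delay_to_user(stored_jiffies) != user_cs;
}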
index 2dac3e7..da30b56 100644 (file)
@@ -378,6 +378,12 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
                        USB_QUIRK_IGNORE_REMOTE_WAKEUP },
 
+       /* Realtek hub in Dell WD19 (Type-C) */
+       { USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM },
+
+       /* Generic RTL8153 based ethernet adapters */
+       { USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM },
+
        /* Action Semiconductor flash disk */
        { USB_DEVICE(0x10d6, 0x2200), .driver_info =
                        USB_QUIRK_STRING_FETCH_255 },
index 5e9b537..1fddc41 100644 (file)
@@ -136,7 +136,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                xhci->quirks |= XHCI_AMD_PLL_FIX;
 
        if (pdev->vendor == PCI_VENDOR_ID_AMD &&
-               (pdev->device == 0x15e0 ||
+               (pdev->device == 0x145c ||
+                pdev->device == 0x15e0 ||
                 pdev->device == 0x15e1 ||
                 pdev->device == 0x43bb))
                xhci->quirks |= XHCI_SUSPEND_DELAY;
index d90cd5e..315b455 100644 (file)
@@ -445,6 +445,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
 static struct platform_driver usb_xhci_driver = {
        .probe  = xhci_plat_probe,
        .remove = xhci_plat_remove,
+       .shutdown = usb_hcd_platform_shutdown,
        .driver = {
                .name = "xhci-hcd",
                .pm = &xhci_plat_pm_ops,
index 56eb867..b19582b 100644 (file)
@@ -289,23 +289,12 @@ DECLARE_EVENT_CLASS(xhci_log_urb,
        ),
        TP_printk("ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x",
                        __entry->epnum, __entry->dir_in ? "in" : "out",
-                       ({ char *s;
-                       switch (__entry->type) {
-                       case USB_ENDPOINT_XFER_INT:
-                               s = "intr";
-                               break;
-                       case USB_ENDPOINT_XFER_CONTROL:
-                               s = "control";
-                               break;
-                       case USB_ENDPOINT_XFER_BULK:
-                               s = "bulk";
-                               break;
-                       case USB_ENDPOINT_XFER_ISOC:
-                               s = "isoc";
-                               break;
-                       default:
-                               s = "UNKNOWN";
-                       } s; }), __entry->urb, __entry->pipe, __entry->slot_id,
+                       __print_symbolic(__entry->type,
+                                  { USB_ENDPOINT_XFER_INT,     "intr" },
+                                  { USB_ENDPOINT_XFER_CONTROL, "control" },
+                                  { USB_ENDPOINT_XFER_BULK,    "bulk" },
+                                  { USB_ENDPOINT_XFER_ISOC,    "isoc" }),
+                       __entry->urb, __entry->pipe, __entry->slot_id,
                        __entry->actual, __entry->length, __entry->num_mapped_sgs,
                        __entry->num_sgs, __entry->stream, __entry->flags
                )
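The xhci-trace change above swaps an open-coded switch statement inside TP_printk() for __print_symbolic(), which keeps the format string declarative and lets trace parsers decode the value from the exported event format. A minimal sketch of the macro in a tracepoint definition; the event is hypothetical and the usual trace-header boilerplate (TRACE_SYSTEM, CREATE_TRACE_POINTS, include guards) is omitted:

#include <linux/tracepoint.h>
#include <linux/usb/ch9.h>

TRACE_EVENT(demo_usb_xfer,
	TP_PROTO(unsigned int type),
	TP_ARGS(type),
	TP_STRUCT__entry(
		__field(unsigned int, type)
	),
	TP_fast_assign(
		__entry->type = type;
	),
	TP_printk("type=%s",
		  __print_symbolic(__entry->type,
				   { USB_ENDPOINT_XFER_INT,     "intr" },
				   { USB_ENDPOINT_XFER_CONTROL, "control" },
				   { USB_ENDPOINT_XFER_BULK,    "bulk" },
				   { USB_ENDPOINT_XFER_ISOC,    "isoc" }))
);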
index 084cc2f..0b5dcf9 100644 (file)
@@ -1183,6 +1183,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff),    /* Telit ME910G1 */
          .driver_info = NCTRL(0) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110b, 0xff),    /* Telit ME910G1 (ECM) */
+         .driver_info = NCTRL(0) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
index aab737e..c5a2995 100644 (file)
@@ -99,6 +99,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
        { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
        { USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
+       { USB_DEVICE(HP_VENDOR_ID, HP_LD381_PRODUCT_ID) },
        { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
        { USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
        { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
index a019ea7..52db551 100644 (file)
 #define HP_LM920_PRODUCT_ID    0x026b
 #define HP_TD620_PRODUCT_ID    0x0956
 #define HP_LD960_PRODUCT_ID    0x0b39
+#define HP_LD381_PRODUCT_ID    0x0f7f
 #define HP_LCM220_PRODUCT_ID   0x3139
 #define HP_LCM960_PRODUCT_ID   0x3239
 #define HP_LD220_PRODUCT_ID    0x3524
index 0f1273a..048381c 100644 (file)
@@ -271,6 +271,9 @@ void ucsi_displayport_remove_partner(struct typec_altmode *alt)
                return;
 
        dp = typec_altmode_get_drvdata(alt);
+       if (!dp)
+               return;
+
        dp->data.conf = 0;
        dp->data.status = 0;
        dp->initialized = false;
@@ -285,6 +288,8 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con,
        struct typec_altmode *alt;
        struct ucsi_dp *dp;
 
+       mutex_lock(&con->lock);
+
        /* We can't rely on the firmware with the capabilities. */
        desc->vdo |= DP_CAP_DP_SIGNALING | DP_CAP_RECEPTACLE;
 
@@ -293,12 +298,15 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con,
        desc->vdo |= all_assignments << 16;
 
        alt = typec_port_register_altmode(con->port, desc);
-       if (IS_ERR(alt))
+       if (IS_ERR(alt)) {
+               mutex_unlock(&con->lock);
                return alt;
+       }
 
        dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL);
        if (!dp) {
                typec_unregister_altmode(alt);
+               mutex_unlock(&con->lock);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -311,5 +319,7 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con,
        alt->ops = &ucsi_displayport_ops;
        typec_altmode_set_drvdata(alt, dp);
 
+       mutex_unlock(&con->lock);
+
        return alt;
 }
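The ucsi displayport hunks above add a NULL check for the altmode drvdata and serialize registration with con->lock, unlocking on every return path. An equivalent structure with a single unlock point reached by goto is sketched below; it is a design-choice illustration, not the driver's code, and the remaining dp initialization is abbreviated (the driver's ucsi.h and displayport definitions are assumed to be in scope):

#include <linux/err.h>
#include <linux/slab.h>

static struct typec_altmode *register_dp_sketch(struct ucsi_connector *con,
						struct typec_altmode_desc *desc)
{
	struct typec_altmode *alt;
	struct ucsi_dp *dp;

	mutex_lock(&con->lock);

	/* desc->vdo fixups as in the driver */
	alt = typec_port_register_altmode(con->port, desc);
	if (IS_ERR(alt))
		goto out;

	dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL);
	if (!dp) {
		typec_unregister_altmode(alt);
		alt = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* remaining dp initialization as in the driver */
	alt->ops = &ucsi_displayport_ops;
	typec_altmode_set_drvdata(alt, dp);
out:
	mutex_unlock(&con->lock);	/* single unlock point for all paths */
	return alt;
}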
index 7bfe365..341458f 100644 (file)
@@ -959,8 +959,8 @@ out_iput:
        iput(vb->vb_dev_info.inode);
 out_kern_unmount:
        kern_unmount(balloon_mnt);
-#endif
 out_del_vqs:
+#endif
        vdev->config->del_vqs(vdev);
 out_free_vb:
        kfree(vb);
index 867c7eb..58b96ba 100644 (file)
@@ -2203,10 +2203,10 @@ void vring_del_virtqueue(struct virtqueue *_vq)
                                         vq->split.queue_size_in_bytes,
                                         vq->split.vring.desc,
                                         vq->split.queue_dma_addr);
-
-                       kfree(vq->split.desc_state);
                }
        }
+       if (!vq->packed_ring)
+               kfree(vq->split.desc_state);
        list_del(&_vq->list);
        kfree(vq);
 }
index 0f7373b..69e92e6 100644 (file)
@@ -1,10 +1,12 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /* iTCO Vendor Specific Support hooks */
 #ifdef CONFIG_ITCO_VENDOR_SUPPORT
+extern int iTCO_vendorsupport;
 extern void iTCO_vendor_pre_start(struct resource *, unsigned int);
 extern void iTCO_vendor_pre_stop(struct resource *);
 extern int iTCO_vendor_check_noreboot_on(void);
 #else
+#define iTCO_vendorsupport                             0
 #define iTCO_vendor_pre_start(acpibase, heartbeat)     {}
 #define iTCO_vendor_pre_stop(acpibase)                 {}
 #define iTCO_vendor_check_noreboot_on()                        1
index 4f1b96f..cf0eaa0 100644 (file)
 /* Broken BIOS */
 #define BROKEN_BIOS            911
 
-static int vendorsupport;
-module_param(vendorsupport, int, 0);
+int iTCO_vendorsupport;
+EXPORT_SYMBOL(iTCO_vendorsupport);
+
+module_param_named(vendorsupport, iTCO_vendorsupport, int, 0);
 MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default="
                        "0 (none), 1=SuperMicro Pent3, 911=Broken SMI BIOS");
 
@@ -152,7 +154,7 @@ static void broken_bios_stop(struct resource *smires)
 void iTCO_vendor_pre_start(struct resource *smires,
                           unsigned int heartbeat)
 {
-       switch (vendorsupport) {
+       switch (iTCO_vendorsupport) {
        case SUPERMICRO_OLD_BOARD:
                supermicro_old_pre_start(smires);
                break;
@@ -165,7 +167,7 @@ EXPORT_SYMBOL(iTCO_vendor_pre_start);
 
 void iTCO_vendor_pre_stop(struct resource *smires)
 {
-       switch (vendorsupport) {
+       switch (iTCO_vendorsupport) {
        case SUPERMICRO_OLD_BOARD:
                supermicro_old_pre_stop(smires);
                break;
@@ -178,7 +180,7 @@ EXPORT_SYMBOL(iTCO_vendor_pre_stop);
 
 int iTCO_vendor_check_noreboot_on(void)
 {
-       switch (vendorsupport) {
+       switch (iTCO_vendorsupport) {
        case SUPERMICRO_OLD_BOARD:
                return 0;
        default:
@@ -189,13 +191,13 @@ EXPORT_SYMBOL(iTCO_vendor_check_noreboot_on);
 
 static int __init iTCO_vendor_init_module(void)
 {
-       if (vendorsupport == SUPERMICRO_NEW_BOARD) {
+       if (iTCO_vendorsupport == SUPERMICRO_NEW_BOARD) {
                pr_warn("Option vendorsupport=%d is no longer supported, "
                        "please use the w83627hf_wdt driver instead\n",
                        SUPERMICRO_NEW_BOARD);
                return -EINVAL;
        }
-       pr_info("vendor-support=%d\n", vendorsupport);
+       pr_info("vendor-support=%d\n", iTCO_vendorsupport);
        return 0;
 }
 
index 156360e..e707c47 100644 (file)
@@ -459,13 +459,25 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
        if (!p->tco_res)
                return -ENODEV;
 
-       p->smi_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_SMI);
-       if (!p->smi_res)
-               return -ENODEV;
-
        p->iTCO_version = pdata->version;
        p->pci_dev = to_pci_dev(dev->parent);
 
+       p->smi_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_SMI);
+       if (p->smi_res) {
+               /* The TCO logic uses the TCO_EN bit in the SMI_EN register */
+               if (!devm_request_region(dev, p->smi_res->start,
+                                        resource_size(p->smi_res),
+                                        pdev->name)) {
+                       pr_err("I/O address 0x%04llx already in use, device disabled\n",
+                              (u64)SMI_EN(p));
+                       return -EBUSY;
+               }
+       } else if (iTCO_vendorsupport ||
+                  turn_SMI_watchdog_clear_off >= p->iTCO_version) {
+               pr_err("SMI I/O resource is missing\n");
+               return -ENODEV;
+       }
+
        iTCO_wdt_no_reboot_bit_setup(p, pdata);
 
        /*
@@ -492,14 +504,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
        /* Set the NO_REBOOT bit to prevent later reboots, just for sure */
        p->update_no_reboot_bit(p->no_reboot_priv, true);
 
-       /* The TCO logic uses the TCO_EN bit in the SMI_EN register */
-       if (!devm_request_region(dev, p->smi_res->start,
-                                resource_size(p->smi_res),
-                                pdev->name)) {
-               pr_err("I/O address 0x%04llx already in use, device disabled\n",
-                      (u64)SMI_EN(p));
-               return -EBUSY;
-       }
        if (turn_SMI_watchdog_clear_off >= p->iTCO_version) {
                /*
                 * Bit 13: TCO_EN -> 0
index df415c0..de1ae0b 100644 (file)
@@ -19,7 +19,7 @@
 void afs_put_addrlist(struct afs_addr_list *alist)
 {
        if (alist && refcount_dec_and_test(&alist->usage))
-               call_rcu(&alist->rcu, (rcu_callback_t)kfree);
+               kfree_rcu(alist, rcu);
 }
 
 /*
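The afs_put_addrlist() change above replaces call_rcu() with a cast of kfree, which is only safe while the rcu_head sits at offset 0, by kfree_rcu(), which takes the rcu_head member by name and therefore no longer constrains the structure layout (hence the dropped "Must be first" comment in afs_addr_list). A minimal sketch of the idiom:

#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct addr_list_sketch {
	refcount_t usage;
	struct rcu_head rcu;	/* no longer needs to be the first member */
};

static void put_addr_list(struct addr_list_sketch *a)
{
	if (a && refcount_dec_and_test(&a->usage))
		kfree_rcu(a, rcu);	/* frees after an RCU grace period */
}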
index ff3994a..6765949 100644 (file)
@@ -244,6 +244,17 @@ static void afs_cm_destructor(struct afs_call *call)
 }
 
 /*
+ * Abort a service call from within an action function.
+ */
+static void afs_abort_service_call(struct afs_call *call, u32 abort_code, int error,
+                                  const char *why)
+{
+       rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+                               abort_code, error, why);
+       afs_set_call_complete(call, error, 0);
+}
+
+/*
  * The server supplied a list of callbacks that it wanted to break.
  */
 static void SRXAFSCB_CallBack(struct work_struct *work)
@@ -510,8 +521,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
        if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
                afs_send_empty_reply(call);
        else
-               rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
-                                       1, 1, "K-1");
+               afs_abort_service_call(call, 1, 1, "K-1");
 
        afs_put_call(call);
        _leave("");
index cfe62b1..e1b9ed6 100644 (file)
@@ -145,6 +145,7 @@ static int afs_do_probe_fileserver(struct afs_net *net,
        read_lock(&server->fs_lock);
        ac.alist = rcu_dereference_protected(server->addresses,
                                             lockdep_is_held(&server->fs_lock));
+       afs_get_addrlist(ac.alist);
        read_unlock(&server->fs_lock);
 
        atomic_set(&server->probe_outstanding, ac.alist->nr_addrs);
@@ -163,6 +164,7 @@ static int afs_do_probe_fileserver(struct afs_net *net,
 
        if (!in_progress)
                afs_fs_probe_done(server);
+       afs_put_addrlist(ac.alist);
        return in_progress;
 }
 
index 1d81fc4..ef732dd 100644 (file)
@@ -81,7 +81,7 @@ enum afs_call_state {
  * List of server addresses.
  */
 struct afs_addr_list {
-       struct rcu_head         rcu;            /* Must be first */
+       struct rcu_head         rcu;
        refcount_t              usage;
        u32                     version;        /* Version */
        unsigned char           max_addrs;
@@ -154,7 +154,7 @@ struct afs_call {
        };
        unsigned char           unmarshall;     /* unmarshalling phase */
        unsigned char           addr_ix;        /* Address in ->alist */
-       bool                    incoming;       /* T if incoming call */
+       bool                    drop_ref;       /* T if need to drop ref for incoming call */
        bool                    send_pages;     /* T if data from mapping should be sent */
        bool                    need_attention; /* T if RxRPC poked us */
        bool                    async;          /* T if asynchronous */
@@ -1209,8 +1209,16 @@ static inline void afs_set_call_complete(struct afs_call *call,
                ok = true;
        }
        spin_unlock_bh(&call->state_lock);
-       if (ok)
+       if (ok) {
                trace_afs_call_done(call);
+
+               /* Asynchronous calls have two refs to release - one from the alloc and
+                * one queued with the work item - and we can't just deallocate the
+                * call because the work item may be queued again.
+                */
+               if (call->drop_ref)
+                       afs_put_call(call);
+       }
 }
 
 /*
index 58d3965..1ecc67d 100644 (file)
@@ -18,7 +18,6 @@ struct workqueue_struct *afs_async_calls;
 
 static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
 static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
-static void afs_delete_async_call(struct work_struct *);
 static void afs_process_async_call(struct work_struct *);
 static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
 static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
@@ -169,7 +168,7 @@ void afs_put_call(struct afs_call *call)
        int n = atomic_dec_return(&call->usage);
        int o = atomic_read(&net->nr_outstanding_calls);
 
-       trace_afs_call(call, afs_call_trace_put, n + 1, o,
+       trace_afs_call(call, afs_call_trace_put, n, o,
                       __builtin_return_address(0));
 
        ASSERTCMP(n, >=, 0);
@@ -402,8 +401,10 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
        /* If the call is going to be asynchronous, we need an extra ref for
         * the call to hold itself so the caller need not hang on to its ref.
         */
-       if (call->async)
+       if (call->async) {
                afs_get_call(call, afs_call_trace_get);
+               call->drop_ref = true;
+       }
 
        /* create a call */
        rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
@@ -413,7 +414,8 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
                                          afs_wake_up_async_call :
                                          afs_wake_up_call_waiter),
                                         call->upgrade,
-                                        call->intr,
+                                        (call->intr ? RXRPC_PREINTERRUPTIBLE :
+                                         RXRPC_UNINTERRUPTIBLE),
                                         call->debug_id);
        if (IS_ERR(rxcall)) {
                ret = PTR_ERR(rxcall);
@@ -584,8 +586,6 @@ static void afs_deliver_to_call(struct afs_call *call)
 done:
        if (call->type->done)
                call->type->done(call);
-       if (state == AFS_CALL_COMPLETE && call->incoming)
-               afs_put_call(call);
 out:
        _leave("");
        return;
@@ -604,11 +604,7 @@ call_complete:
 long afs_wait_for_call_to_complete(struct afs_call *call,
                                   struct afs_addr_cursor *ac)
 {
-       signed long rtt2, timeout;
        long ret;
-       bool stalled = false;
-       u64 rtt;
-       u32 life, last_life;
        bool rxrpc_complete = false;
 
        DECLARE_WAITQUEUE(myself, current);
@@ -619,14 +615,6 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
        if (ret < 0)
                goto out;
 
-       rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall);
-       rtt2 = nsecs_to_jiffies64(rtt) * 2;
-       if (rtt2 < 2)
-               rtt2 = 2;
-
-       timeout = rtt2;
-       rxrpc_kernel_check_life(call->net->socket, call->rxcall, &last_life);
-
        add_wait_queue(&call->waitq, &myself);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
@@ -637,37 +625,19 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
                        call->need_attention = false;
                        __set_current_state(TASK_RUNNING);
                        afs_deliver_to_call(call);
-                       timeout = rtt2;
                        continue;
                }
 
                if (afs_check_call_state(call, AFS_CALL_COMPLETE))
                        break;
 
-               if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall, &life)) {
+               if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall)) {
                        /* rxrpc terminated the call. */
                        rxrpc_complete = true;
                        break;
                }
 
-               if (call->intr && timeout == 0 &&
-                   life == last_life && signal_pending(current)) {
-                       if (stalled)
-                               break;
-                       __set_current_state(TASK_RUNNING);
-                       rxrpc_kernel_probe_life(call->net->socket, call->rxcall);
-                       timeout = rtt2;
-                       stalled = true;
-                       continue;
-               }
-
-               if (life != last_life) {
-                       timeout = rtt2;
-                       last_life = life;
-                       stalled = false;
-               }
-
-               timeout = schedule_timeout(timeout);
+               schedule();
        }
 
        remove_wait_queue(&call->waitq, &myself);
@@ -735,7 +705,7 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
 
        u = atomic_fetch_add_unless(&call->usage, 1, 0);
        if (u != 0) {
-               trace_afs_call(call, afs_call_trace_wake, u,
+               trace_afs_call(call, afs_call_trace_wake, u + 1,
                               atomic_read(&call->net->nr_outstanding_calls),
                               __builtin_return_address(0));
 
@@ -745,21 +715,6 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
 }
 
 /*
- * Delete an asynchronous call.  The work item carries a ref to the call struct
- * that we need to release.
- */
-static void afs_delete_async_call(struct work_struct *work)
-{
-       struct afs_call *call = container_of(work, struct afs_call, async_work);
-
-       _enter("");
-
-       afs_put_call(call);
-
-       _leave("");
-}
-
-/*
  * Perform I/O processing on an asynchronous call.  The work item carries a ref
  * to the call struct that we either need to release or to pass on.
  */
@@ -774,16 +729,6 @@ static void afs_process_async_call(struct work_struct *work)
                afs_deliver_to_call(call);
        }
 
-       if (call->state == AFS_CALL_COMPLETE) {
-               /* We have two refs to release - one from the alloc and one
-                * queued with the work item - and we can't just deallocate the
-                * call because the work item may be queued again.
-                */
-               call->async_work.func = afs_delete_async_call;
-               if (!queue_work(afs_async_calls, &call->async_work))
-                       afs_put_call(call);
-       }
-
        afs_put_call(call);
        _leave("");
 }
@@ -810,6 +755,7 @@ void afs_charge_preallocation(struct work_struct *work)
                        if (!call)
                                break;
 
+                       call->drop_ref = true;
                        call->async = true;
                        call->state = AFS_CALL_SV_AWAIT_OP_ID;
                        init_waitqueue_head(&call->waitq);
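
The afs changes above replace the delete-work dance with a single self-held reference that the asynchronous path drops itself when it is finished (call->drop_ref). Below is a minimal userspace sketch of that refcounting pattern using C11 atomics; the obj_get()/obj_put() names and the drop_ref field are illustrative stand-ins, not the afs API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
        atomic_int usage;
        bool drop_ref;          /* async path owns a ref and drops it itself */
};

static void obj_get(struct obj *o)
{
        atomic_fetch_add(&o->usage, 1);
}

static void obj_put(struct obj *o)
{
        if (atomic_fetch_sub(&o->usage, 1) == 1)
                free(o);        /* last reference gone */
}

static void async_work(struct obj *o)
{
        /* ... deliver replies, update state ... */
        if (o->drop_ref)
                obj_put(o);     /* the ref taken when the work was set up */
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        atomic_init(&o->usage, 1);      /* caller's ref */
        obj_get(o);                     /* extra ref for the async path */
        o->drop_ref = true;

        async_work(o);                  /* drops its own ref */
        obj_put(o);                     /* caller's ref; object is freed */
        return 0;
}
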
index 404e050..7f09147 100644 (file)
@@ -856,9 +856,9 @@ static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
                                found_raid1c34 = true;
                        up_read(&sinfo->groups_sem);
                }
-               if (found_raid56)
+               if (!found_raid56)
                        btrfs_clear_fs_incompat(fs_info, RAID56);
-               if (found_raid1c34)
+               if (!found_raid1c34)
                        btrfs_clear_fs_incompat(fs_info, RAID1C34);
        }
 }
index 27076eb..d267eb5 100644 (file)
@@ -9496,6 +9496,10 @@ out_fail:
                ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx);
                if (ret)
                        commit_transaction = true;
+       } else if (sync_log) {
+               mutex_lock(&root->log_mutex);
+               list_del(&ctx.list);
+               mutex_unlock(&root->log_mutex);
        }
        if (commit_transaction) {
                ret = btrfs_commit_transaction(trans);
index 7e0190b..5a478cd 100644 (file)
@@ -1415,10 +1415,13 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_osd_client *osdc = &fsc->client->osdc;
        struct ceph_cap_flush *prealloc_cf;
        ssize_t count, written = 0;
        int err, want, got;
        bool direct_lock = false;
+       u32 map_flags;
+       u64 pool_flags;
        loff_t pos;
        loff_t limit = max(i_size_read(inode), fsc->max_file_size);
 
@@ -1481,8 +1484,12 @@ retry_snap:
                        goto out;
        }
 
-       /* FIXME: not complete since it doesn't account for being at quota */
-       if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL)) {
+       down_read(&osdc->lock);
+       map_flags = osdc->osdmap->flags;
+       pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
+       up_read(&osdc->lock);
+       if ((map_flags & CEPH_OSDMAP_FULL) ||
+           (pool_flags & CEPH_POOL_FLAG_FULL)) {
                err = -ENOSPC;
                goto out;
        }
@@ -1575,7 +1582,8 @@ retry_snap:
        }
 
        if (written >= 0) {
-               if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_NEARFULL))
+               if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
+                   (pool_flags & CEPH_POOL_FLAG_NEARFULL))
                        iocb->ki_flags |= IOCB_DSYNC;
                written = generic_write_sync(iocb, written);
        }
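
The ceph hunk above snapshots the osdmap and pool flags while holding osdc->lock and only tests the copies after dropping it. A small userspace sketch of that snapshot-under-lock pattern follows; the names and flag values are placeholders, not the libceph definitions.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define MAP_FULL        (1u << 1)       /* illustrative flag bits */
#define POOL_FULL       (1ull << 1)

struct cluster {
        pthread_rwlock_t lock;
        uint32_t map_flags;
        uint64_t pool_flags;
};

static int cluster_is_full(struct cluster *c)
{
        uint32_t map_flags;
        uint64_t pool_flags;

        pthread_rwlock_rdlock(&c->lock);
        map_flags = c->map_flags;       /* snapshot while holding the lock */
        pool_flags = c->pool_flags;
        pthread_rwlock_unlock(&c->lock);

        return (map_flags & MAP_FULL) || (pool_flags & POOL_FULL);
}

int main(void)
{
        struct cluster c = {
                .lock = PTHREAD_RWLOCK_INITIALIZER,
                .pool_flags = POOL_FULL,
        };

        printf("full=%d\n", cluster_is_full(&c));       /* prints full=1 */
        return 0;
}
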
index ccfcc66..923be93 100644 (file)
@@ -1155,5 +1155,6 @@ void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc)
                        pr_err("snapid map %llx -> %x still in use\n",
                               sm->snap, sm->dev);
                }
+               kfree(sm);
        }
 }
index 0ef0994..36e7b2f 100644 (file)
@@ -555,7 +555,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
                if (server->ops->close)
                        server->ops->close(xid, tcon, &fid);
                cifs_del_pending_open(&open);
-               fput(file);
                rc = -ENOMEM;
        }
 
index 3b942ec..8f9d849 100644 (file)
@@ -1169,7 +1169,8 @@ try_again:
        rc = posix_lock_file(file, flock, NULL);
        up_write(&cinode->lock_sem);
        if (rc == FILE_LOCK_DEFERRED) {
-               rc = wait_event_interruptible(flock->fl_wait, !flock->fl_blocker);
+               rc = wait_event_interruptible(flock->fl_wait,
+                                       list_empty(&flock->fl_blocked_member));
                if (!rc)
                        goto try_again;
                locks_delete_block(flock);
index 1e8a4b1..b16f8d2 100644 (file)
@@ -2191,7 +2191,7 @@ int cifs_getattr(const struct path *path, struct kstat *stat,
                if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID))
                        stat->gid = current_fsgid();
        }
-       return rc;
+       return 0;
 }
 
 int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start,
index c31e84e..cfe9b80 100644 (file)
@@ -2222,6 +2222,8 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
                goto qdf_free;
        }
 
+       atomic_inc(&tcon->num_remote_opens);
+
        qd_rsp = (struct smb2_query_directory_rsp *)rsp_iov[1].iov_base;
        if (qd_rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
                trace_smb3_query_dir_done(xid, fid->persistent_fid,
@@ -3417,7 +3419,7 @@ static int smb3_fiemap(struct cifs_tcon *tcon,
        if (rc)
                goto out;
 
-       if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
+       if (out_data_len && out_data_len < sizeof(struct file_allocated_range_buffer)) {
                rc = -EINVAL;
                goto out;
        }
index 65cb09f..08c9f21 100644 (file)
@@ -539,6 +539,15 @@ int fscrypt_drop_inode(struct inode *inode)
        mk = ci->ci_master_key->payload.data[0];
 
        /*
+        * With proper, non-racy use of FS_IOC_REMOVE_ENCRYPTION_KEY, all inodes
+        * protected by the key were cleaned by sync_filesystem().  But if
+        * userspace is still using the files, inodes can be dirtied between
+        * then and now.  We mustn't lose any writes, so skip dirty inodes here.
+        */
+       if (inode->i_state & I_DIRTY_ALL)
+               return 0;
+
+       /*
         * Note: since we aren't holding ->mk_secret_sem, the result here can
         * immediately become outdated.  But there's no correctness problem with
         * unnecessarily evicting.  Nor is there a correctness problem with not
index b041b66..eee3c92 100644 (file)
@@ -1854,9 +1854,9 @@ fetch_events:
                waiter = true;
                init_waitqueue_entry(&wait, current);
 
-               spin_lock_irq(&ep->wq.lock);
+               write_lock_irq(&ep->lock);
                __add_wait_queue_exclusive(&ep->wq, &wait);
-               spin_unlock_irq(&ep->wq.lock);
+               write_unlock_irq(&ep->lock);
        }
 
        for (;;) {
@@ -1904,9 +1904,9 @@ send_events:
                goto fetch_events;
 
        if (waiter) {
-               spin_lock_irq(&ep->wq.lock);
+               write_lock_irq(&ep->lock);
                __remove_wait_queue(&ep->wq, &wait);
-               spin_unlock_irq(&ep->wq.lock);
+               write_unlock_irq(&ep->lock);
        }
 
        return res;
index a364e1a..c8a4e4c 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -540,9 +540,14 @@ static int alloc_fd(unsigned start, unsigned flags)
        return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
 }
 
+int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
+{
+       return __alloc_fd(current->files, 0, nofile, flags);
+}
+
 int get_unused_fd_flags(unsigned flags)
 {
-       return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
+       return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
 }
 EXPORT_SYMBOL(get_unused_fd_flags);
 
index 8e02d76..97eec75 100644 (file)
@@ -276,12 +276,10 @@ static void flush_bg_queue(struct fuse_conn *fc)
 void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
 {
        struct fuse_iqueue *fiq = &fc->iq;
-       bool async;
 
        if (test_and_set_bit(FR_FINISHED, &req->flags))
                goto put_request;
 
-       async = req->args->end;
        /*
         * test_and_set_bit() implies smp_mb() between bit
         * changing and below intr_entry check. Pairs with
@@ -324,7 +322,7 @@ void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
                wake_up(&req->waitq);
        }
 
-       if (async)
+       if (test_bit(FR_ASYNC, &req->flags))
                req->args->end(fc, req->args, req->out.h.error);
 put_request:
        fuse_put_request(fc, req);
@@ -471,6 +469,8 @@ static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
        req->in.h.opcode = args->opcode;
        req->in.h.nodeid = args->nodeid;
        req->args = args;
+       if (args->end)
+               __set_bit(FR_ASYNC, &req->flags);
 }
 
 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
index aa75e23..ca344bf 100644 (file)
@@ -301,6 +301,7 @@ struct fuse_io_priv {
  * FR_SENT:            request is in userspace, waiting for an answer
  * FR_FINISHED:                request is finished
  * FR_PRIVATE:         request is on private list
+ * FR_ASYNC:           request is asynchronous
  */
 enum fuse_req_flag {
        FR_ISREPLY,
@@ -314,6 +315,7 @@ enum fuse_req_flag {
        FR_SENT,
        FR_FINISHED,
        FR_PRIVATE,
+       FR_ASYNC,
 };
 
 /**
index 2716d56..8294851 100644 (file)
@@ -1248,7 +1248,7 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
                if (!(file->f_mode & FMODE_OPENED))
                        return finish_no_open(file, d);
                dput(d);
-               return 0;
+               return excl && (flags & O_CREAT) ? -EEXIST : 0;
        }
 
        BUG_ON(d != NULL);
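
The gfs2 hunk above makes atomic_open honour O_CREAT|O_EXCL when the file already exists. From userspace the expected behaviour is the standard POSIX one, sketched below.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/tmp/excl-demo";
        int fd = open(path, O_CREAT | O_EXCL | O_WRONLY, 0600);

        if (fd >= 0) {
                puts("created");
                close(fd);
        }

        /* second attempt: the file now exists, so O_EXCL must fail */
        fd = open(path, O_CREAT | O_EXCL | O_WRONLY, 0600);
        if (fd < 0)
                printf("second open failed: %s\n", strerror(errno)); /* EEXIST */

        unlink(path);
        return 0;
}
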
index 7d57068..93d9252 100644 (file)
@@ -138,6 +138,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
        inode->i_sb = sb;
        inode->i_blkbits = sb->s_blocksize_bits;
        inode->i_flags = 0;
+       atomic64_set(&inode->i_sequence, 0);
        atomic_set(&inode->i_count, 1);
        inode->i_op = &empty_iops;
        inode->i_fop = &no_open_fops;
index c06082b..3affd96 100644 (file)
@@ -191,7 +191,6 @@ struct fixed_file_data {
        struct llist_head               put_llist;
        struct work_struct              ref_work;
        struct completion               done;
-       struct rcu_head                 rcu;
 };
 
 struct io_ring_ctx {
@@ -344,6 +343,7 @@ struct io_accept {
        struct sockaddr __user          *addr;
        int __user                      *addr_len;
        int                             flags;
+       unsigned long                   nofile;
 };
 
 struct io_sync {
@@ -398,6 +398,7 @@ struct io_open {
        struct filename                 *filename;
        struct statx __user             *buffer;
        struct open_how                 how;
+       unsigned long                   nofile;
 };
 
 struct io_files_update {
@@ -2578,6 +2579,7 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                return ret;
        }
 
+       req->open.nofile = rlimit(RLIMIT_NOFILE);
        req->flags |= REQ_F_NEED_CLEANUP;
        return 0;
 }
@@ -2619,6 +2621,7 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                return ret;
        }
 
+       req->open.nofile = rlimit(RLIMIT_NOFILE);
        req->flags |= REQ_F_NEED_CLEANUP;
        return 0;
 }
@@ -2637,7 +2640,7 @@ static int io_openat2(struct io_kiocb *req, struct io_kiocb **nxt,
        if (ret)
                goto err;
 
-       ret = get_unused_fd_flags(req->open.how.flags);
+       ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
        if (ret < 0)
                goto err;
 
@@ -3322,6 +3325,7 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
        accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
        accept->flags = READ_ONCE(sqe->accept_flags);
+       accept->nofile = rlimit(RLIMIT_NOFILE);
        return 0;
 #else
        return -EOPNOTSUPP;
@@ -3338,7 +3342,8 @@ static int __io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
 
        file_flags = force_nonblock ? O_NONBLOCK : 0;
        ret = __sys_accept4_file(req->file, file_flags, accept->addr,
-                                       accept->addr_len, accept->flags);
+                                       accept->addr_len, accept->flags,
+                                       accept->nofile);
        if (ret == -EAGAIN && force_nonblock)
                return -EAGAIN;
        if (ret == -ERESTARTSYS)
@@ -4132,6 +4137,9 @@ static int io_req_defer_prep(struct io_kiocb *req,
 {
        ssize_t ret = 0;
 
+       if (!sqe)
+               return 0;
+
        if (io_op_defs[req->opcode].file_table) {
                ret = io_grab_files(req);
                if (unlikely(ret))
@@ -4908,6 +4916,11 @@ err_req:
                if (sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
                        req->flags |= REQ_F_LINK;
                        INIT_LIST_HEAD(&req->link_list);
+
+                       if (io_alloc_async_ctx(req)) {
+                               ret = -EAGAIN;
+                               goto err_req;
+                       }
                        ret = io_req_defer_prep(req, sqe);
                        if (ret)
                                req->flags |= REQ_F_FAIL_LINK;
@@ -5331,24 +5344,21 @@ static void io_file_ref_kill(struct percpu_ref *ref)
        complete(&data->done);
 }
 
-static void __io_file_ref_exit_and_free(struct rcu_head *rcu)
+static void io_file_ref_exit_and_free(struct work_struct *work)
 {
-       struct fixed_file_data *data = container_of(rcu, struct fixed_file_data,
-                                                       rcu);
-       percpu_ref_exit(&data->refs);
-       kfree(data);
-}
+       struct fixed_file_data *data;
+
+       data = container_of(work, struct fixed_file_data, ref_work);
 
-static void io_file_ref_exit_and_free(struct rcu_head *rcu)
-{
        /*
-        * We need to order our exit+free call against the potentially
-        * existing call_rcu() for switching to atomic. One way to do that
-        * is to have this rcu callback queue the final put and free, as we
-        * could otherwise have a pre-existing atomic switch complete _after_
-        * the free callback we queued.
+        * Ensure any percpu-ref atomic switch callback has run, it could have
+        * been in progress when the files were being unregistered. Once
+        * that's done, we can safely exit and free the ref and containing
+        * data structure.
         */
-       call_rcu(rcu, __io_file_ref_exit_and_free);
+       rcu_barrier();
+       percpu_ref_exit(&data->refs);
+       kfree(data);
 }
 
 static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
@@ -5369,7 +5379,8 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
        for (i = 0; i < nr_tables; i++)
                kfree(data->table[i].files);
        kfree(data->table);
-       call_rcu(&data->rcu, io_file_ref_exit_and_free);
+       INIT_WORK(&data->ref_work, io_file_ref_exit_and_free);
+       queue_work(system_wq, &data->ref_work);
        ctx->file_data = NULL;
        ctx->nr_user_files = 0;
        return 0;
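
The io_uring hunks above snapshot rlimit(RLIMIT_NOFILE) when a request is prepared, so that a later execution context (e.g. an io-wq worker) uses the submitter's limit rather than its own, passing it down via __get_unused_fd_flags() and __sys_accept4_file(). A userspace sketch of capturing the limit at prep time follows; the request struct is illustrative, not the io_uring one.

#include <stdio.h>
#include <sys/resource.h>

struct request {
        unsigned long nofile;   /* limit snapshotted when the request is prepared */
};

static void prep_request(struct request *req)
{
        struct rlimit rl;

        getrlimit(RLIMIT_NOFILE, &rl); /* same value rlimit(RLIMIT_NOFILE) yields in-kernel */
        req->nofile = rl.rlim_cur;
}

static void execute_request(const struct request *req)
{
        /* a worker would use req->nofile here instead of its own limit */
        printf("allocate fd below limit %lu\n", req->nofile);
}

int main(void)
{
        struct request req;

        prep_request(&req);
        execute_request(&req);
        return 0;
}
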
index 426b55d..b8a31c1 100644 (file)
@@ -725,7 +725,6 @@ static void __locks_delete_block(struct file_lock *waiter)
 {
        locks_delete_global_blocked(waiter);
        list_del_init(&waiter->fl_blocked_member);
-       waiter->fl_blocker = NULL;
 }
 
 static void __locks_wake_up_blocks(struct file_lock *blocker)
@@ -740,6 +739,13 @@ static void __locks_wake_up_blocks(struct file_lock *blocker)
                        waiter->fl_lmops->lm_notify(waiter);
                else
                        wake_up(&waiter->fl_wait);
+
+               /*
+                * The setting of fl_blocker to NULL marks the "done"
+                * point in deleting a block. Paired with acquire at the top
+                * of locks_delete_block().
+                */
+               smp_store_release(&waiter->fl_blocker, NULL);
        }
 }
 
@@ -753,11 +759,42 @@ int locks_delete_block(struct file_lock *waiter)
 {
        int status = -ENOENT;
 
+       /*
+        * If fl_blocker is NULL, it won't be set again as this thread "owns"
+        * the lock and is the only one that might try to claim the lock.
+        *
+        * We use acquire/release to manage fl_blocker so that we can
+        * optimize away taking the blocked_lock_lock in many cases.
+        *
+        * The smp_load_acquire guarantees two things:
+        *
+        * 1/ that fl_blocked_requests can be tested locklessly. If something
+        * was recently added to that list it must have been in a locked region
+        * *before* the locked region when fl_blocker was set to NULL.
+        *
+        * 2/ that no other thread is accessing 'waiter', so it is safe to free
+        * it.  __locks_wake_up_blocks is careful not to touch waiter after
+        * fl_blocker is released.
+        *
+        * If a lockless check of fl_blocker shows it to be NULL, we know that
+        * no new locks can be inserted into its fl_blocked_requests list, and
+        * can avoid doing anything further if the list is empty.
+        */
+       if (!smp_load_acquire(&waiter->fl_blocker) &&
+           list_empty(&waiter->fl_blocked_requests))
+               return status;
+
        spin_lock(&blocked_lock_lock);
        if (waiter->fl_blocker)
                status = 0;
        __locks_wake_up_blocks(waiter);
        __locks_delete_block(waiter);
+
+       /*
+        * The setting of fl_blocker to NULL marks the "done" point in deleting
+        * a block. Paired with acquire at the top of this function.
+        */
+       smp_store_release(&waiter->fl_blocker, NULL);
        spin_unlock(&blocked_lock_lock);
        return status;
 }
@@ -1350,7 +1387,8 @@ static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
                error = posix_lock_inode(inode, fl, NULL);
                if (error != FILE_LOCK_DEFERRED)
                        break;
-               error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
+               error = wait_event_interruptible(fl->fl_wait,
+                                       list_empty(&fl->fl_blocked_member));
                if (error)
                        break;
        }
@@ -1435,7 +1473,8 @@ int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
                error = posix_lock_inode(inode, &fl, NULL);
                if (error != FILE_LOCK_DEFERRED)
                        break;
-               error = wait_event_interruptible(fl.fl_wait, !fl.fl_blocker);
+               error = wait_event_interruptible(fl.fl_wait,
+                                       list_empty(&fl.fl_blocked_member));
                if (!error) {
                        /*
                         * If we've been sleeping someone might have
@@ -1638,7 +1677,8 @@ restart:
 
        locks_dispose_list(&dispose);
        error = wait_event_interruptible_timeout(new_fl->fl_wait,
-                                               !new_fl->fl_blocker, break_time);
+                                       list_empty(&new_fl->fl_blocked_member),
+                                       break_time);
 
        percpu_down_read(&file_rwsem);
        spin_lock(&ctx->flc_lock);
@@ -2122,7 +2162,8 @@ static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
                error = flock_lock_inode(inode, fl);
                if (error != FILE_LOCK_DEFERRED)
                        break;
-               error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
+               error = wait_event_interruptible(fl->fl_wait,
+                               list_empty(&fl->fl_blocked_member));
                if (error)
                        break;
        }
@@ -2399,7 +2440,8 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd,
                error = vfs_lock_file(filp, cmd, fl, NULL);
                if (error != FILE_LOCK_DEFERRED)
                        break;
-               error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
+               error = wait_event_interruptible(fl->fl_wait,
+                                       list_empty(&fl->fl_blocked_member));
                if (error)
                        break;
        }
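
The comments added to fs/locks.c above describe an acquire/release protocol that lets locks_delete_block() skip blocked_lock_lock once the waiter has already been woken: the waker's last access to the waiter is followed by a release store of NULL to fl_blocker, and the waiter's fast path re-checks with an acquire load. A compact userspace sketch of the same protocol with C11 atomics; the names are illustrative, not the VFS API.

#include <stdatomic.h>
#include <stdio.h>

struct waiter {
        _Atomic(void *) blocker;        /* non-NULL while still queued on a blocker */
        int list_nonempty;              /* stands in for fl_blocked_requests */
};

static void wake_up_blocks(struct waiter *w)
{
        /* ... unlink w, notify it ... (waker's last touch of *w) */
        atomic_store_explicit(&w->blocker, NULL, memory_order_release);
}

static int delete_block(struct waiter *w)
{
        /*
         * Fast path: if blocker is already NULL and nothing is queued under
         * us, everything the waker did is visible and no lock is needed.
         */
        if (!atomic_load_explicit(&w->blocker, memory_order_acquire) &&
            !w->list_nonempty)
                return 0;

        /* slow path: take the global lock, as blocked_lock_lock is taken above */
        return 1;
}

int main(void)
{
        struct waiter w = { .blocker = (void *)&w };

        wake_up_blocks(&w);
        printf("needed slow path: %d\n", delete_block(&w));    /* prints 0 */
        return 0;
}
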
index 989c30c..f1ff307 100644 (file)
@@ -153,6 +153,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
        if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL)
                goto error_0;
 
+       clp->cl_minorversion = cl_init->minorversion;
        clp->cl_nfs_mod = cl_init->nfs_mod;
        if (!try_module_get(clp->cl_nfs_mod->owner))
                goto error_dealloc;
index e1b9384..e113fcb 100644 (file)
@@ -832,6 +832,8 @@ static int nfs_parse_source(struct fs_context *fc,
        if (len > maxnamlen)
                goto out_hostname;
 
+       kfree(ctx->nfs_server.hostname);
+
        /* N.B. caller will free nfs_server.hostname in all cases */
        ctx->nfs_server.hostname = kmemdup_nul(dev_name, len, GFP_KERNEL);
        if (!ctx->nfs_server.hostname)
@@ -1240,6 +1242,13 @@ static int nfs_fs_context_validate(struct fs_context *fc)
                }
                ctx->nfs_mod = nfs_mod;
        }
+
+       /* Ensure the filesystem context has the correct fs_type */
+       if (fc->fs_type != ctx->nfs_mod->nfs_fs) {
+               module_put(fc->fs_type->owner);
+               __module_get(ctx->nfs_mod->nfs_fs->owner);
+               fc->fs_type = ctx->nfs_mod->nfs_fs;
+       }
        return 0;
 
 out_no_device_name:
index 52270bf..1abf126 100644 (file)
@@ -31,6 +31,7 @@ static DEFINE_SPINLOCK(nfs_fscache_keys_lock);
 struct nfs_server_key {
        struct {
                uint16_t        nfsversion;             /* NFS protocol version */
+               uint32_t        minorversion;           /* NFSv4 minor version */
                uint16_t        family;                 /* address family */
                __be16          port;                   /* IP port */
        } hdr;
@@ -55,6 +56,7 @@ void nfs_fscache_get_client_cookie(struct nfs_client *clp)
 
        memset(&key, 0, sizeof(key));
        key.hdr.nfsversion = clp->rpc_ops->version;
+       key.hdr.minorversion = clp->cl_minorversion;
        key.hdr.family = clp->cl_addr.ss_family;
 
        switch (clp->cl_addr.ss_family) {
index ad60774..f3ece8e 100644 (file)
@@ -153,7 +153,7 @@ struct vfsmount *nfs_d_automount(struct path *path)
        /* Open a new filesystem context, transferring parameters from the
         * parent superblock, including the network namespace.
         */
-       fc = fs_context_for_submount(&nfs_fs_type, path->dentry);
+       fc = fs_context_for_submount(path->mnt->mnt_sb->s_type, path->dentry);
        if (IS_ERR(fc))
                return ERR_CAST(fc);
 
index 0cd767e..0bd77cc 100644 (file)
@@ -216,7 +216,6 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
        INIT_LIST_HEAD(&clp->cl_ds_clients);
        rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
        clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
-       clp->cl_minorversion = cl_init->minorversion;
        clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
        clp->cl_mig_gen = 1;
 #if IS_ENABLED(CONFIG_NFS_V4_1)
index 0788b37..b69d6ee 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -860,9 +860,6 @@ cleanup_file:
  * the return value of d_splice_alias(), then the caller needs to perform dput()
  * on it after finish_open().
  *
- * On successful return @file is a fully instantiated open file.  After this, if
- * an error occurs in ->atomic_open(), it needs to clean up with fput().
- *
  * Returns zero on success or -errno if the open failed.
  */
 int finish_open(struct file *file, struct dentry *dentry,
index 444e2da..714c14c 100644 (file)
@@ -93,6 +93,7 @@ config OVERLAY_FS_XINO_AUTO
        bool "Overlayfs: auto enable inode number mapping"
        default n
        depends on OVERLAY_FS
+       depends on 64BIT
        help
          If this config option is enabled then overlay filesystems will use
          unused high bits in undelying filesystem inode numbers to map all
index a531721..87c362f 100644 (file)
@@ -244,6 +244,9 @@ static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req)
        if (iocb->ki_flags & IOCB_WRITE) {
                struct inode *inode = file_inode(orig_iocb->ki_filp);
 
+               /* Actually acquired in ovl_write_iter() */
+               __sb_writers_acquired(file_inode(iocb->ki_filp)->i_sb,
+                                     SB_FREEZE_WRITE);
                file_end_write(iocb->ki_filp);
                ovl_copyattr(ovl_inode_real(inode), inode);
        }
@@ -346,6 +349,9 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
                        goto out;
 
                file_start_write(real.file);
+               /* Pacify lockdep, same trick as done in aio_write() */
+               __sb_writers_release(file_inode(real.file)->i_sb,
+                                    SB_FREEZE_WRITE);
                aio_req->fd = real;
                real.flags = 0;
                aio_req->orig_iocb = iocb;
index 3623d28..3d3f2b8 100644 (file)
@@ -318,7 +318,12 @@ static inline unsigned int ovl_xino_bits(struct super_block *sb)
        return ovl_same_dev(sb) ? OVL_FS(sb)->xino_mode : 0;
 }
 
-static inline int ovl_inode_lock(struct inode *inode)
+static inline void ovl_inode_lock(struct inode *inode)
+{
+       mutex_lock(&OVL_I(inode)->lock);
+}
+
+static inline int ovl_inode_lock_interruptible(struct inode *inode)
 {
        return mutex_lock_interruptible(&OVL_I(inode)->lock);
 }
index 319fe0d..ac967f1 100644 (file)
@@ -1411,6 +1411,8 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
                if (ofs->config.xino == OVL_XINO_ON)
                        pr_info("\"xino=on\" is useless with all layers on same fs, ignore.\n");
                ofs->xino_mode = 0;
+       } else if (ofs->config.xino == OVL_XINO_OFF) {
+               ofs->xino_mode = -1;
        } else if (ofs->config.xino == OVL_XINO_ON && ofs->xino_mode < 0) {
                /*
                 * This is a roundup of number of bits needed for encoding
@@ -1623,8 +1625,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_stack_depth = 0;
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        /* Assume underlaying fs uses 32bit inodes unless proven otherwise */
-       if (ofs->config.xino != OVL_XINO_OFF)
+       if (ofs->config.xino != OVL_XINO_OFF) {
                ofs->xino_mode = BITS_PER_LONG - 32;
+               if (!ofs->xino_mode) {
+                       pr_warn("xino not supported on 32bit kernel, falling back to xino=off.\n");
+                       ofs->config.xino = OVL_XINO_OFF;
+               }
+       }
 
        /* alloc/destroy_inode needed for setting up traps in inode cache */
        sb->s_op = &ovl_super_operations;
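
The overlayfs change above disables xino on 32-bit kernels because BITS_PER_LONG - 32 leaves no spare high bits for the layer id. A rough sketch of the encoding idea follows, with an illustrative shift rather than overlayfs's actual xino_mode computation.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define XINO_SHIFT      32      /* bits available above a 32-bit underlying ino */

static uint64_t xino_encode(uint32_t real_ino, uint32_t fsid)
{
        return (uint64_t)fsid << XINO_SHIFT | real_ino;
}

int main(void)
{
        uint64_t ino = xino_encode(42, 3);

        printf("mapped ino: %" PRIx64 " (fsid=%" PRIu64 ", real=%" PRIu64 ")\n",
               ino, ino >> XINO_SHIFT, ino & 0xffffffffu);
        return 0;
}
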
index ea00508..042f7eb 100644 (file)
@@ -509,7 +509,7 @@ int ovl_copy_up_start(struct dentry *dentry, int flags)
        struct inode *inode = d_inode(dentry);
        int err;
 
-       err = ovl_inode_lock(inode);
+       err = ovl_inode_lock_interruptible(inode);
        if (!err && ovl_already_copied_up_locked(dentry, flags)) {
                err = 1; /* Already copied up */
                ovl_inode_unlock(inode);
@@ -764,7 +764,7 @@ int ovl_nlink_start(struct dentry *dentry)
                        return err;
        }
 
-       err = ovl_inode_lock(inode);
+       err = ovl_inode_lock_interruptible(inode);
        if (err)
                return err;
 
index 69aee3d..3ce9829 100644 (file)
@@ -178,7 +178,8 @@ static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
  * amount of readable data in the zone.
  */
 static loff_t zonefs_check_zone_condition(struct inode *inode,
-                                         struct blk_zone *zone, bool warn)
+                                         struct blk_zone *zone, bool warn,
+                                         bool mount)
 {
        struct zonefs_inode_info *zi = ZONEFS_I(inode);
 
@@ -196,13 +197,26 @@ static loff_t zonefs_check_zone_condition(struct inode *inode,
                zone->wp = zone->start;
                return 0;
        case BLK_ZONE_COND_READONLY:
-               /* Do not allow writes in read-only zones */
+               /*
+                * The write pointer of read-only zones is invalid. If such a
+                * zone is found during mount, the file size cannot be retrieved
+                * so we treat the zone as offline (mount == true case).
+                * Otherwise, keep the file size as it was when last updated
+                * so that the user can recover data. In both cases, writes are
+                * always disabled for the zone.
+                */
                if (warn)
                        zonefs_warn(inode->i_sb, "inode %lu: read-only zone\n",
                                    inode->i_ino);
                inode->i_flags |= S_IMMUTABLE;
+               if (mount) {
+                       zone->cond = BLK_ZONE_COND_OFFLINE;
+                       inode->i_mode &= ~0777;
+                       zone->wp = zone->start;
+                       return 0;
+               }
                inode->i_mode &= ~0222;
-               /* fallthrough */
+               return i_size_read(inode);
        default:
                if (zi->i_ztype == ZONEFS_ZTYPE_CNV)
                        return zi->i_max_size;
@@ -231,7 +245,7 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
         * as there is no inconsistency between the inode size and the amount of
         * data writen in the zone (data_size).
         */
-       data_size = zonefs_check_zone_condition(inode, zone, true);
+       data_size = zonefs_check_zone_condition(inode, zone, true, false);
        isize = i_size_read(inode);
        if (zone->cond != BLK_ZONE_COND_OFFLINE &&
            zone->cond != BLK_ZONE_COND_READONLY &&
@@ -274,7 +288,7 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
                if (zone->cond != BLK_ZONE_COND_OFFLINE) {
                        zone->cond = BLK_ZONE_COND_OFFLINE;
                        data_size = zonefs_check_zone_condition(inode, zone,
-                                                               false);
+                                                               false, false);
                }
        } else if (zone->cond == BLK_ZONE_COND_READONLY ||
                   sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO) {
@@ -283,7 +297,7 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
                if (zone->cond != BLK_ZONE_COND_READONLY) {
                        zone->cond = BLK_ZONE_COND_READONLY;
                        data_size = zonefs_check_zone_condition(inode, zone,
-                                                               false);
+                                                               false, false);
                }
        }
 
@@ -975,7 +989,7 @@ static void zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
        zi->i_zsector = zone->start;
        zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE,
                               zone->len << SECTOR_SHIFT);
-       zi->i_wpoffset = zonefs_check_zone_condition(inode, zone, true);
+       zi->i_wpoffset = zonefs_check_zone_condition(inode, zone, true, true);
 
        inode->i_uid = sbi->s_uid;
        inode->i_gid = sbi->s_gid;
index 4e6dc84..9ecb3c1 100644 (file)
@@ -33,7 +33,8 @@ bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE],
                             const u8 secret[CURVE25519_KEY_SIZE],
                             const u8 basepoint[CURVE25519_KEY_SIZE])
 {
-       if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519))
+       if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519) &&
+           (!IS_ENABLED(CONFIG_CRYPTO_CURVE25519_X86) || IS_ENABLED(CONFIG_AS_ADX)))
                curve25519_arch(mypublic, secret, basepoint);
        else
                curve25519_generic(mypublic, secret, basepoint);
@@ -49,7 +50,8 @@ __must_check curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE],
                                    CURVE25519_KEY_SIZE)))
                return false;
 
-       if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519))
+       if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519) &&
+           (!IS_ENABLED(CONFIG_CRYPTO_CURVE25519_X86) || IS_ENABLED(CONFIG_AS_ADX)))
                curve25519_base_arch(pub, secret);
        else
                curve25519_generic(pub, secret, curve25519_base_point);
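
The curve25519 change above gates the arch implementation on a compile-time condition built from config symbols, so the unwanted path is folded away. A small sketch of that constant-folded dispatch pattern; the HAVE_*/ARCH_* macros are stand-ins, not kernel config options.

#include <stdio.h>

#if defined(HAVE_ARCH_IMPL) && (!defined(ARCH_NEEDS_ADX) || defined(HAVE_ADX))
#define ARCH_IMPL_USABLE 1
#else
#define ARCH_IMPL_USABLE 0
#endif

static void op_generic(void) { puts("generic implementation"); }
static void op_arch(void)    { puts("arch implementation"); }

int main(void)
{
        if (ARCH_IMPL_USABLE)   /* constant condition, like the IS_ENABLED() checks */
                op_arch();
        else
                op_generic();
        return 0;
}
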
index bcb39da..41725d8 100644 (file)
@@ -81,7 +81,7 @@ struct drm_dp_vcpi {
  * &drm_dp_mst_topology_mgr.base.lock.
  * @num_sdp_stream_sinks: Number of stream sinks. Protected by
  * &drm_dp_mst_topology_mgr.base.lock.
- * @available_pbn: Available bandwidth for this port. Protected by
+ * @full_pbn: Max possible bandwidth for this port. Protected by
  * &drm_dp_mst_topology_mgr.base.lock.
  * @next: link to next port on this branch device
  * @aux: i2c aux transport to talk to device connected to this port, protected
@@ -126,7 +126,7 @@ struct drm_dp_mst_port {
        u8 dpcd_rev;
        u8 num_sdp_streams;
        u8 num_sdp_stream_sinks;
-       uint16_t available_pbn;
+       uint16_t full_pbn;
        struct list_head next;
        /**
         * @mstb: the branch device connected to this port, if there is one.
index 0f2b842..65ac6eb 100644 (file)
 #define IMX8MN_CLK_I2C1                                105
 #define IMX8MN_CLK_I2C2                                106
 #define IMX8MN_CLK_I2C3                                107
-#define IMX8MN_CLK_I2C4                                118
-#define IMX8MN_CLK_UART1                       119
+#define IMX8MN_CLK_I2C4                                108
+#define IMX8MN_CLK_UART1                       109
 #define IMX8MN_CLK_UART2                       110
 #define IMX8MN_CLK_UART3                       111
 #define IMX8MN_CLK_UART4                       112
index 49b1a70..212991f 100644 (file)
@@ -160,6 +160,7 @@ static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
 }
 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
                           bool lock_src);
+int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
 
 struct bpf_offload_dev;
 struct bpf_offloaded_map;
index c4458dc..76371aa 100644 (file)
@@ -175,9 +175,10 @@ struct ceph_msg_data {
 #endif /* CONFIG_BLOCK */
                struct ceph_bvec_iter   bvec_pos;
                struct {
-                       struct page     **pages;        /* NOT OWNER. */
+                       struct page     **pages;
                        size_t          length;         /* total # bytes */
                        unsigned int    alignment;      /* first page */
+                       bool            own_pages;
                };
                struct ceph_pagelist    *pagelist;
        };
@@ -356,8 +357,8 @@ extern void ceph_con_keepalive(struct ceph_connection *con);
 extern bool ceph_con_keepalive_expired(struct ceph_connection *con,
                                       unsigned long interval);
 
-extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
-                               size_t length, size_t alignment);
+void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
+                            size_t length, size_t alignment, bool own_pages);
 extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
                                struct ceph_pagelist *pagelist);
 #ifdef CONFIG_BLOCK
index e081b56..5e60197 100644 (file)
@@ -37,6 +37,9 @@ int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs);
 #define CEPH_POOL_FLAG_HASHPSPOOL      (1ULL << 0) /* hash pg seed and pool id
                                                       together */
 #define CEPH_POOL_FLAG_FULL            (1ULL << 1) /* pool is full */
+#define CEPH_POOL_FLAG_FULL_QUOTA      (1ULL << 10) /* pool ran out of quota,
+                                                       will set FULL too */
+#define CEPH_POOL_FLAG_NEARFULL                (1ULL << 11) /* pool is nearfull */
 
 struct ceph_pg_pool_info {
        struct rb_node node;
@@ -304,5 +307,6 @@ extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
 
 extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
 extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
+u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id);
 
 #endif
index 59bdfd4..88ed3c5 100644 (file)
@@ -143,8 +143,10 @@ extern const char *ceph_osd_state_name(int s);
 /*
  * osd map flag bits
  */
-#define CEPH_OSDMAP_NEARFULL (1<<0)  /* sync writes (near ENOSPC) */
-#define CEPH_OSDMAP_FULL     (1<<1)  /* no data writes (ENOSPC) */
+#define CEPH_OSDMAP_NEARFULL (1<<0)  /* sync writes (near ENOSPC),
+                                       not set since ~luminous */
+#define CEPH_OSDMAP_FULL     (1<<1)  /* no data writes (ENOSPC),
+                                       not set since ~luminous */
 #define CEPH_OSDMAP_PAUSERD  (1<<2)  /* pause all reads */
 #define CEPH_OSDMAP_PAUSEWR  (1<<3)  /* pause all writes */
 #define CEPH_OSDMAP_PAUSEREC (1<<4)  /* pause recovery */
index d7ddebd..e75d219 100644 (file)
@@ -62,6 +62,7 @@ struct css_task_iter {
        struct list_head                *mg_tasks_head;
        struct list_head                *dying_tasks_head;
 
+       struct list_head                *cur_tasks_head;
        struct css_set                  *cur_cset;
        struct css_set                  *cur_dcset;
        struct task_struct              *cur_task;
index 952ac03..bd1ee90 100644 (file)
@@ -522,9 +522,9 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
  * @clk_gate_flags: gate-specific flags for this clock
  * @lock: shared register lock for this clock
  */
-#define clk_hw_register_gate_parent_hw(dev, name, parent_name, flags, reg,    \
+#define clk_hw_register_gate_parent_hw(dev, name, parent_hw, flags, reg,      \
                                       bit_idx, clk_gate_flags, lock)         \
-       __clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL,      \
+       __clk_hw_register_gate((dev), NULL, (name), NULL, (parent_hw),        \
                               NULL, (flags), (reg), (bit_idx),               \
                               (clk_gate_flags), (lock))
 /**
@@ -539,10 +539,10 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
  * @clk_gate_flags: gate-specific flags for this clock
  * @lock: shared register lock for this clock
  */
-#define clk_hw_register_gate_parent_data(dev, name, parent_name, flags, reg,  \
+#define clk_hw_register_gate_parent_data(dev, name, parent_data, flags, reg,  \
                                       bit_idx, clk_gate_flags, lock)         \
-       __clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL,      \
-                              NULL, (flags), (reg), (bit_idx),               \
+       __clk_hw_register_gate((dev), NULL, (name), NULL, NULL, (parent_data), \
+                              (flags), (reg), (bit_idx),                     \
                               (clk_gate_flags), (lock))
 void clk_unregister_gate(struct clk *clk);
 void clk_hw_unregister_gate(struct clk_hw *hw);
index f64ca27..d7bf029 100644 (file)
@@ -69,19 +69,23 @@ struct dmar_pci_notify_info {
 extern struct rw_semaphore dmar_global_lock;
 extern struct list_head dmar_drhd_units;
 
-#define for_each_drhd_unit(drhd) \
-       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)
+#define for_each_drhd_unit(drhd)                                       \
+       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,           \
+                               dmar_rcu_check())
 
 #define for_each_active_drhd_unit(drhd)                                        \
-       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)           \
+       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,           \
+                               dmar_rcu_check())                       \
                if (drhd->ignored) {} else
 
 #define for_each_active_iommu(i, drhd)                                 \
-       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)           \
+       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,           \
+                               dmar_rcu_check())                       \
                if (i=drhd->iommu, drhd->ignored) {} else
 
 #define for_each_iommu(i, drhd)                                                \
-       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)           \
+       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,           \
+                               dmar_rcu_check())                       \
                if (i=drhd->iommu, 0) {} else 
 
 static inline bool dmar_rcu_check(void)
index 0aa803c..c620d91 100644 (file)
@@ -28,8 +28,6 @@ int dsa_8021q_rx_switch_id(u16 vid);
 
 int dsa_8021q_rx_source_port(u16 vid);
 
-struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb);
-
 #else
 
 int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index,
@@ -64,11 +62,6 @@ int dsa_8021q_rx_source_port(u16 vid)
        return 0;
 }
 
-struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb)
-{
-       return NULL;
-}
-
 #endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */
 
 #endif /* _NET_DSA_8021Q_H */
index c6c7b24..142d102 100644 (file)
@@ -85,6 +85,7 @@ extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
 extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
 extern void set_close_on_exec(unsigned int fd, int flag);
 extern bool get_close_on_exec(unsigned int fd);
+extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile);
 extern int get_unused_fd_flags(unsigned flags);
 extern void put_unused_fd(unsigned int fd);
 
index 561b35e..593e911 100644 (file)
@@ -698,6 +698,7 @@ struct inode {
                struct rcu_head         i_rcu;
        };
        atomic64_t              i_version;
+       atomic64_t              i_sequence; /* see futex */
        atomic_t                i_count;
        atomic_t                i_dio_count;
        atomic_t                i_writecount;
index 5cc3fed..b70df27 100644 (file)
@@ -31,23 +31,26 @@ struct task_struct;
 
 union futex_key {
        struct {
+               u64 i_seq;
                unsigned long pgoff;
-               struct inode *inode;
-               int offset;
+               unsigned int offset;
        } shared;
        struct {
+               union {
+                       struct mm_struct *mm;
+                       u64 __tmp;
+               };
                unsigned long address;
-               struct mm_struct *mm;
-               int offset;
+               unsigned int offset;
        } private;
        struct {
+               u64 ptr;
                unsigned long word;
-               void *ptr;
-               int offset;
+               unsigned int offset;
        } both;
 };
 
-#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } }
 
 #ifdef CONFIG_FUTEX
 enum {
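
The futex rework above gives every variant of union futex_key a leading 64-bit field (i_seq, mm/__tmp, ptr) so the ->both view overlays the others and comparisons can go through it. A standalone mock of that layout with static checks follows; it simplifies the mm/__tmp sub-union to a plain 64-bit integer and is not the kernel header.

#include <stddef.h>
#include <stdint.h>

union key {
        struct { uint64_t i_seq; unsigned long pgoff;   unsigned int offset; } shared;
        struct { uint64_t mm;    unsigned long address; unsigned int offset; } private;
        struct { uint64_t ptr;   unsigned long word;    unsigned int offset; } both;
};

_Static_assert(offsetof(union key, shared.offset) == offsetof(union key, both.offset),
               "shared and both views must line up");
_Static_assert(offsetof(union key, private.offset) == offsetof(union key, both.offset),
               "private and both views must line up");

static int keys_equal(const union key *a, const union key *b)
{
        /* compare through ->both regardless of which variant was filled in */
        return a->both.ptr == b->both.ptr &&
               a->both.word == b->both.word &&
               a->both.offset == b->both.offset;
}

int main(void)
{
        union key a = { .shared = { .i_seq = 1, .pgoff = 2, .offset = 3 } };
        union key b = a;

        return !keys_equal(&a, &b);     /* exit status 0: keys match */
}
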
index 85b9e25..9b3fffd 100644 (file)
@@ -246,18 +246,6 @@ static inline bool disk_part_scan_enabled(struct gendisk *disk)
                !(disk->flags & GENHD_FL_NO_PART_SCAN);
 }
 
-static inline bool disk_has_partitions(struct gendisk *disk)
-{
-       bool ret = false;
-
-       rcu_read_lock();
-       if (rcu_dereference(disk->part_tbl)->len > 1)
-               ret = true;
-       rcu_read_unlock();
-
-       return ret;
-}
-
 static inline dev_t disk_devt(struct gendisk *disk)
 {
        return MKDEV(disk->major, disk->first_minor);
@@ -296,6 +284,7 @@ extern void disk_part_iter_init(struct disk_part_iter *piter,
                                 struct gendisk *disk, unsigned int flags);
 extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter);
 extern void disk_part_iter_exit(struct disk_part_iter *piter);
+extern bool disk_has_partitions(struct gendisk *disk);
 
 /* block/genhd.c */
 extern void device_add_disk(struct device *parent, struct gendisk *disk,
index f834687..f6b9421 100644 (file)
@@ -506,7 +506,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
  * @smbus_xfer_atomic: same as @smbus_xfer. Yet, only using atomic context
  *   so e.g. PMICs can be accessed very late before shutdown. Optional.
  * @functionality: Return the flags that this algorithm/adapter pair supports
- *   from the I2C_FUNC_* flags.
+ *   from the ``I2C_FUNC_*`` flags.
  * @reg_slave: Register given client to I2C slave mode of this adapter
  * @unreg_slave: Unregister given client from I2C slave mode of this adapter
  *
@@ -515,7 +515,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
  * be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584
  * to name two of the most common.
  *
- * The return codes from the @master_xfer{_atomic} fields should indicate the
+ * The return codes from the ``master_xfer{_atomic}`` fields should indicate the
  * type of error code that occurred during the transfer, as documented in the
  * Kernel Documentation file Documentation/i2c/fault-codes.rst.
  */
index 7d3f2ce..73c66a3 100644 (file)
@@ -2102,14 +2102,14 @@ ieee80211_he_spr_size(const u8 *he_spr_ie)
 {
        struct ieee80211_he_spr *he_spr = (void *)he_spr_ie;
        u8 spr_len = sizeof(struct ieee80211_he_spr);
-       u32 he_spr_params;
+       u8 he_spr_params;
 
        /* Make sure the input is not NULL */
        if (!he_spr_ie)
                return 0;
 
        /* Calc required length */
-       he_spr_params = le32_to_cpu(he_spr->he_sr_control);
+       he_spr_params = he_spr->he_sr_control;
        if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
                spr_len++;
        if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT)
index 39faaaf..c91cf2d 100644 (file)
@@ -2,15 +2,10 @@
 #ifndef _INET_DIAG_H_
 #define _INET_DIAG_H_ 1
 
+#include <net/netlink.h>
 #include <uapi/linux/inet_diag.h>
 
-struct net;
-struct sock;
 struct inet_hashinfo;
-struct nlattr;
-struct nlmsghdr;
-struct sk_buff;
-struct netlink_callback;
 
 struct inet_diag_handler {
        void            (*dump)(struct sk_buff *skb,
@@ -62,6 +57,17 @@ int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
 
 void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
 
+static inline size_t inet_diag_msg_attrs_size(void)
+{
+       return    nla_total_size(1)  /* INET_DIAG_SHUTDOWN */
+               + nla_total_size(1)  /* INET_DIAG_TOS */
+#if IS_ENABLED(CONFIG_IPV6)
+               + nla_total_size(1)  /* INET_DIAG_TCLASS */
+               + nla_total_size(1)  /* INET_DIAG_SKV6ONLY */
+#endif
+               + nla_total_size(4)  /* INET_DIAG_MARK */
+               + nla_total_size(4); /* INET_DIAG_CLASS_ID */
+}
 int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
                             struct inet_diag_msg *r, int ext,
                             struct user_namespace *user_ns, bool net_admin);
index 4a16b39..980234a 100644 (file)
 
 #define dmar_readq(a) readq(a)
 #define dmar_writeq(a,v) writeq(v,a)
+#define dmar_readl(a) readl(a)
+#define dmar_writel(a, v) writel(v, a)
 
 #define DMAR_VER_MAJOR(v)              (((v) & 0xf0) >> 4)
 #define DMAR_VER_MINOR(v)              ((v) & 0x0f)
index 2ca9b70..cffa471 100644 (file)
@@ -57,8 +57,6 @@
 #define VPRINTK(fmt, args...)
 #endif /* ATA_DEBUG */
 
-#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-
 #define ata_print_version_once(dev, version)                   \
 ({                                                             \
        static bool __print_once;                               \
@@ -176,6 +174,7 @@ enum {
        ATA_DEV_NONE            = 11,   /* no device */
 
        /* struct ata_link flags */
+       /* NOTE: struct ata_force_param currently stores lflags in u16 */
        ATA_LFLAG_NO_HRST       = (1 << 1), /* avoid hardreset */
        ATA_LFLAG_NO_SRST       = (1 << 2), /* avoid softreset */
        ATA_LFLAG_ASSUME_ATA    = (1 << 3), /* assume ATA class */
@@ -531,12 +530,14 @@ typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes,
                              unsigned long deadline);
 typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes);
 
-extern struct device_attribute dev_attr_link_power_management_policy;
 extern struct device_attribute dev_attr_unload_heads;
+#ifdef CONFIG_SATA_HOST
+extern struct device_attribute dev_attr_link_power_management_policy;
 extern struct device_attribute dev_attr_ncq_prio_enable;
 extern struct device_attribute dev_attr_em_message_type;
 extern struct device_attribute dev_attr_em_message;
 extern struct device_attribute dev_attr_sw_activity;
+#endif
 
 enum sw_activity {
        OFF,
@@ -1020,10 +1021,6 @@ struct ata_timing {
 /*
  * Core layer - drivers/ata/libata-core.c
  */
-extern const unsigned long sata_deb_timing_normal[];
-extern const unsigned long sata_deb_timing_hotplug[];
-extern const unsigned long sata_deb_timing_long[];
-
 extern struct ata_port_operations ata_dummy_port_ops;
 extern const struct ata_port_info ata_dummy_port_info;
 
@@ -1061,33 +1058,14 @@ static inline int is_multi_taskfile(struct ata_taskfile *tf)
               (tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT);
 }
 
-static inline const unsigned long *
-sata_ehc_deb_timing(struct ata_eh_context *ehc)
-{
-       if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
-               return sata_deb_timing_hotplug;
-       else
-               return sata_deb_timing_normal;
-}
-
 static inline int ata_port_is_dummy(struct ata_port *ap)
 {
        return ap->ops == &ata_dummy_port_ops;
 }
 
-extern int sata_set_spd(struct ata_link *link);
 extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
 extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
                                int (*check_ready)(struct ata_link *link));
-extern int sata_link_debounce(struct ata_link *link,
-                       const unsigned long *params, unsigned long deadline);
-extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
-                           unsigned long deadline);
-extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
-                            bool spm_wakeup);
-extern int sata_link_hardreset(struct ata_link *link,
-                       const unsigned long *timing, unsigned long deadline,
-                       bool *online, int (*check_ready)(struct ata_link *));
 extern int sata_std_hardreset(struct ata_link *link, unsigned int *class,
                              unsigned long deadline);
 extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
@@ -1095,7 +1073,6 @@ extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
 extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
 extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
                        const struct ata_port_info * const * ppi, int n_ports);
-extern int ata_slave_link_init(struct ata_port *ap);
 extern void ata_host_get(struct ata_host *host);
 extern void ata_host_put(struct ata_host *host);
 extern int ata_host_start(struct ata_host *host);
@@ -1117,22 +1094,6 @@ extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd,
 extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
 extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev,
                            unsigned int cmd, void __user *arg);
-extern void ata_sas_port_destroy(struct ata_port *);
-extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
-                                          struct ata_port_info *, struct Scsi_Host *);
-extern void ata_sas_async_probe(struct ata_port *ap);
-extern int ata_sas_sync_probe(struct ata_port *ap);
-extern int ata_sas_port_init(struct ata_port *);
-extern int ata_sas_port_start(struct ata_port *ap);
-extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
-extern void ata_sas_tport_delete(struct ata_port *ap);
-extern void ata_sas_port_stop(struct ata_port *ap);
-extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
-extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
-extern int sata_scr_valid(struct ata_link *link);
-extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
-extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
-extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
 extern bool ata_link_online(struct ata_link *link);
 extern bool ata_link_offline(struct ata_link *link);
 #ifdef CONFIG_PM
@@ -1153,9 +1114,6 @@ extern void ata_msleep(struct ata_port *ap, unsigned int msecs);
 extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask,
                        u32 val, unsigned long interval, unsigned long timeout);
 extern int atapi_cmd_type(u8 opcode);
-extern void ata_tf_to_fis(const struct ata_taskfile *tf,
-                         u8 pmp, int is_cmd, u8 *fis);
-extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
 extern unsigned long ata_pack_xfermask(unsigned long pio_mask,
                        unsigned long mwdma_mask, unsigned long udma_mask);
 extern void ata_unpack_xfermask(unsigned long xfer_mask,
@@ -1179,7 +1137,6 @@ extern void ata_id_c_string(const u16 *id, unsigned char *s,
 extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
                                        struct ata_taskfile *tf, u16 *id);
 extern void ata_qc_complete(struct ata_queued_cmd *qc);
-extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active);
 extern u64 ata_qc_get_active(struct ata_port *ap);
 extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
 extern int ata_std_bios_param(struct scsi_device *sdev,
@@ -1196,7 +1153,96 @@ extern struct ata_device *ata_dev_pair(struct ata_device *adev);
 extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
 extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
 extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
+
+/*
+ * SATA specific code - drivers/ata/libata-sata.c
+ */
+#ifdef CONFIG_SATA_HOST
+extern const unsigned long sata_deb_timing_normal[];
+extern const unsigned long sata_deb_timing_hotplug[];
+extern const unsigned long sata_deb_timing_long[];
+
+static inline const unsigned long *
+sata_ehc_deb_timing(struct ata_eh_context *ehc)
+{
+       if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
+               return sata_deb_timing_hotplug;
+       else
+               return sata_deb_timing_normal;
+}
+
+extern int sata_scr_valid(struct ata_link *link);
+extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
+extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
+extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
+extern int sata_set_spd(struct ata_link *link);
+extern int sata_link_hardreset(struct ata_link *link,
+                       const unsigned long *timing, unsigned long deadline,
+                       bool *online, int (*check_ready)(struct ata_link *));
+extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
+                           unsigned long deadline);
+extern void ata_eh_analyze_ncq_error(struct ata_link *link);
+#else
+static inline const unsigned long *
+sata_ehc_deb_timing(struct ata_eh_context *ehc)
+{
+       return NULL;
+}
+static inline int sata_scr_valid(struct ata_link *link) { return 0; }
+static inline int sata_scr_read(struct ata_link *link, int reg, u32 *val)
+{
+       return -EOPNOTSUPP;
+}
+static inline int sata_scr_write(struct ata_link *link, int reg, u32 val)
+{
+       return -EOPNOTSUPP;
+}
+static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
+{
+       return -EOPNOTSUPP;
+}
+static inline int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; }
+static inline int sata_link_hardreset(struct ata_link *link,
+                                     const unsigned long *timing,
+                                     unsigned long deadline,
+                                     bool *online,
+                                     int (*check_ready)(struct ata_link *))
+{
+       if (online)
+               *online = false;
+       return -EOPNOTSUPP;
+}
+static inline int sata_link_resume(struct ata_link *link,
+                                  const unsigned long *params,
+                                  unsigned long deadline)
+{
+       return -EOPNOTSUPP;
+}
+static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { }
+#endif
+extern int sata_link_debounce(struct ata_link *link,
+                       const unsigned long *params, unsigned long deadline);
+extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+                            bool spm_wakeup);
+extern int ata_slave_link_init(struct ata_port *ap);
+extern void ata_sas_port_destroy(struct ata_port *);
+extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
+                                          struct ata_port_info *, struct Scsi_Host *);
+extern void ata_sas_async_probe(struct ata_port *ap);
+extern int ata_sas_sync_probe(struct ata_port *ap);
+extern int ata_sas_port_init(struct ata_port *);
+extern int ata_sas_port_start(struct ata_port *ap);
+extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
+extern void ata_sas_tport_delete(struct ata_port *ap);
+extern void ata_sas_port_stop(struct ata_port *ap);
+extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
+extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
+extern void ata_tf_to_fis(const struct ata_taskfile *tf,
+                         u8 pmp, int is_cmd, u8 *fis);
+extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
+extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active);
 extern bool sata_lpm_ignore_phy_events(struct ata_link *link);
+extern int sata_async_notification(struct ata_port *ap);
 
 extern int ata_cable_40wire(struct ata_port *ap);
 extern int ata_cable_80wire(struct ata_port *ap);
@@ -1206,12 +1252,6 @@ extern int ata_cable_unknown(struct ata_port *ap);
 
 /* Timing helpers */
 extern unsigned int ata_pio_need_iordy(const struct ata_device *);
-extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode);
-extern int ata_timing_compute(struct ata_device *, unsigned short,
-                             struct ata_timing *, int, int);
-extern void ata_timing_merge(const struct ata_timing *,
-                            const struct ata_timing *, struct ata_timing *,
-                            unsigned int);
 extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle);
 
 /* PCI */
@@ -1295,14 +1335,12 @@ extern void ata_port_wait_eh(struct ata_port *ap);
 extern int ata_link_abort(struct ata_link *link);
 extern int ata_port_abort(struct ata_port *ap);
 extern int ata_port_freeze(struct ata_port *ap);
-extern int sata_async_notification(struct ata_port *ap);
 
 extern void ata_eh_freeze_port(struct ata_port *ap);
 extern void ata_eh_thaw_port(struct ata_port *ap);
 
 extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
 extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
-extern void ata_eh_analyze_ncq_error(struct ata_link *link);
 
 extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
                      ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
@@ -1343,7 +1381,7 @@ extern struct device_attribute *ata_common_sdev_attrs[];
  * edge driver's module reference, otherwise the driver can be unloaded
  * even if the scsi_device is being accessed.
  */
-#define ATA_BASE_SHT(drv_name)                                 \
+#define __ATA_BASE_SHT(drv_name)                               \
        .module                 = THIS_MODULE,                  \
        .name                   = drv_name,                     \
        .ioctl                  = ata_scsi_ioctl,               \
@@ -1357,12 +1395,20 @@ extern struct device_attribute *ata_common_sdev_attrs[];
        .slave_configure        = ata_scsi_slave_config,        \
        .slave_destroy          = ata_scsi_slave_destroy,       \
        .bios_param             = ata_std_bios_param,           \
-       .unlock_native_capacity = ata_scsi_unlock_native_capacity, \
+       .unlock_native_capacity = ata_scsi_unlock_native_capacity
+
+#define ATA_BASE_SHT(drv_name)                                 \
+       __ATA_BASE_SHT(drv_name),                               \
        .sdev_attrs             = ata_common_sdev_attrs
 
+#ifdef CONFIG_SATA_HOST
+extern struct device_attribute *ata_ncq_sdev_attrs[];
+
 #define ATA_NCQ_SHT(drv_name)                                  \
-       ATA_BASE_SHT(drv_name),                                 \
+       __ATA_BASE_SHT(drv_name),                               \
+       .sdev_attrs             = ata_ncq_sdev_attrs,           \
        .change_queue_depth     = ata_scsi_change_queue_depth
+#endif
 
 /*
  * PMP helpers
@@ -1635,6 +1681,8 @@ extern struct ata_device *ata_dev_next(struct ata_device *dev,
  */
 static inline int ata_ncq_enabled(struct ata_device *dev)
 {
+       if (!IS_ENABLED(CONFIG_SATA_HOST))
+               return 0;
        return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
                              ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
 }
@@ -1804,6 +1852,16 @@ static inline int ata_dma_enabled(struct ata_device *adev)
 }
 
 /**************************************************************************
+ * PATA timings - drivers/ata/libata-pata-timings.c
+ */
+extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode);
+extern int ata_timing_compute(struct ata_device *, unsigned short,
+                             struct ata_timing *, int, int);
+extern void ata_timing_merge(const struct ata_timing *,
+                            const struct ata_timing *, struct ata_timing *,
+                            unsigned int);
+
+/**************************************************************************
  * PMP - drivers/ata/libata-pmp.c
  */
 #ifdef CONFIG_SATA_PMP
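
The libata.h hunks above move the SATA-only API behind CONFIG_SATA_HOST: the real declarations live under the #ifdef, the #else branch supplies inline stubs that return -EOPNOTSUPP (or do nothing), and helpers such as ata_ncq_enabled() short-circuit through IS_ENABLED() so callers need no #ifdefs of their own. A minimal sketch of that pattern under invented names (CONFIG_FOO_HOST, foo_reset_link() and foo_link_ok() are illustrative, not from the header):

#include <linux/errno.h>
#include <linux/kconfig.h>
#include <linux/types.h>

struct foo_link;

#ifdef CONFIG_FOO_HOST
int foo_reset_link(struct foo_link *link);      /* real implementation elsewhere */
#else
static inline int foo_reset_link(struct foo_link *link)
{
        /* Compiled-out variant: report "not supported", touch no hardware. */
        return -EOPNOTSUPP;
}
#endif

static inline bool foo_link_ok(struct foo_link *link)
{
        /*
         * IS_ENABLED() lets the compiler drop the whole branch when the
         * option is off, without an #ifdef at every call site.
         */
        if (!IS_ENABLED(CONFIG_FOO_HOST))
                return false;
        return foo_reset_link(link) == 0;
}
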
index a7a0a1a..e9ba013 100644 (file)
@@ -695,6 +695,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                        int val);
 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
+void mod_memcg_obj_state(void *p, int idx, int val);
 
 static inline void mod_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx, int val)
@@ -1123,6 +1124,10 @@ static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
        __mod_node_page_state(page_pgdat(page), idx, val);
 }
 
+static inline void mod_memcg_obj_state(void *p, int idx, int val)
+{
+}
+
 static inline
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
                                            gfp_t gfp_mask,
@@ -1427,6 +1432,8 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
        return memcg ? memcg->kmemcg_id : -1;
 }
 
+struct mem_cgroup *mem_cgroup_from_obj(void *p);
+
 #else
 
 static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
@@ -1468,6 +1475,11 @@ static inline void memcg_put_cache_ids(void)
 {
 }
 
+static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
+{
+       return NULL;
+}
+
 #endif /* CONFIG_MEMCG_KMEM */
 
 #endif /* _LINUX_MEMCONTROL_H */
index ba70338..4c5eb3a 100644 (file)
@@ -333,6 +333,7 @@ struct mmc_host {
                                 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \
                                 MMC_CAP_UHS_DDR50)
 #define MMC_CAP_SYNC_RUNTIME_PM        (1 << 21)       /* Synced runtime PM suspends. */
+#define MMC_CAP_NEED_RSP_BUSY  (1 << 22)       /* Commands with R1B can't use R1. */
 #define MMC_CAP_DRIVER_TYPE_A  (1 << 23)       /* Host supports Driver Type A */
 #define MMC_CAP_DRIVER_TYPE_C  (1 << 24)       /* Host supports Driver Type C */
 #define MMC_CAP_DRIVER_TYPE_D  (1 << 25)       /* Host supports Driver Type D */
index 205fa7b..60739d0 100644 (file)
@@ -115,6 +115,19 @@ static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack,
 {
        u64 __cookie = cookie;
 
+       if (!extack)
+               return;
+       memcpy(extack->cookie, &__cookie, sizeof(__cookie));
+       extack->cookie_len = sizeof(__cookie);
+}
+
+static inline void nl_set_extack_cookie_u32(struct netlink_ext_ack *extack,
+                                           u32 cookie)
+{
+       u32 __cookie = cookie;
+
+       if (!extack)
+               return;
        memcpy(extack->cookie, &__cookie, sizeof(__cookie));
        extack->cookie_len = sizeof(__cookie);
 }
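
The netlink.h hunk adds a NULL check to nl_set_extack_cookie_u64() and introduces a 32-bit variant. A hedged usage sketch, where the handler signature follows the usual rtnetlink doit form and my_create_object() is an invented helper standing in for whatever produces the identifier:

#include <linux/netlink.h>
#include <linux/skbuff.h>

static int my_create_object(const struct nlmsghdr *nlh, u32 *id)
{
        *id = 42;               /* placeholder for real object creation */
        return 0;
}

static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                   struct netlink_ext_ack *extack)
{
        u32 id;
        int err = my_create_object(nlh, &id);

        if (err)
                return err;
        /* Safe even when no extack was supplied: the helper now bails out. */
        nl_set_extack_cookie_u32(extack, id);
        return 0;
}
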
index c86fcad..31b73a0 100644 (file)
@@ -11,17 +11,17 @@ struct of_device_id;
 
 #if defined(CONFIG_COMMON_CLK) && defined(CONFIG_OF)
 
-unsigned int of_clk_get_parent_count(struct device_node *np);
-const char *of_clk_get_parent_name(struct device_node *np, int index);
+unsigned int of_clk_get_parent_count(const struct device_node *np);
+const char *of_clk_get_parent_name(const struct device_node *np, int index);
 void of_clk_init(const struct of_device_id *matches);
 
 #else /* !CONFIG_COMMON_CLK || !CONFIG_OF */
 
-static inline unsigned int of_clk_get_parent_count(struct device_node *np)
+static inline unsigned int of_clk_get_parent_count(const struct device_node *np)
 {
        return 0;
 }
-static inline const char *of_clk_get_parent_name(struct device_node *np,
+static inline const char *of_clk_get_parent_name(const struct device_node *np,
                                                 int index)
 {
        return NULL;
index 1bf83c8..77de28b 100644 (file)
@@ -311,7 +311,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }
 
 __PAGEFLAG(Locked, locked, PF_NO_TAIL)
 PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
-PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
+PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
 PAGEFLAG(Referenced, referenced, PF_HEAD)
        TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
        __SETPAGEFLAG(Referenced, referenced, PF_HEAD)
index 352c0d7..977e668 100644 (file)
 
 /* Vendors and devices.  Sort key: vendor first, device next. */
 
+#define PCI_VENDOR_ID_LOONGSON         0x0014
+
 #define PCI_VENDOR_ID_TTTECH           0x0357
 #define PCI_DEVICE_ID_TTTECH_MC322     0x000a
 
index c570e16..452e8ba 100644 (file)
@@ -357,6 +357,7 @@ struct macsec_ops;
  * is_gigabit_capable: Set to true if PHY supports 1000Mbps
  * has_fixups: Set to true if this phy has fixups/quirks.
  * suspended: Set to true if this phy has been suspended successfully.
+ * suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus.
  * sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
  * loopback_enabled: Set true if this phy has been loopbacked successfully.
  * state: state of the PHY for management purposes
@@ -396,6 +397,7 @@ struct phy_device {
        unsigned is_gigabit_capable:1;
        unsigned has_fixups:1;
        unsigned suspended:1;
+       unsigned suspended_by_mdio_bus:1;
        unsigned sysfs_links:1;
        unsigned loopback_enabled:1;
 
@@ -557,6 +559,7 @@ struct phy_driver {
        /*
         * Checks if the PHY generated an interrupt.
         * For multi-PHY devices with shared PHY interrupt pin
+        * the set interrupt bits have to be cleared.
         */
        int (*did_interrupt)(struct phy_device *phydev);
 
index 276a03c..041bfa4 100644 (file)
@@ -24,7 +24,7 @@ struct platform_device {
        int             id;
        bool            id_auto;
        struct device   dev;
-       u64             dma_mask;
+       u64             platform_dma_mask;
        u32             num_resources;
        struct resource *resource;
 
index beb9a9d..70ebef8 100644 (file)
@@ -972,9 +972,9 @@ static inline int rhashtable_lookup_insert_key(
 /**
  * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
  * @ht:                hash table
+ * @key:       key
  * @obj:       pointer to hash head inside object
  * @params:    hash table parameters
- * @data:      pointer to element data already in hashes
  *
  * Just like rhashtable_lookup_insert_key(), but this function returns the
  * object if it exists, NULL if it does not and the insertion was successful,
index 5b50278..e596202 100644 (file)
@@ -645,8 +645,8 @@ typedef unsigned char *sk_buff_data_t;
  *     @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
  *     @tc_skip_classify: do not classify packet. set by IFB device
  *     @tc_at_ingress: used within tc_classify to distinguish in/egress
- *     @tc_redirected: packet was redirected by a tc action
- *     @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
+ *     @redirected: packet was redirected by packet classifier
+ *     @from_ingress: packet was redirected from the ingress path
  *     @peeked: this packet has been seen already, so stats have been
  *             done for it, don't do them again
  *     @nf_trace: netfilter packet trace flag
@@ -848,8 +848,10 @@ struct sk_buff {
 #ifdef CONFIG_NET_CLS_ACT
        __u8                    tc_skip_classify:1;
        __u8                    tc_at_ingress:1;
-       __u8                    tc_redirected:1;
-       __u8                    tc_from_ingress:1;
+#endif
+#ifdef CONFIG_NET_REDIRECT
+       __u8                    redirected:1;
+       __u8                    from_ingress:1;
 #endif
 #ifdef CONFIG_TLS_DEVICE
        __u8                    decrypted:1;
@@ -4579,5 +4581,31 @@ static inline __wsum lco_csum(struct sk_buff *skb)
        return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
 }
 
+static inline bool skb_is_redirected(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_REDIRECT
+       return skb->redirected;
+#else
+       return false;
+#endif
+}
+
+static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
+{
+#ifdef CONFIG_NET_REDIRECT
+       skb->redirected = 1;
+       skb->from_ingress = from_ingress;
+       if (skb->from_ingress)
+               skb->tstamp = 0;
+#endif
+}
+
+static inline void skb_reset_redirect(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_REDIRECT
+       skb->redirected = 0;
+#endif
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
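
The sk_buff hunks replace the tc_redirected/tc_from_ingress bits with generic redirected/from_ingress flags under CONFIG_NET_REDIRECT and add accessors for them. A hedged sketch of how a caller uses the new helpers; my_redirect() and my_reuse() are invented names, not part of the patch:

#include <linux/skbuff.h>

static void my_redirect(struct sk_buff *skb, bool from_ingress)
{
        /*
         * Mark the skb as redirected.  Ingress redirects also clear
         * skb->tstamp inside the helper, so a stale receive timestamp
         * is not mistaken for a transmit time later on.
         */
        skb_set_redirected(skb, from_ingress);
}

static void my_reuse(struct sk_buff *skb)
{
        if (skb_is_redirected(skb))
                skb_reset_redirect(skb);        /* drop the state before reuse */
}
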
index 2d23134..15f3412 100644 (file)
@@ -401,7 +401,8 @@ extern int __sys_sendto(int fd, void __user *buff, size_t len,
                        int addr_len);
 extern int __sys_accept4_file(struct file *file, unsigned file_flags,
                        struct sockaddr __user *upeer_sockaddr,
-                        int __user *upeer_addrlen, int flags);
+                        int __user *upeer_addrlen, int flags,
+                        unsigned long nofile);
 extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
                         int __user *upeer_addrlen, int flags);
 extern int __sys_socket(int family, int type, int protocol);
index ec38132..0507a16 100644 (file)
@@ -141,8 +141,9 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
 
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                                                        unsigned long pgoff);
-void vmalloc_sync_all(void);
+void vmalloc_sync_mappings(void);
+void vmalloc_sync_unmappings(void);
+
 /*
  *     Lowlevel-APIs (not for driver use!)
  */
index 4261d1c..e48554e 100644 (file)
@@ -487,6 +487,19 @@ extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
  *
  * We queue the work to the CPU on which it was submitted, but if the CPU dies
  * it can be processed by another CPU.
+ *
+ * Memory-ordering properties:  If it returns %true, guarantees that all stores
+ * preceding the call to queue_work() in the program order will be visible from
+ * the CPU which will execute @work by the time such work executes, e.g.,
+ *
+ * { x is initially 0 }
+ *
+ *   CPU0                              CPU1
+ *
+ *   WRITE_ONCE(x, 1);                 [ @work is being executed ]
+ *   r0 = queue_work(wq, work);                  r1 = READ_ONCE(x);
+ *
+ * Forbids: r0 == true && r1 == 0
  */
 static inline bool queue_work(struct workqueue_struct *wq,
                              struct work_struct *work)
@@ -546,6 +559,9 @@ static inline bool schedule_work_on(int cpu, struct work_struct *work)
  * This puts a job in the kernel-global workqueue if it was not already
  * queued and leaves it in the same position on the kernel-global
  * workqueue otherwise.
+ *
+ * Shares the same memory-ordering properties as queue_work(), cf. the
+ * DocBook header of queue_work().
  */
 static inline bool schedule_work(struct work_struct *work)
 {
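
The new kernel-doc above spells out the ordering contract: every store made before a queue_work() that returns %true is visible to the work function when it runs. A hedged sketch of code relying on that guarantee (my_payload, my_work_fn() and my_kick() are illustrative):

#include <linux/printk.h>
#include <linux/workqueue.h>

static int my_payload;

static void my_work_fn(struct work_struct *work)
{
        /*
         * If queue_work() returned true, the store in my_kick() is
         * guaranteed to be visible here: payload reads as 42, never 0.
         */
        pr_info("payload=%d\n", READ_ONCE(my_payload));
}
static DECLARE_WORK(my_work, my_work_fn);

static void my_kick(void)
{
        WRITE_ONCE(my_payload, 42);
        if (!queue_work(system_wq, &my_work))
                pr_debug("already pending; no ordering claim for this call\n");
}
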
index 1abae3c..04e97ba 100644 (file)
@@ -16,6 +16,12 @@ struct sock;
 struct socket;
 struct rxrpc_call;
 
+enum rxrpc_interruptibility {
+       RXRPC_INTERRUPTIBLE,    /* Call is interruptible */
+       RXRPC_PREINTERRUPTIBLE, /* Call can be cancelled whilst waiting for a slot */
+       RXRPC_UNINTERRUPTIBLE,  /* Call should not be interruptible at all */
+};
+
 /*
  * Debug ID counter for tracing.
  */
@@ -41,7 +47,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
                                           gfp_t,
                                           rxrpc_notify_rx_t,
                                           bool,
-                                          bool,
+                                          enum rxrpc_interruptibility,
                                           unsigned int);
 int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
                           struct msghdr *, size_t,
@@ -58,9 +64,7 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
                               rxrpc_user_attach_call_t, unsigned long, gfp_t,
                               unsigned int);
 void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
-bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *,
-                            u32 *);
-void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
+bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
 u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
 bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
                                 ktime_t *);
index 54e227e..a259050 100644 (file)
@@ -108,6 +108,7 @@ struct fib_rule_notifier_info {
        [FRA_OIFNAME]   = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
        [FRA_PRIORITY]  = { .type = NLA_U32 }, \
        [FRA_FWMARK]    = { .type = NLA_U32 }, \
+       [FRA_TUN_ID]    = { .type = NLA_U64 }, \
        [FRA_FWMASK]    = { .type = NLA_U32 }, \
        [FRA_TABLE]     = { .type = NLA_U32 }, \
        [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
index 1512087..c30f914 100644 (file)
@@ -675,22 +675,6 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                               const struct qdisc_size_table *stab);
 int skb_do_redirect(struct sk_buff *);
 
-static inline void skb_reset_tc(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
-       skb->tc_redirected = 0;
-#endif
-}
-
-static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
-       return skb->tc_redirected;
-#else
-       return false;
-#endif
-}
-
 static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_CLS_ACT
index 0a50d53..7c08437 100644 (file)
@@ -74,7 +74,7 @@
 #define DEV_MAC_TAGS_CFG_TAG_ID_M                         GENMASK(31, 16)
 #define DEV_MAC_TAGS_CFG_TAG_ID_X(x)                      (((x) & GENMASK(31, 16)) >> 16)
 #define DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA                 BIT(2)
-#define DEV_MAC_TAGS_CFG_PB_ENA                           BIT(1)
+#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA                 BIT(1)
 #define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA                     BIT(0)
 
 #define DEV_MAC_ADV_CHK_CFG                               0x2c
index 564ba1b..c612cab 100644 (file)
@@ -233,7 +233,7 @@ enum afs_cb_break_reason {
        EM(afs_call_trace_get,                  "GET  ") \
        EM(afs_call_trace_put,                  "PUT  ") \
        EM(afs_call_trace_wake,                 "WAKE ") \
-       E_(afs_call_trace_work,                 "WORK ")
+       E_(afs_call_trace_work,                 "QUEUE")
 
 #define afs_server_traces \
        EM(afs_server_trace_alloc,              "ALLOC    ") \
index 1521073..8533bf0 100644 (file)
@@ -74,6 +74,8 @@ enum {
 #define IPPROTO_UDPLITE                IPPROTO_UDPLITE
   IPPROTO_MPLS = 137,          /* MPLS in IP (RFC 4023)                */
 #define IPPROTO_MPLS           IPPROTO_MPLS
+  IPPROTO_ETHERNET = 143,      /* Ethernet-within-IPv6 Encapsulation   */
+#define IPPROTO_ETHERNET       IPPROTO_ETHERNET
   IPPROTO_RAW = 255,           /* Raw IP packets                       */
 #define IPPROTO_RAW            IPPROTO_RAW
   IPPROTO_MPTCP = 262,         /* Multipath TCP connection             */
index 0f1db1c..6923dc7 100644 (file)
 /* Electronic privacy screen control */
 #define KEY_PRIVACY_SCREEN_TOGGLE      0x279
 
+/* Select an area of screen to be copied */
+#define KEY_SELECTIVE_SCREENSHOT       0x27a
+
 /*
  * Some keyboards have keys which do not have a defined meaning, these keys
  * are intended to be programmed / bound to macros by the user. For most
index 50e9919..ed2a96f 100644 (file)
@@ -9,7 +9,7 @@
 #ifndef _UAPI_SERIO_H
 #define _UAPI_SERIO_H
 
-
+#include <linux/const.h>
 #include <linux/ioctl.h>
 
 #define SPIOCSTYPE     _IOW('q', 0x01, unsigned long)
 /*
  * bit masks for use in "interrupt" flags (3rd argument)
  */
-#define SERIO_TIMEOUT  BIT(0)
-#define SERIO_PARITY   BIT(1)
-#define SERIO_FRAME    BIT(2)
-#define SERIO_OOB_DATA BIT(3)
+#define SERIO_TIMEOUT  _BITUL(0)
+#define SERIO_PARITY   _BITUL(1)
+#define SERIO_FRAME    _BITUL(2)
+#define SERIO_OOB_DATA _BITUL(3)
 
 /*
  * Serio types
index 20a6ac3..4f717bf 100644 (file)
@@ -767,8 +767,7 @@ config ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
        bool
 
 config CC_HAS_INT128
-       def_bool y
-       depends on !$(cc-option,-D__SIZEOF_INT128__=0)
+       def_bool !$(cc-option,$(m64-flag) -D__SIZEOF_INT128__=0) && 64BIT
 
 #
 # For architectures that know their GCC __int128 support is sound
index 042f955..68a89a9 100644 (file)
@@ -482,13 +482,21 @@ static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
        prev_state = cmpxchg(&st_map->kvalue.state,
                             BPF_STRUCT_OPS_STATE_INUSE,
                             BPF_STRUCT_OPS_STATE_TOBEFREE);
-       if (prev_state == BPF_STRUCT_OPS_STATE_INUSE) {
+       switch (prev_state) {
+       case BPF_STRUCT_OPS_STATE_INUSE:
                st_map->st_ops->unreg(&st_map->kvalue.data);
                if (refcount_dec_and_test(&st_map->kvalue.refcnt))
                        bpf_map_put(map);
+               return 0;
+       case BPF_STRUCT_OPS_STATE_TOBEFREE:
+               return -EINPROGRESS;
+       case BPF_STRUCT_OPS_STATE_INIT:
+               return -ENOENT;
+       default:
+               WARN_ON_ONCE(1);
+               /* Should never happen.  Treat it as not found. */
+               return -ENOENT;
        }
-
-       return 0;
 }
 
 static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
index 7871400..7787bdc 100644 (file)
@@ -2418,7 +2418,7 @@ static int btf_enum_check_member(struct btf_verifier_env *env,
 
        struct_size = struct_type->size;
        bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
-       if (struct_size - bytes_offset < sizeof(int)) {
+       if (struct_size - bytes_offset < member_type->size) {
                btf_verifier_log_member(env, struct_type, member,
                                        "Member exceeds struct_size");
                return -EINVAL;
@@ -4564,7 +4564,7 @@ int btf_get_info_by_fd(const struct btf *btf,
                       union bpf_attr __user *uattr)
 {
        struct bpf_btf_info __user *uinfo;
-       struct bpf_btf_info info = {};
+       struct bpf_btf_info info;
        u32 info_copy, btf_copy;
        void __user *ubtf;
        u32 uinfo_len;
@@ -4573,6 +4573,7 @@ int btf_get_info_by_fd(const struct btf *btf,
        uinfo_len = attr->info.info_len;
 
        info_copy = min_t(u32, uinfo_len, sizeof(info));
+       memset(&info, 0, sizeof(info));
        if (copy_from_user(&info, uinfo, info_copy))
                return -EFAULT;
 
index 9a500fa..4f14724 100644 (file)
@@ -227,6 +227,9 @@ cleanup:
        for (i = 0; i < NR; i++)
                bpf_prog_array_free(arrays[i]);
 
+       for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
+               cgroup_bpf_put(p);
+
        percpu_ref_exit(&cgrp->bpf.refcnt);
 
        return -ENOMEM;
@@ -302,8 +305,8 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
        u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
        struct list_head *progs = &cgrp->bpf.progs[type];
        struct bpf_prog *old_prog = NULL;
-       struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
-               *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
+       struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
+       struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
        struct bpf_prog_list *pl, *replace_pl = NULL;
        enum bpf_cgroup_storage_type stype;
        int err;
index a91ad51..966b7b3 100644 (file)
@@ -696,14 +696,15 @@ int bpf_get_file_flag(int flags)
                   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
                   sizeof(attr->CMD##_LAST_FIELD)) != NULL
 
-/* dst and src must have at least BPF_OBJ_NAME_LEN number of bytes.
- * Return 0 on success and < 0 on error.
+/* dst and src must have at least "size" number of bytes.
+ * Return strlen on success and < 0 on error.
  */
-static int bpf_obj_name_cpy(char *dst, const char *src)
+int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
 {
-       const char *end = src + BPF_OBJ_NAME_LEN;
+       const char *end = src + size;
+       const char *orig_src = src;
 
-       memset(dst, 0, BPF_OBJ_NAME_LEN);
+       memset(dst, 0, size);
        /* Copy all isalnum(), '_' and '.' chars. */
        while (src < end && *src) {
                if (!isalnum(*src) &&
@@ -712,11 +713,11 @@ static int bpf_obj_name_cpy(char *dst, const char *src)
                *dst++ = *src++;
        }
 
-       /* No '\0' found in BPF_OBJ_NAME_LEN number of bytes */
+       /* No '\0' found in "size" number of bytes */
        if (src == end)
                return -EINVAL;
 
-       return 0;
+       return src - orig_src;
 }
 
 int map_check_no_btf(const struct bpf_map *map,
@@ -810,8 +811,9 @@ static int map_create(union bpf_attr *attr)
        if (IS_ERR(map))
                return PTR_ERR(map);
 
-       err = bpf_obj_name_cpy(map->name, attr->map_name);
-       if (err)
+       err = bpf_obj_name_cpy(map->name, attr->map_name,
+                              sizeof(attr->map_name));
+       if (err < 0)
                goto free_map;
 
        atomic64_set(&map->refcnt, 1);
@@ -1510,6 +1512,11 @@ static int map_freeze(const union bpf_attr *attr)
        if (IS_ERR(map))
                return PTR_ERR(map);
 
+       if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
+               fdput(f);
+               return -ENOTSUPP;
+       }
+
        mutex_lock(&map->freeze_mutex);
 
        if (map->writecnt) {
@@ -2093,8 +2100,9 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
                goto free_prog;
 
        prog->aux->load_time = ktime_get_boottime_ns();
-       err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name);
-       if (err)
+       err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
+                              sizeof(attr->prog_name));
+       if (err < 0)
                goto free_prog;
 
        /* run eBPF verifier */
@@ -2787,7 +2795,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
                                   union bpf_attr __user *uattr)
 {
        struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
-       struct bpf_prog_info info = {};
+       struct bpf_prog_info info;
        u32 info_len = attr->info.info_len;
        struct bpf_prog_stats stats;
        char __user *uinsns;
@@ -2799,6 +2807,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
                return err;
        info_len = min_t(u32, sizeof(info), info_len);
 
+       memset(&info, 0, sizeof(info));
        if (copy_from_user(&info, uinfo, info_len))
                return -EFAULT;
 
@@ -3062,7 +3071,7 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map,
                                  union bpf_attr __user *uattr)
 {
        struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
-       struct bpf_map_info info = {};
+       struct bpf_map_info info;
        u32 info_len = attr->info.info_len;
        int err;
 
@@ -3071,6 +3080,7 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map,
                return err;
        info_len = min_t(u32, sizeof(info), info_len);
 
+       memset(&info, 0, sizeof(info));
        info.type = map->map_type;
        info.id = map->id;
        info.key_size = map->key_size;
@@ -3354,7 +3364,7 @@ err_put:
 
 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
 {
-       union bpf_attr attr = {};
+       union bpf_attr attr;
        int err;
 
        if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
@@ -3366,6 +3376,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
        size = min_t(u32, size, sizeof(attr));
 
        /* copy attributes from user space, may be less than sizeof(bpf_attr) */
+       memset(&attr, 0, sizeof(attr));
        if (copy_from_user(&attr, uattr, size) != 0)
                return -EFAULT;
 
index be1a1c8..f2d7cea 100644 (file)
@@ -471,6 +471,7 @@ static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
         */
        p++;
        if (p >= end) {
+               (*pos)++;
                return NULL;
        } else {
                *pos = *p;
@@ -782,7 +783,7 @@ void cgroup1_release_agent(struct work_struct *work)
 
        pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
        agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
-       if (!pathbuf || !agentbuf)
+       if (!pathbuf || !agentbuf || !strlen(agentbuf))
                goto out;
 
        spin_lock_irq(&css_set_lock);
index 75f6873..3dead04 100644 (file)
@@ -3542,21 +3542,21 @@ static int cpu_stat_show(struct seq_file *seq, void *v)
 static int cgroup_io_pressure_show(struct seq_file *seq, void *v)
 {
        struct cgroup *cgrp = seq_css(seq)->cgroup;
-       struct psi_group *psi = cgroup_id(cgrp) == 1 ? &psi_system : &cgrp->psi;
+       struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
 
        return psi_show(seq, psi, PSI_IO);
 }
 static int cgroup_memory_pressure_show(struct seq_file *seq, void *v)
 {
        struct cgroup *cgrp = seq_css(seq)->cgroup;
-       struct psi_group *psi = cgroup_id(cgrp) == 1 ? &psi_system : &cgrp->psi;
+       struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
 
        return psi_show(seq, psi, PSI_MEM);
 }
 static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v)
 {
        struct cgroup *cgrp = seq_css(seq)->cgroup;
-       struct psi_group *psi = cgroup_id(cgrp) == 1 ? &psi_system : &cgrp->psi;
+       struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
 
        return psi_show(seq, psi, PSI_CPU);
 }
@@ -4400,12 +4400,16 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
                }
        } while (!css_set_populated(cset) && list_empty(&cset->dying_tasks));
 
-       if (!list_empty(&cset->tasks))
+       if (!list_empty(&cset->tasks)) {
                it->task_pos = cset->tasks.next;
-       else if (!list_empty(&cset->mg_tasks))
+               it->cur_tasks_head = &cset->tasks;
+       } else if (!list_empty(&cset->mg_tasks)) {
                it->task_pos = cset->mg_tasks.next;
-       else
+               it->cur_tasks_head = &cset->mg_tasks;
+       } else {
                it->task_pos = cset->dying_tasks.next;
+               it->cur_tasks_head = &cset->dying_tasks;
+       }
 
        it->tasks_head = &cset->tasks;
        it->mg_tasks_head = &cset->mg_tasks;
@@ -4463,10 +4467,14 @@ repeat:
                else
                        it->task_pos = it->task_pos->next;
 
-               if (it->task_pos == it->tasks_head)
+               if (it->task_pos == it->tasks_head) {
                        it->task_pos = it->mg_tasks_head->next;
-               if (it->task_pos == it->mg_tasks_head)
+                       it->cur_tasks_head = it->mg_tasks_head;
+               }
+               if (it->task_pos == it->mg_tasks_head) {
                        it->task_pos = it->dying_tasks_head->next;
+                       it->cur_tasks_head = it->dying_tasks_head;
+               }
                if (it->task_pos == it->dying_tasks_head)
                        css_task_iter_advance_css_set(it);
        } else {
@@ -4485,11 +4493,12 @@ repeat:
                        goto repeat;
 
                /* and dying leaders w/o live member threads */
-               if (!atomic_read(&task->signal->live))
+               if (it->cur_tasks_head == it->dying_tasks_head &&
+                   !atomic_read(&task->signal->live))
                        goto repeat;
        } else {
                /* skip all dying ones */
-               if (task->flags & PF_EXITING)
+               if (it->cur_tasks_head == it->dying_tasks_head)
                        goto repeat;
        }
 }
@@ -4595,6 +4604,9 @@ static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos)
        struct kernfs_open_file *of = s->private;
        struct css_task_iter *it = of->priv;
 
+       if (pos)
+               (*pos)++;
+
        return css_task_iter_next(it);
 }
 
@@ -4610,7 +4622,7 @@ static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
         * from position 0, so we can simply keep iterating on !0 *pos.
         */
        if (!it) {
-               if (WARN_ON_ONCE((*pos)++))
+               if (WARN_ON_ONCE((*pos)))
                        return ERR_PTR(-EINVAL);
 
                it = kzalloc(sizeof(*it), GFP_KERNEL);
@@ -4618,10 +4630,11 @@ static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
                        return ERR_PTR(-ENOMEM);
                of->priv = it;
                css_task_iter_start(&cgrp->self, iter_flags, it);
-       } else if (!(*pos)++) {
+       } else if (!(*pos)) {
                css_task_iter_end(it);
                css_task_iter_start(&cgrp->self, iter_flags, it);
-       }
+       } else
+               return it->cur_task;
 
        return cgroup_procs_next(s, NULL, NULL);
 }
@@ -6258,6 +6271,10 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
                return;
        }
 
+       /* Don't associate the sock with unrelated interrupted task's cgroup. */
+       if (in_interrupt())
+               return;
+
        rcu_read_lock();
 
        while (true) {
index 8642530..d90af13 100644 (file)
@@ -397,8 +397,8 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
                mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
                                    THREAD_SIZE / 1024 * account);
 
-               mod_memcg_page_state(first_page, MEMCG_KERNEL_STACK_KB,
-                                    account * (THREAD_SIZE / 1024));
+               mod_memcg_obj_state(stack, MEMCG_KERNEL_STACK_KB,
+                                   account * (THREAD_SIZE / 1024));
        }
 }
 
index 0cf84c8..82dfacb 100644 (file)
@@ -385,9 +385,9 @@ static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
  */
 static struct futex_hash_bucket *hash_futex(union futex_key *key)
 {
-       u32 hash = jhash2((u32*)&key->both.word,
-                         (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
+       u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
                          key->both.offset);
+
        return &futex_queues[hash & (futex_hashsize - 1)];
 }
 
@@ -429,7 +429,7 @@ static void get_futex_key_refs(union futex_key *key)
 
        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
-               ihold(key->shared.inode); /* implies smp_mb(); (B) */
+               smp_mb();               /* explicit smp_mb(); (B) */
                break;
        case FUT_OFF_MMSHARED:
                futex_get_mm(key); /* implies smp_mb(); (B) */
@@ -463,7 +463,6 @@ static void drop_futex_key_refs(union futex_key *key)
 
        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
-               iput(key->shared.inode);
                break;
        case FUT_OFF_MMSHARED:
                mmdrop(key->private.mm);
@@ -505,6 +504,46 @@ futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
        return timeout;
 }
 
+/*
+ * Generate a machine wide unique identifier for this inode.
+ *
+ * This relies on u64 not wrapping in the lifetime of the machine, which with
+ * 1ns resolution means almost 585 years.
+ *
+ * This further relies on the fact that a well formed program will not unmap
+ * the file while it has a (shared) futex waiting on it. This mapping will have
+ * a file reference which pins the mount and inode.
+ *
+ * If for some reason an inode gets evicted and read back in again, it will get
+ * a new sequence number and will _NOT_ match, even though it is the exact same
+ * file.
+ *
+ * It is important that match_futex() will never have a false-positive, esp.
+ * for PI futexes that can mess up the state. The above argues that false-negatives
+ * are only possible for malformed programs.
+ */
+static u64 get_inode_sequence_number(struct inode *inode)
+{
+       static atomic64_t i_seq;
+       u64 old;
+
+       /* Does the inode already have a sequence number? */
+       old = atomic64_read(&inode->i_sequence);
+       if (likely(old))
+               return old;
+
+       for (;;) {
+               u64 new = atomic64_add_return(1, &i_seq);
+               if (WARN_ON_ONCE(!new))
+                       continue;
+
+               old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
+               if (old)
+                       return old;
+               return new;
+       }
+}
+
 /**
  * get_futex_key() - Get parameters which are the keys for a futex
  * @uaddr:     virtual address of the futex
@@ -517,9 +556,15 @@ futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
  *
  * The key words are stored in @key on success.
  *
- * For shared mappings, it's (page->index, file_inode(vma->vm_file),
- * offset_within_page).  For private mappings, it's (uaddr, current->mm).
- * We can usually work out the index without swapping in the page.
+ * For shared mappings (when @fshared), the key is:
+ *   ( inode->i_sequence, page->index, offset_within_page )
+ * [ also see get_inode_sequence_number() ]
+ *
+ * For private mappings (or when !@fshared), the key is:
+ *   ( current->mm, address, 0 )
+ *
+ * This allows (cross process, where applicable) identification of the futex
+ * without keeping the page pinned for the duration of the FUTEX_WAIT.
  *
  * lock_page() might sleep, the caller should not hold a spinlock.
  */
@@ -659,8 +704,6 @@ again:
                key->private.mm = mm;
                key->private.address = address;
 
-               get_futex_key_refs(key); /* implies smp_mb(); (B) */
-
        } else {
                struct inode *inode;
 
@@ -692,40 +735,14 @@ again:
                        goto again;
                }
 
-               /*
-                * Take a reference unless it is about to be freed. Previously
-                * this reference was taken by ihold under the page lock
-                * pinning the inode in place so i_lock was unnecessary. The
-                * only way for this check to fail is if the inode was
-                * truncated in parallel which is almost certainly an
-                * application bug. In such a case, just retry.
-                *
-                * We are not calling into get_futex_key_refs() in file-backed
-                * cases, therefore a successful atomic_inc return below will
-                * guarantee that get_futex_key() will still imply smp_mb(); (B).
-                */
-               if (!atomic_inc_not_zero(&inode->i_count)) {
-                       rcu_read_unlock();
-                       put_page(page);
-
-                       goto again;
-               }
-
-               /* Should be impossible but lets be paranoid for now */
-               if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
-                       err = -EFAULT;
-                       rcu_read_unlock();
-                       iput(inode);
-
-                       goto out;
-               }
-
                key->both.offset |= FUT_OFF_INODE; /* inode-based key */
-               key->shared.inode = inode;
+               key->shared.i_seq = get_inode_sequence_number(inode);
                key->shared.pgoff = basepage_index(tail);
                rcu_read_unlock();
        }
 
+       get_futex_key_refs(key); /* implies smp_mb(); (B) */
+
 out:
        put_page(page);
        return err;
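
get_inode_sequence_number() above assigns each inode a machine-wide identifier lazily, using a global atomic64 counter and a cmpxchg so the first assignment wins and no reference on the inode is needed. A hedged sketch of that allocation pattern in isolation (struct my_obj and my_obj_id() are illustrative stand-ins):

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/types.h>

struct my_obj {
        atomic64_t id;          /* 0 means "not assigned yet" */
};

static u64 my_obj_id(struct my_obj *obj)
{
        static atomic64_t next_id;
        u64 old = atomic64_read(&obj->id);

        if (likely(old))
                return old;             /* fast path: already assigned */

        for (;;) {
                u64 new = atomic64_add_return(1, &next_id);

                if (WARN_ON_ONCE(!new)) /* skip 0, it means "unset" */
                        continue;
                /* First writer wins; losers return the winner's value. */
                old = atomic64_cmpxchg_relaxed(&obj->id, 0, new);
                return old ?: new;
        }
}
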
index 7eee98c..fe40c65 100644 (file)
@@ -323,7 +323,11 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 
        if (desc->affinity_notify) {
                kref_get(&desc->affinity_notify->kref);
-               schedule_work(&desc->affinity_notify->work);
+               if (!schedule_work(&desc->affinity_notify->work)) {
+                       /* Work was already scheduled, drop our extra ref */
+                       kref_put(&desc->affinity_notify->kref,
+                                desc->affinity_notify->release);
+               }
        }
        irqd_set(data, IRQD_AFFINITY_SET);
 
@@ -423,7 +427,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
        raw_spin_unlock_irqrestore(&desc->lock, flags);
 
        if (old_notify) {
-               cancel_work_sync(&old_notify->work);
+               if (cancel_work_sync(&old_notify->work)) {
+                       /* Pending work had a ref, put that one too */
+                       kref_put(&old_notify->kref, old_notify->release);
+               }
                kref_put(&old_notify->kref, old_notify->release);
        }
 
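
The irq/manage.c hunks make the affinity-notify reference counting match the work queueing: a kref is taken for the queued work item, dropped again when schedule_work() reports the work was already pending, and dropped once more when cancel_work_sync() cancels a pending item. A hedged sketch of that discipline with invented names (struct my_notify, my_notify_kick(), my_notify_teardown()):

#include <linux/kref.h>
#include <linux/workqueue.h>

struct my_notify {
        struct kref kref;
        struct work_struct work;
        void (*release)(struct kref *kref);
};

static void my_notify_kick(struct my_notify *n)
{
        kref_get(&n->kref);                     /* ref owned by the queued work */
        if (!schedule_work(&n->work))
                kref_put(&n->kref, n->release); /* already queued: drop our ref */
}

static void my_notify_teardown(struct my_notify *n)
{
        if (cancel_work_sync(&n->work))
                kref_put(&n->kref, n->release); /* pending work held a ref */
        kref_put(&n->kref, n->release);         /* drop the caller's own ref */
}
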
index 63d7501..5989bbb 100644 (file)
@@ -519,7 +519,7 @@ NOKPROBE_SYMBOL(notify_die);
 
 int register_die_notifier(struct notifier_block *nb)
 {
-       vmalloc_sync_all();
+       vmalloc_sync_mappings();
        return atomic_notifier_chain_register(&die_chain, nb);
 }
 EXPORT_SYMBOL_GPL(register_die_notifier);
index 0f4ecb5..647b4bb 100644 (file)
@@ -247,6 +247,16 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
                tmp = tmp->parent;
        }
 
+       /*
+        * ENOMEM is not the most obvious choice especially for the case
+        * where the child subreaper has already exited and the pid
+        * namespace denies the creation of any new processes. But ENOMEM
+        * is what we have exposed to userspace for a long time and it is
+        * documented behavior for pid namespaces. So we can't easily
+        * change it even if there were an error code better suited.
+        */
+       retval = -ENOMEM;
+
        if (unlikely(is_child_reaper(pid))) {
                if (pid_ns_prepare_proc(ns))
                        goto out_free;
index f9bc5c3..d325f3a 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/syscalls.h>
 #include <linux/kprobes.h>
 #include <linux/user_namespace.h>
+#include <linux/time_namespace.h>
 #include <linux/binfmts.h>
 
 #include <linux/sched.h>
@@ -2546,6 +2547,7 @@ static int do_sysinfo(struct sysinfo *info)
        memset(info, 0, sizeof(struct sysinfo));
 
        ktime_get_boottime_ts64(&tp);
+       timens_add_boottime(&tp);
        info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
 
        get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
index 19e793a..68250d4 100644 (file)
@@ -732,7 +732,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
        if (unlikely(!nmi_uaccess_okay()))
                return -EPERM;
 
-       if (in_nmi()) {
+       if (irqs_disabled()) {
                /* Do an early check on signal validity. Otherwise,
                 * the error is lost in deferred irq_work.
                 */
index 3f7ee10..fd81c7d 100644 (file)
@@ -1547,6 +1547,8 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
                rec = bsearch(&key, pg->records, pg->index,
                              sizeof(struct dyn_ftrace),
                              ftrace_cmp_recs);
+               if (rec)
+                       break;
        }
        return rec;
 }
index 301db44..4e01c44 100644 (file)
@@ -1411,14 +1411,16 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
                return;
        rcu_read_lock();
 retry:
-       if (req_cpu == WORK_CPU_UNBOUND)
-               cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-
        /* pwq which will be used unless @work is executing elsewhere */
-       if (!(wq->flags & WQ_UNBOUND))
-               pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
-       else
+       if (wq->flags & WQ_UNBOUND) {
+               if (req_cpu == WORK_CPU_UNBOUND)
+                       cpu = wq_select_unbound_cpu(raw_smp_processor_id());
                pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
+       } else {
+               if (req_cpu == WORK_CPU_UNBOUND)
+                       cpu = raw_smp_processor_id();
+               pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+       }
 
        /*
         * If @work was previously on a different pool, it might still be
index c391a91..fa43ded 100644 (file)
@@ -9028,10 +9028,15 @@ bool __init chacha20poly1305_selftest(void)
             && total_len <= 1 << 10; ++total_len) {
                for (i = 0; i <= total_len; ++i) {
                        for (j = i; j <= total_len; ++j) {
+                               k = 0;
                                sg_init_table(sg_src, 3);
-                               sg_set_buf(&sg_src[0], input, i);
-                               sg_set_buf(&sg_src[1], input + i, j - i);
-                               sg_set_buf(&sg_src[2], input + j, total_len - j);
+                               if (i)
+                                       sg_set_buf(&sg_src[k++], input, i);
+                               if (j - i)
+                                       sg_set_buf(&sg_src[k++], input + i, j - i);
+                               if (total_len - j)
+                                       sg_set_buf(&sg_src[k++], input + j, total_len - j);
+                               sg_init_marker(sg_src, k);
                                memset(computed_output, 0, total_len);
                                memset(input, 0, total_len);
 
index e434b05..5280bcf 100644 (file)
@@ -240,8 +240,7 @@ again:
        if (!page_counter_try_charge(&h_cg->hugepage[idx], nr_pages,
                                     &counter)) {
                ret = -ENOMEM;
-               hugetlb_event(hugetlb_cgroup_from_counter(counter, idx), idx,
-                             HUGETLB_MAX);
+               hugetlb_event(h_cg, idx, HUGETLB_MAX);
        }
        css_put(&h_cg->css);
 done:
index 43b47d3..4bb30ed 100644 (file)
@@ -335,12 +335,14 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
                }
 
                page = pmd_page(orig_pmd);
+
+               /* Do not interfere with other mappings of this page */
+               if (page_mapcount(page) != 1)
+                       goto huge_unlock;
+
                if (next - addr != HPAGE_PMD_SIZE) {
                        int err;
 
-                       if (page_mapcount(page) != 1)
-                               goto huge_unlock;
-
                        get_page(page);
                        spin_unlock(ptl);
                        lock_page(page);
@@ -426,6 +428,10 @@ regular_page:
                        continue;
                }
 
+               /* Do not interfere with other mappings of this page */
+               if (page_mapcount(page) != 1)
+                       continue;
+
                VM_BUG_ON_PAGE(PageTransCompound(page), page);
 
                if (pte_young(ptent)) {
index d09776c..7ddf91c 100644 (file)
@@ -777,6 +777,17 @@ void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
        rcu_read_unlock();
 }
 
+void mod_memcg_obj_state(void *p, int idx, int val)
+{
+       struct mem_cgroup *memcg;
+
+       rcu_read_lock();
+       memcg = mem_cgroup_from_obj(p);
+       if (memcg)
+               mod_memcg_state(memcg, idx, val);
+       rcu_read_unlock();
+}
+
 /**
  * __count_memcg_events - account VM events in a cgroup
  * @memcg: the memory cgroup
@@ -2297,28 +2308,41 @@ static void high_work_func(struct work_struct *work)
  #define MEMCG_DELAY_SCALING_SHIFT 14
 
 /*
- * Scheduled by try_charge() to be executed from the userland return path
- * and reclaims memory over the high limit.
+ * Get the number of jiffies that we should penalise a mischievous cgroup which
+ * is exceeding its memory.high by checking both it and its ancestors.
  */
-void mem_cgroup_handle_over_high(void)
+static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
+                                         unsigned int nr_pages)
 {
-       unsigned long usage, high, clamped_high;
-       unsigned long pflags;
-       unsigned long penalty_jiffies, overage;
-       unsigned int nr_pages = current->memcg_nr_pages_over_high;
-       struct mem_cgroup *memcg;
+       unsigned long penalty_jiffies;
+       u64 max_overage = 0;
 
-       if (likely(!nr_pages))
-               return;
+       do {
+               unsigned long usage, high;
+               u64 overage;
 
-       memcg = get_mem_cgroup_from_mm(current->mm);
-       reclaim_high(memcg, nr_pages, GFP_KERNEL);
-       current->memcg_nr_pages_over_high = 0;
+               usage = page_counter_read(&memcg->memory);
+               high = READ_ONCE(memcg->high);
+
+               /*
+                * Prevent division by 0 in overage calculation by acting as if
+                * it was a threshold of 1 page
+                */
+               high = max(high, 1UL);
+
+               overage = usage - high;
+               overage <<= MEMCG_DELAY_PRECISION_SHIFT;
+               overage = div64_u64(overage, high);
+
+               if (overage > max_overage)
+                       max_overage = overage;
+       } while ((memcg = parent_mem_cgroup(memcg)) &&
+                !mem_cgroup_is_root(memcg));
+
+       if (!max_overage)
+               return 0;
 
        /*
-        * memory.high is breached and reclaim is unable to keep up. Throttle
-        * allocators proactively to slow down excessive growth.
-        *
         * We use overage compared to memory.high to calculate the number of
         * jiffies to sleep (penalty_jiffies). Ideally this value should be
         * fairly lenient on small overages, and increasingly harsh when the
@@ -2326,24 +2350,9 @@ void mem_cgroup_handle_over_high(void)
         * its crazy behaviour, so we exponentially increase the delay based on
         * overage amount.
         */
-
-       usage = page_counter_read(&memcg->memory);
-       high = READ_ONCE(memcg->high);
-
-       if (usage <= high)
-               goto out;
-
-       /*
-        * Prevent division by 0 in overage calculation by acting as if it was a
-        * threshold of 1 page
-        */
-       clamped_high = max(high, 1UL);
-
-       overage = div_u64((u64)(usage - high) << MEMCG_DELAY_PRECISION_SHIFT,
-                         clamped_high);
-
-       penalty_jiffies = ((u64)overage * overage * HZ)
-               >> (MEMCG_DELAY_PRECISION_SHIFT + MEMCG_DELAY_SCALING_SHIFT);
+       penalty_jiffies = max_overage * max_overage * HZ;
+       penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
+       penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
 
        /*
         * Factor in the task's own contribution to the overage, such that four
@@ -2360,7 +2369,32 @@ void mem_cgroup_handle_over_high(void)
         * application moving forwards and also permit diagnostics, albeit
         * extremely slowly.
         */
-       penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
+       return min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
+}
+
+/*
+ * Scheduled by try_charge() to be executed from the userland return path
+ * and reclaims memory over the high limit.
+ */
+void mem_cgroup_handle_over_high(void)
+{
+       unsigned long penalty_jiffies;
+       unsigned long pflags;
+       unsigned int nr_pages = current->memcg_nr_pages_over_high;
+       struct mem_cgroup *memcg;
+
+       if (likely(!nr_pages))
+               return;
+
+       memcg = get_mem_cgroup_from_mm(current->mm);
+       reclaim_high(memcg, nr_pages, GFP_KERNEL);
+       current->memcg_nr_pages_over_high = 0;
+
+       /*
+        * memory.high is breached and reclaim is unable to keep up. Throttle
+        * allocators proactively to slow down excessive growth.
+        */
+       penalty_jiffies = calculate_high_delay(memcg, nr_pages);
 
        /*
         * Don't sleep if the amount of jiffies this memcg owes us is so low
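
For reference, the throttling above is plain fixed-point arithmetic: the worst ancestor's overage is expressed as a fraction of memory.high scaled up by the precision shift, squared, multiplied by HZ, scaled back down by the precision and scaling shifts, and finally clamped. A minimal userspace sketch of that arithmetic, assuming the shift values of 20 and 14 and the 2*HZ clamp used in this tree, with HZ shown as 100 (names and example numbers are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define PRECISION_SHIFT 20             /* assumed MEMCG_DELAY_PRECISION_SHIFT */
#define SCALING_SHIFT   14             /* assumed MEMCG_DELAY_SCALING_SHIFT */
#define HZ              100
#define MAX_DELAY       (2UL * HZ)     /* assumed MEMCG_MAX_HIGH_DELAY_JIFFIES */

/* Penalty in jiffies for a given usage/high pair (both in pages). */
static unsigned long high_delay(uint64_t usage, uint64_t high)
{
        uint64_t overage, penalty;

        if (high == 0)
                high = 1;              /* avoid division by zero, as above */
        if (usage <= high)
                return 0;

        overage = ((usage - high) << PRECISION_SHIFT) / high;
        penalty = overage * overage * HZ;
        penalty >>= PRECISION_SHIFT + SCALING_SHIFT;

        return penalty < MAX_DELAY ? penalty : MAX_DELAY;
}

int main(void)
{
        /* ~8% over and 100% over a 262144-page high limit. */
        printf("%lu %lu\n", high_delay(282214, 262144),
               high_delay(524288, 262144));
        return 0;
}

With these constants the delay grows quadratically with the overage and saturates at the clamp once usage is well past the limit.
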
@@ -2638,6 +2672,33 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
 }
 
 #ifdef CONFIG_MEMCG_KMEM
+/*
+ * Returns a pointer to the memory cgroup to which the kernel object is charged.
+ *
+ * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
+ * cgroup_mutex, etc.
+ */
+struct mem_cgroup *mem_cgroup_from_obj(void *p)
+{
+       struct page *page;
+
+       if (mem_cgroup_disabled())
+               return NULL;
+
+       page = virt_to_head_page(p);
+
+       /*
+        * Slab pages don't have page->mem_cgroup set because the corresponding
+        * kmem caches can be reparented during their lifetime. That's why
+        * memcg_from_slab_page() should be used instead.
+        */
+       if (PageSlab(page))
+               return memcg_from_slab_page(page);
+
+       /* All other pages use page->mem_cgroup */
+       return page->mem_cgroup;
+}
+
 static int memcg_alloc_cache_id(void)
 {
        int id, size;
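
The comment above is the caller contract: the returned memcg is only stable while the caller pins it (RCU, cgroup_mutex, ...). A hedged sketch of a caller honouring that contract, with a made-up helper around the new function:

#include <linux/memcontrol.h>
#include <linux/rcupdate.h>

/* Hypothetical helper: check whether a kernel object is charged to 'target'. */
static bool obj_charged_to(void *obj, struct mem_cgroup *target)
{
        struct mem_cgroup *memcg;
        bool match;

        rcu_read_lock();                       /* pins the memcg, as required */
        memcg = mem_cgroup_from_obj(obj);
        match = (memcg == target);
        rcu_read_unlock();

        return match;
}
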
@@ -4027,7 +4088,7 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
        struct mem_cgroup_thresholds *thresholds;
        struct mem_cgroup_threshold_ary *new;
        unsigned long usage;
-       int i, j, size;
+       int i, j, size, entries;
 
        mutex_lock(&memcg->thresholds_lock);
 
@@ -4047,14 +4108,20 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
        __mem_cgroup_threshold(memcg, type == _MEMSWAP);
 
        /* Calculate new number of threshold */
-       size = 0;
+       size = entries = 0;
        for (i = 0; i < thresholds->primary->size; i++) {
                if (thresholds->primary->entries[i].eventfd != eventfd)
                        size++;
+               else
+                       entries++;
        }
 
        new = thresholds->spare;
 
+       /* If no items related to the eventfd were found, there is nothing to do */
+       if (!entries)
+               goto unlock;
+
        /* Set thresholds array to NULL if we don't have thresholds */
        if (!size) {
                kfree(new);
@@ -6682,19 +6749,9 @@ void mem_cgroup_sk_alloc(struct sock *sk)
        if (!mem_cgroup_sockets_enabled)
                return;
 
-       /*
-        * Socket cloning can throw us here with sk_memcg already
-        * filled. It won't however, necessarily happen from
-        * process context. So the test for root memcg given
-        * the current task's memcg won't help us in this case.
-        *
-        * Respecting the original socket's memcg is a better
-        * decision in this case.
-        */
-       if (sk->sk_memcg) {
-               css_get(&sk->sk_memcg->css);
+       /* Do not associate the sock with unrelated interrupted task's memcg. */
+       if (in_interrupt())
                return;
-       }
 
        rcu_read_lock();
        memcg = mem_cgroup_from_task(current);
index ef3973a..06852b8 100644 (file)
@@ -307,7 +307,8 @@ static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
         * ->release returns.
         */
        id = srcu_read_lock(&srcu);
-       hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist)
+       hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu))
                /*
                 * If ->release runs before mmu_notifier_unregister it must be
                 * handled, as it's the only way for the driver to flush all
@@ -370,7 +371,8 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
 
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
-                                &mm->notifier_subscriptions->list, hlist) {
+                                &mm->notifier_subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                if (subscription->ops->clear_flush_young)
                        young |= subscription->ops->clear_flush_young(
                                subscription, mm, start, end);
@@ -389,7 +391,8 @@ int __mmu_notifier_clear_young(struct mm_struct *mm,
 
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
-                                &mm->notifier_subscriptions->list, hlist) {
+                                &mm->notifier_subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                if (subscription->ops->clear_young)
                        young |= subscription->ops->clear_young(subscription,
                                                                mm, start, end);
@@ -407,7 +410,8 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
 
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
-                                &mm->notifier_subscriptions->list, hlist) {
+                                &mm->notifier_subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                if (subscription->ops->test_young) {
                        young = subscription->ops->test_young(subscription, mm,
                                                              address);
@@ -428,7 +432,8 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
 
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
-                                &mm->notifier_subscriptions->list, hlist) {
+                                &mm->notifier_subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                if (subscription->ops->change_pte)
                        subscription->ops->change_pte(subscription, mm, address,
                                                      pte);
@@ -476,7 +481,8 @@ static int mn_hlist_invalidate_range_start(
        int id;
 
        id = srcu_read_lock(&srcu);
-       hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist) {
+       hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                const struct mmu_notifier_ops *ops = subscription->ops;
 
                if (ops->invalidate_range_start) {
@@ -528,7 +534,8 @@ mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
        int id;
 
        id = srcu_read_lock(&srcu);
-       hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist) {
+       hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                /*
                 * Call invalidate_range here too to avoid the need for the
                 * subsystem of having to register an invalidate_range_end
@@ -582,7 +589,8 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm,
 
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
-                                &mm->notifier_subscriptions->list, hlist) {
+                                &mm->notifier_subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                if (subscription->ops->invalidate_range)
                        subscription->ops->invalidate_range(subscription, mm,
                                                            start, end);
@@ -714,7 +722,8 @@ find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
 
        spin_lock(&mm->notifier_subscriptions->lock);
        hlist_for_each_entry_rcu(subscription,
-                                &mm->notifier_subscriptions->list, hlist) {
+                                &mm->notifier_subscriptions->list, hlist,
+                                lockdep_is_held(&mm->notifier_subscriptions->lock)) {
                if (subscription->ops != ops)
                        continue;
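
All of these hunks apply the same idiom: when an RCU list is traversed under SRCU or a spinlock instead of rcu_read_lock(), the protection is spelled out in the optional lockdep expression of hlist_for_each_entry_rcu() so that PROVE_RCU does not complain about the missing reader lock. A minimal sketch of the idiom outside mmu_notifier (the struct and list are invented for illustration):

#include <linux/rculist.h>
#include <linux/srcu.h>

struct item {
        struct hlist_node node;
        int val;
};

DEFINE_STATIC_SRCU(item_srcu);
static HLIST_HEAD(item_list);

/* Readers hold item_srcu; tell RCU-lockdep about it explicitly. */
static int sum_items(void)
{
        struct item *it;
        int idx, sum = 0;

        idx = srcu_read_lock(&item_srcu);
        hlist_for_each_entry_rcu(it, &item_list, node,
                                 srcu_read_lock_held(&item_srcu))
                sum += it->val;
        srcu_read_unlock(&item_srcu, idx);

        return sum;
}

The find_get_mmu_notifier() hunk passes lockdep_is_held() instead, because there the list is walked under the subscriptions spinlock rather than SRCU.
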
 
index bd2b4e5..318df4e 100644 (file)
@@ -370,10 +370,14 @@ void vm_unmap_aliases(void)
 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 
 /*
- * Implement a stub for vmalloc_sync_all() if the architecture chose not to
- * have one.
+ * Implement stubs for vmalloc_sync_[un]mappings() if the architecture
+ * chose not to have them.
  */
-void __weak vmalloc_sync_all(void)
+void __weak vmalloc_sync_mappings(void)
+{
+}
+
+void __weak vmalloc_sync_unmappings(void)
 {
 }
 
index 17dc00e..6589b41 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1973,8 +1973,6 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
 
        if (node == NUMA_NO_NODE)
                searchnode = numa_mem_id();
-       else if (!node_present_pages(node))
-               searchnode = node_to_mem_node(node);
 
        object = get_partial_node(s, get_node(s, searchnode), c, flags);
        if (object || node != NUMA_NO_NODE)
@@ -2563,17 +2561,27 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
        struct page *page;
 
        page = c->page;
-       if (!page)
+       if (!page) {
+               /*
+                * if the node is not online or has no normal memory, just
+                * ignore the node constraint
+                */
+               if (unlikely(node != NUMA_NO_NODE &&
+                            !node_state(node, N_NORMAL_MEMORY)))
+                       node = NUMA_NO_NODE;
                goto new_slab;
+       }
 redo:
 
        if (unlikely(!node_match(page, node))) {
-               int searchnode = node;
-
-               if (node != NUMA_NO_NODE && !node_present_pages(node))
-                       searchnode = node_to_mem_node(node);
-
-               if (unlikely(!node_match(page, searchnode))) {
+               /*
+                * same as above but node_match() being false already
+                * implies node != NUMA_NO_NODE
+                */
+               if (!node_state(node, N_NORMAL_MEMORY)) {
+                       node = NUMA_NO_NODE;
+                       goto redo;
+               } else {
                        stat(s, ALLOC_NODE_MISMATCH);
                        deactivate_slab(s, page, c->freelist, c);
                        goto new_slab;
@@ -2997,11 +3005,13 @@ redo:
        barrier();
 
        if (likely(page == c->page)) {
-               set_freepointer(s, tail_obj, c->freelist);
+               void **freelist = READ_ONCE(c->freelist);
+
+               set_freepointer(s, tail_obj, freelist);
 
                if (unlikely(!this_cpu_cmpxchg_double(
                                s->cpu_slab->freelist, s->cpu_slab->tid,
-                               c->freelist, tid,
+                               freelist, tid,
                                head, next_tid(tid)))) {
 
                        note_cmpxchg_failure("slab_free", s, tid);
@@ -3175,6 +3185,15 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 
                if (unlikely(!object)) {
                        /*
+                        * We may have removed an object from c->freelist using
+                        * the fastpath in the previous iteration; in that case,
+                        * c->tid has not been bumped yet.
+                        * Since ___slab_alloc() may reenable interrupts while
+                        * allocating memory, we should bump c->tid now.
+                        */
+                       c->tid = next_tid(c->tid);
+
+                       /*
                         * Invoking slow path likely have side-effect
                         * of re-populating per CPU c->freelist
                         */
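
The READ_ONCE() change is about using a single snapshot of a lockless variable for two related purposes: the freelist value linked into the tail object and the 'old' value given to this_cpu_cmpxchg_double() should come from one read, so the list link and the compared value are guaranteed to be the same snapshot. The same rule on a generic compare-and-swap loop in plain C11 (not SLUB code, just the pattern):

#include <stdatomic.h>

struct node {
        struct node *next;
};

static _Atomic(struct node *) list_head;

/* Lock-free push: the pointer written into n->next and the expected value
 * of the CAS are always the same snapshot ('old'). */
static void push(struct node *n)
{
        struct node *old = atomic_load(&list_head);

        do {
                n->next = old;
        } while (!atomic_compare_exchange_weak(&list_head, &old, n));
}
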
index 596b2a4..65599e8 100644 (file)
@@ -734,6 +734,7 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
        struct mem_section *ms = __pfn_to_section(pfn);
        bool section_is_early = early_section(ms);
        struct page *memmap = NULL;
+       bool empty;
        unsigned long *subsection_map = ms->usage
                ? &ms->usage->subsection_map[0] : NULL;
 
@@ -764,7 +765,8 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
         * For 2/ and 3/ the SPARSEMEM_VMEMMAP={y,n} cases are unified
         */
        bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
-       if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION)) {
+       empty = bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION);
+       if (empty) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
 
                /*
@@ -779,13 +781,21 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
                        ms->usage = NULL;
                }
                memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
-               ms->section_mem_map = (unsigned long)NULL;
+               /*
+                * Mark the section invalid so that valid_section()
+                * returns false. This prevents code from dereferencing
+                * the ms->usage array.
+                */
+               ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
        }
 
        if (section_is_early && memmap)
                free_map_bootmem(memmap);
        else
                depopulate_section_memmap(pfn, nr_pages, altmap);
+
+       if (empty)
+               ms->section_mem_map = (unsigned long)NULL;
 }
 
 static struct page * __meminit section_activate(int nid, unsigned long pfn,
index b2a2e45..be33e61 100644 (file)
@@ -2899,10 +2899,6 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
                p->bdev = inode->i_sb->s_bdev;
        }
 
-       inode_lock(inode);
-       if (IS_SWAPFILE(inode))
-               return -EBUSY;
-
        return 0;
 }
 
@@ -3157,36 +3153,41 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
        mapping = swap_file->f_mapping;
        inode = mapping->host;
 
-       /* will take i_rwsem; */
        error = claim_swapfile(p, inode);
        if (unlikely(error))
                goto bad_swap;
 
+       inode_lock(inode);
+       if (IS_SWAPFILE(inode)) {
+               error = -EBUSY;
+               goto bad_swap_unlock_inode;
+       }
+
        /*
         * Read the swap header.
         */
        if (!mapping->a_ops->readpage) {
                error = -EINVAL;
-               goto bad_swap;
+               goto bad_swap_unlock_inode;
        }
        page = read_mapping_page(mapping, 0, swap_file);
        if (IS_ERR(page)) {
                error = PTR_ERR(page);
-               goto bad_swap;
+               goto bad_swap_unlock_inode;
        }
        swap_header = kmap(page);
 
        maxpages = read_swap_header(p, swap_header, inode);
        if (unlikely(!maxpages)) {
                error = -EINVAL;
-               goto bad_swap;
+               goto bad_swap_unlock_inode;
        }
 
        /* OK, set up the swap map and apply the bad block list */
        swap_map = vzalloc(maxpages);
        if (!swap_map) {
                error = -ENOMEM;
-               goto bad_swap;
+               goto bad_swap_unlock_inode;
        }
 
        if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
@@ -3211,7 +3212,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
                                        GFP_KERNEL);
                if (!cluster_info) {
                        error = -ENOMEM;
-                       goto bad_swap;
+                       goto bad_swap_unlock_inode;
                }
 
                for (ci = 0; ci < nr_cluster; ci++)
@@ -3220,7 +3221,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
                p->percpu_cluster = alloc_percpu(struct percpu_cluster);
                if (!p->percpu_cluster) {
                        error = -ENOMEM;
-                       goto bad_swap;
+                       goto bad_swap_unlock_inode;
                }
                for_each_possible_cpu(cpu) {
                        struct percpu_cluster *cluster;
@@ -3234,13 +3235,13 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 
        error = swap_cgroup_swapon(p->type, maxpages);
        if (error)
-               goto bad_swap;
+               goto bad_swap_unlock_inode;
 
        nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
                cluster_info, maxpages, &span);
        if (unlikely(nr_extents < 0)) {
                error = nr_extents;
-               goto bad_swap;
+               goto bad_swap_unlock_inode;
        }
        /* frontswap enabled? set up bit-per-page map for frontswap */
        if (IS_ENABLED(CONFIG_FRONTSWAP))
@@ -3280,7 +3281,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 
        error = init_swap_address_space(p->type, maxpages);
        if (error)
-               goto bad_swap;
+               goto bad_swap_unlock_inode;
 
        /*
         * Flush any pending IO and dirty mappings before we start using this
@@ -3290,7 +3291,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
        error = inode_drain_writes(inode);
        if (error) {
                inode->i_flags &= ~S_SWAPFILE;
-               goto bad_swap;
+               goto bad_swap_unlock_inode;
        }
 
        mutex_lock(&swapon_mutex);
@@ -3315,6 +3316,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 
        error = 0;
        goto out;
+bad_swap_unlock_inode:
+       inode_unlock(inode);
 bad_swap:
        free_percpu(p->percpu_cluster);
        p->percpu_cluster = NULL;
@@ -3322,6 +3325,7 @@ bad_swap:
                set_blocksize(p->bdev, p->old_block_size);
                blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
        }
+       inode = NULL;
        destroy_swap_extents(p);
        swap_cgroup_swapoff(p->type);
        spin_lock(&swap_lock);
@@ -3333,13 +3337,8 @@ bad_swap:
        kvfree(frontswap_map);
        if (inced_nr_rotate_swap)
                atomic_dec(&nr_rotate_swap);
-       if (swap_file) {
-               if (inode) {
-                       inode_unlock(inode);
-                       inode = NULL;
-               }
+       if (swap_file)
                filp_close(swap_file, NULL);
-       }
 out:
        if (page && !IS_ERR(page)) {
                kunmap(page);
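
The churn above is mechanical: once inode_lock() moves out of claim_swapfile() into swapon() itself, every later failure has to unwind through a label that drops the lock before falling into the old bad_swap path, and inode is NULLed so the shared cleanup further down does not unlock it a second time. A stripped-down sketch of that label ordering, with generic names rather than the swapfile code:

#include <linux/fs.h>

int claim_thing(void);                          /* hypothetical helpers */
int configure_thing(void);
void release_thing(void);

static int setup_thing(struct inode *inode)
{
        int err;

        err = claim_thing();                    /* stand-in for claim_swapfile() */
        if (err)
                goto bad;

        inode_lock(inode);

        err = configure_thing();                /* any later failure... */
        if (err)
                goto bad_unlock_inode;          /* ...must drop the lock first */

        inode_unlock(inode);
        return 0;

bad_unlock_inode:
        inode_unlock(inode);
bad:
        release_thing();                        /* stand-in for the bad_swap tail */
        return err;
}
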
index 1f46c3b..6b8eeb0 100644 (file)
@@ -1295,7 +1295,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
         * First make sure the mappings are removed from all page-tables
         * before they are freed.
         */
-       vmalloc_sync_all();
+       vmalloc_sync_unmappings();
 
        /*
         * TODO: to calculate a flush range without looping.
@@ -3128,16 +3128,19 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 EXPORT_SYMBOL(remap_vmalloc_range);
 
 /*
- * Implement a stub for vmalloc_sync_all() if the architecture chose not to
- * have one.
+ * Implement stubs for vmalloc_sync_[un]mappings() if the architecture chose
+ * not to have one.
  *
  * The purpose of this function is to make sure the vmalloc area
  * mappings are identical in all page-tables in the system.
  */
-void __weak vmalloc_sync_all(void)
+void __weak vmalloc_sync_mappings(void)
 {
 }
 
+void __weak vmalloc_sync_unmappings(void)
+{
+}
 
 static int f(pte_t *pte, unsigned long addr, void *data)
 {
index 2eeb0e5..df8d8c9 100644 (file)
@@ -52,6 +52,9 @@ config NET_INGRESS
 config NET_EGRESS
        bool
 
+config NET_REDIRECT
+       bool
+
 config SKB_EXTENSIONS
        bool
 
index f020950..a7c8dd7 100644 (file)
@@ -789,6 +789,10 @@ static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
 
        lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);
 
+       /* interface already disabled by batadv_iv_ogm_iface_disable */
+       if (!*ogm_buff)
+               return;
+
        /* the interface gets activated here to avoid race conditions between
         * the moment of activating the interface in
         * hardif_activate_interface() where the originator mac is set and
index 77396a0..efea487 100644 (file)
@@ -10,7 +10,7 @@
 #include <asm/unistd.h>
 #include "msgfmt.h"
 
-int debug_fd;
+FILE *debug_f;
 
 static int handle_get_cmd(struct mbox_request *cmd)
 {
@@ -35,9 +35,10 @@ static void loop(void)
                struct mbox_reply reply;
                int n;
 
+               fprintf(debug_f, "testing the buffer\n");
                n = read(0, &req, sizeof(req));
                if (n != sizeof(req)) {
-                       dprintf(debug_fd, "invalid request %d\n", n);
+                       fprintf(debug_f, "invalid request %d\n", n);
                        return;
                }
 
@@ -47,7 +48,7 @@ static void loop(void)
 
                n = write(1, &reply, sizeof(reply));
                if (n != sizeof(reply)) {
-                       dprintf(debug_fd, "reply failed %d\n", n);
+                       fprintf(debug_f, "reply failed %d\n", n);
                        return;
                }
        }
@@ -55,9 +56,10 @@ static void loop(void)
 
 int main(void)
 {
-       debug_fd = open("/dev/kmsg", 00000002);
-       dprintf(debug_fd, "Started bpfilter\n");
+       debug_f = fopen("/dev/kmsg", "w");
+       setvbuf(debug_f, 0, _IOLBF, 0);
+       fprintf(debug_f, "Started bpfilter\n");
        loop();
-       close(debug_fd);
+       fclose(debug_f);
        return 0;
 }
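
The switch from a raw fd and dprintf() to a stdio stream only behaves well for /dev/kmsg if every log line reaches the kernel in a single write, which is what the _IOLBF line buffering arranges. A small standalone illustration of the same setup (logging to stderr here so it runs anywhere):

#include <stdio.h>

int main(void)
{
        FILE *log = stderr;            /* stand-in for fopen("/dev/kmsg", "w") */

        setvbuf(log, NULL, _IOLBF, 0); /* flush on every newline */
        fprintf(log, "started, ready for requests\n");
        return 0;
}
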
index 03c7cdd..195d2d6 100644 (file)
@@ -112,7 +112,8 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
            caif_device_list(dev_net(dev));
        struct caif_device_entry *caifd;
 
-       list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
+       list_for_each_entry_rcu(caifd, &caifdevs->list, list,
+                               lockdep_rtnl_is_held()) {
                if (caifd->netdev == dev)
                        return caifd;
        }
index 5b4bd82..f8ca5ed 100644 (file)
@@ -3248,12 +3248,16 @@ static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg)
 
 static void ceph_msg_data_destroy(struct ceph_msg_data *data)
 {
-       if (data->type == CEPH_MSG_DATA_PAGELIST)
+       if (data->type == CEPH_MSG_DATA_PAGES && data->own_pages) {
+               int num_pages = calc_pages_for(data->alignment, data->length);
+               ceph_release_page_vector(data->pages, num_pages);
+       } else if (data->type == CEPH_MSG_DATA_PAGELIST) {
                ceph_pagelist_release(data->pagelist);
+       }
 }
 
 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
-               size_t length, size_t alignment)
+                            size_t length, size_t alignment, bool own_pages)
 {
        struct ceph_msg_data *data;
 
@@ -3265,6 +3269,7 @@ void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
        data->pages = pages;
        data->length = length;
        data->alignment = alignment & ~PAGE_MASK;
+       data->own_pages = own_pages;
 
        msg->data_length += length;
 }
index b68b376..af868d3 100644 (file)
@@ -962,7 +962,7 @@ static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
                BUG_ON(length > (u64) SIZE_MAX);
                if (length)
                        ceph_msg_data_add_pages(msg, osd_data->pages,
-                                       length, osd_data->alignment);
+                                       length, osd_data->alignment, false);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                BUG_ON(!length);
                ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
@@ -4436,9 +4436,7 @@ static void handle_watch_notify(struct ceph_osd_client *osdc,
                                                        CEPH_MSG_DATA_PAGES);
                                        *lreq->preply_pages = data->pages;
                                        *lreq->preply_len = data->length;
-                               } else {
-                                       ceph_release_page_vector(data->pages,
-                                              calc_pages_for(0, data->length));
+                                       data->own_pages = false;
                                }
                        }
                        lreq->notify_finish_error = return_code;
@@ -5506,9 +5504,6 @@ out_unlock_osdc:
        return m;
 }
 
-/*
- * TODO: switch to a msg-owned pagelist
- */
 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
 {
        struct ceph_msg *m;
@@ -5522,7 +5517,6 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
 
        if (data_len) {
                struct page **pages;
-               struct ceph_osd_data osd_data;
 
                pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
                                               GFP_NOIO);
@@ -5531,9 +5525,7 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
                        return NULL;
                }
 
-               ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
-                                        false);
-               ceph_osdc_msg_data_add(m, &osd_data);
+               ceph_msg_data_add_pages(m, pages, data_len, 0, true);
        }
 
        return m;
index 4e0de14..2a6e63a 100644 (file)
@@ -710,6 +710,15 @@ int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
 }
 EXPORT_SYMBOL(ceph_pg_poolid_by_name);
 
+u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id)
+{
+       struct ceph_pg_pool_info *pi;
+
+       pi = __lookup_pg_pool(&map->pg_pools, id);
+       return pi ? pi->flags : 0;
+}
+EXPORT_SYMBOL(ceph_pg_pool_flags);
+
 static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
 {
        rb_erase(&pi->node, root);
index c6c985f..500bba8 100644 (file)
@@ -4516,7 +4516,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
        /* Reinjected packets coming from act_mirred or similar should
         * not get XDP generic processing.
         */
-       if (skb_is_tc_redirected(skb))
+       if (skb_is_redirected(skb))
                return XDP_PASS;
 
        /* XDP packets must be linear and must have sufficient headroom
@@ -5063,7 +5063,7 @@ skip_taps:
                        goto out;
        }
 #endif
-       skb_reset_tc(skb);
+       skb_reset_redirect(skb);
 skip_classify:
        if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
                goto drop;
@@ -5195,7 +5195,7 @@ static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
  *
  *     More direct receive version of netif_receive_skb().  It should
  *     only be used by callers that have a need to skip RPS and Generic XDP.
- *     Caller must also take care of handling if (page_is_)pfmemalloc.
+ *     Caller must also take care of handling if ``(page_is_)pfmemalloc``.
  *
  *     This function may only be called from softirq context and interrupts
  *     should be enabled.
index 5e22080..b831c55 100644 (file)
@@ -3352,34 +3352,41 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
                                  struct genl_info *info,
                                  union devlink_param_value *value)
 {
+       struct nlattr *param_data;
        int len;
 
-       if (param->type != DEVLINK_PARAM_TYPE_BOOL &&
-           !info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])
+       param_data = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA];
+
+       if (param->type != DEVLINK_PARAM_TYPE_BOOL && !param_data)
                return -EINVAL;
 
        switch (param->type) {
        case DEVLINK_PARAM_TYPE_U8:
-               value->vu8 = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+               if (nla_len(param_data) != sizeof(u8))
+                       return -EINVAL;
+               value->vu8 = nla_get_u8(param_data);
                break;
        case DEVLINK_PARAM_TYPE_U16:
-               value->vu16 = nla_get_u16(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+               if (nla_len(param_data) != sizeof(u16))
+                       return -EINVAL;
+               value->vu16 = nla_get_u16(param_data);
                break;
        case DEVLINK_PARAM_TYPE_U32:
-               value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+               if (nla_len(param_data) != sizeof(u32))
+                       return -EINVAL;
+               value->vu32 = nla_get_u32(param_data);
                break;
        case DEVLINK_PARAM_TYPE_STRING:
-               len = strnlen(nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]),
-                             nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
-               if (len == nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) ||
+               len = strnlen(nla_data(param_data), nla_len(param_data));
+               if (len == nla_len(param_data) ||
                    len >= __DEVLINK_PARAM_MAX_STRING_VALUE)
                        return -EINVAL;
-               strcpy(value->vstr,
-                      nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
+               strcpy(value->vstr, nla_data(param_data));
                break;
        case DEVLINK_PARAM_TYPE_BOOL:
-               value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ?
-                              true : false;
+               if (param_data && nla_len(param_data))
+                       return -EINVAL;
+               value->vbool = nla_get_flag(param_data);
                break;
        }
        return 0;
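
Because DEVLINK_ATTR_PARAM_VALUE_DATA cannot have a fixed type in the netlink policy (its size depends on the parameter), the hunk above open-codes a length check before each nla_get_*() so a malformed attribute cannot be read past its payload. The shape of that check, reduced to a single hypothetical helper:

#include <net/netlink.h>

/* Illustrative only: fetch a u32 from an attribute whose size the policy
 * could not pin down, rejecting short or missing payloads. */
static int get_checked_u32(const struct nlattr *attr, u32 *out)
{
        if (!attr || nla_len(attr) != sizeof(u32))
                return -EINVAL;

        *out = nla_get_u32(attr);
        return 0;
}
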
@@ -5951,6 +5958,8 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
        [DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 },
        [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING },
        [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 },
+       [DEVLINK_ATTR_REGION_CHUNK_ADDR] = { .type = NLA_U64 },
+       [DEVLINK_ATTR_REGION_CHUNK_LEN] = { .type = NLA_U64 },
        [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING },
        [DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64 },
        [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 },
index 0642f91..b4c87fe 100644 (file)
@@ -53,30 +53,60 @@ static void cgrp_css_free(struct cgroup_subsys_state *css)
        kfree(css_cls_state(css));
 }
 
+/*
+ * To avoid stalling socket creation for tasks with a large number of threads
+ * and open sockets, release file_lock every 1000 iterated descriptors.
+ * New sockets will already have been created with the new classid.
+ */
+
+struct update_classid_context {
+       u32 classid;
+       unsigned int batch;
+};
+
+#define UPDATE_CLASSID_BATCH 1000
+
 static int update_classid_sock(const void *v, struct file *file, unsigned n)
 {
        int err;
+       struct update_classid_context *ctx = (void *)v;
        struct socket *sock = sock_from_file(file, &err);
 
        if (sock) {
                spin_lock(&cgroup_sk_update_lock);
-               sock_cgroup_set_classid(&sock->sk->sk_cgrp_data,
-                                       (unsigned long)v);
+               sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid);
                spin_unlock(&cgroup_sk_update_lock);
        }
+       if (--ctx->batch == 0) {
+               ctx->batch = UPDATE_CLASSID_BATCH;
+               return n + 1;
+       }
        return 0;
 }
 
+static void update_classid_task(struct task_struct *p, u32 classid)
+{
+       struct update_classid_context ctx = {
+               .classid = classid,
+               .batch = UPDATE_CLASSID_BATCH
+       };
+       unsigned int fd = 0;
+
+       do {
+               task_lock(p);
+               fd = iterate_fd(p->files, fd, update_classid_sock, &ctx);
+               task_unlock(p);
+               cond_resched();
+       } while (fd);
+}
+
 static void cgrp_attach(struct cgroup_taskset *tset)
 {
        struct cgroup_subsys_state *css;
        struct task_struct *p;
 
        cgroup_taskset_for_each(p, css, tset) {
-               task_lock(p);
-               iterate_fd(p->files, 0, update_classid_sock,
-                          (void *)(unsigned long)css_cls_state(css)->classid);
-               task_unlock(p);
+               update_classid_task(p, css_cls_state(css)->classid);
        }
 }
 
@@ -98,10 +128,7 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
 
        css_task_iter_start(css, 0, &it);
        while ((p = css_task_iter_next(&it))) {
-               task_lock(p);
-               iterate_fd(p->files, 0, update_classid_sock,
-                          (void *)(unsigned long)cs->classid);
-               task_unlock(p);
+               update_classid_task(p, cs->classid);
                cond_resched();
        }
        css_task_iter_end(&it);
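
The batching works through the iterate_fd() contract: a non-zero return from the callback stops the walk and becomes iterate_fd()'s return value, so returning n + 1 hands back the descriptor to resume from once the caller has dropped the task lock and rescheduled. A reduced sketch of that resumable walk with a generic callback (not the classid code):

#include <linux/fdtable.h>
#include <linux/sched.h>
#include <linux/sched/task.h>

#define WALK_BATCH 1000

struct walk_ctx {
        unsigned int left;
};

/* Non-zero return stops iterate_fd(); n + 1 is where to resume next pass. */
static int walk_one(const void *v, struct file *file, unsigned n)
{
        struct walk_ctx *ctx = (void *)v;

        /* per-file work would go here */

        if (--ctx->left == 0) {
                ctx->left = WALK_BATCH;
                return n + 1;
        }
        return 0;
}

static void walk_task_files(struct task_struct *p)
{
        struct walk_ctx ctx = { .left = WALK_BATCH };
        unsigned int fd = 0;

        do {
                task_lock(p);
                fd = iterate_fd(p->files, fd, walk_one, &ctx);
                task_unlock(p);
                cond_resched();
        } while (fd);
}
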
index acc849d..d0641bb 100644 (file)
@@ -3362,7 +3362,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
                        /* skb was 'freed' by stack, so clean few
                         * bits and reuse it
                         */
-                       skb_reset_tc(skb);
+                       skb_reset_redirect(skb);
                } while (--burst > 0);
                goto out; /* Skips xmit_mode M_START_XMIT */
        } else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
index a4c8fac..8f71684 100644 (file)
@@ -1830,7 +1830,10 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                atomic_set(&newsk->sk_zckey, 0);
 
                sock_reset_flag(newsk, SOCK_DONE);
-               mem_cgroup_sk_alloc(newsk);
+
+               /* sk->sk_memcg will be populated at accept() time */
+               newsk->sk_memcg = NULL;
+
                cgroup_sk_alloc(&newsk->sk_cgrp_data);
 
                rcu_read_lock();
index 085cef5..b70c844 100644 (file)
@@ -233,8 +233,11 @@ static void sock_map_free(struct bpf_map *map)
        struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
        int i;
 
+       /* After the sync no updates or deletes will be in-flight, so it
+        * is safe to walk the map and remove entries without risking a race
+        * with the EEXIST update case.
+        */
        synchronize_rcu();
-       raw_spin_lock_bh(&stab->lock);
        for (i = 0; i < stab->map.max_entries; i++) {
                struct sock **psk = &stab->sks[i];
                struct sock *sk;
@@ -248,7 +251,6 @@ static void sock_map_free(struct bpf_map *map)
                        release_sock(sk);
                }
        }
-       raw_spin_unlock_bh(&stab->lock);
 
        /* wait for psock readers accessing its map link */
        synchronize_rcu();
@@ -863,10 +865,13 @@ static void sock_hash_free(struct bpf_map *map)
        struct hlist_node *node;
        int i;
 
+       /* After the sync no updates or deletes will be in-flight, so it
+        * is safe to walk the map and remove entries without risking a race
+        * with the EEXIST update case.
+        */
        synchronize_rcu();
        for (i = 0; i < htab->buckets_num; i++) {
                bucket = sock_hash_select_bucket(htab, i);
-               raw_spin_lock_bh(&bucket->lock);
                hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
                        hlist_del_rcu(&elem->node);
                        lock_sock(elem->sk);
@@ -875,7 +880,6 @@ static void sock_hash_free(struct bpf_map *map)
                        rcu_read_unlock();
                        release_sock(elem->sk);
                }
-               raw_spin_unlock_bh(&bucket->lock);
        }
 
        /* wait for psock readers accessing its map link */
index a7662e7..760e6ea 100644 (file)
@@ -117,7 +117,9 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
 /* port.c */
 int dsa_port_set_state(struct dsa_port *dp, u8 state,
                       struct switchdev_trans *trans);
+int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
 int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
+void dsa_port_disable_rt(struct dsa_port *dp);
 void dsa_port_disable(struct dsa_port *dp);
 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br);
 void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
index 774facb..ec13dc6 100644 (file)
@@ -63,7 +63,7 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
                pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
 }
 
-int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
+int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
 {
        struct dsa_switch *ds = dp->ds;
        int port = dp->index;
@@ -78,14 +78,31 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
        if (!dp->bridge_dev)
                dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
 
+       if (dp->pl)
+               phylink_start(dp->pl);
+
        return 0;
 }
 
-void dsa_port_disable(struct dsa_port *dp)
+int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
+{
+       int err;
+
+       rtnl_lock();
+       err = dsa_port_enable_rt(dp, phy);
+       rtnl_unlock();
+
+       return err;
+}
+
+void dsa_port_disable_rt(struct dsa_port *dp)
 {
        struct dsa_switch *ds = dp->ds;
        int port = dp->index;
 
+       if (dp->pl)
+               phylink_stop(dp->pl);
+
        if (!dp->bridge_dev)
                dsa_port_set_state_now(dp, BR_STATE_DISABLED);
 
@@ -93,6 +110,13 @@ void dsa_port_disable(struct dsa_port *dp)
                ds->ops->port_disable(ds, port);
 }
 
+void dsa_port_disable(struct dsa_port *dp)
+{
+       rtnl_lock();
+       dsa_port_disable_rt(dp);
+       rtnl_unlock();
+}
+
 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
 {
        struct dsa_notifier_bridge_info info = {
@@ -614,10 +638,6 @@ static int dsa_port_phylink_register(struct dsa_port *dp)
                goto err_phy_connect;
        }
 
-       rtnl_lock();
-       phylink_start(dp->pl);
-       rtnl_unlock();
-
        return 0;
 
 err_phy_connect:
@@ -628,9 +648,14 @@ err_phy_connect:
 int dsa_port_link_register_of(struct dsa_port *dp)
 {
        struct dsa_switch *ds = dp->ds;
+       struct device_node *phy_np;
 
-       if (!ds->ops->adjust_link)
-               return dsa_port_phylink_register(dp);
+       if (!ds->ops->adjust_link) {
+               phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
+               if (of_phy_is_fixed_link(dp->dn) || phy_np)
+                       return dsa_port_phylink_register(dp);
+               return 0;
+       }
 
        dev_warn(ds->dev,
                 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
@@ -645,11 +670,12 @@ void dsa_port_link_unregister_of(struct dsa_port *dp)
 {
        struct dsa_switch *ds = dp->ds;
 
-       if (!ds->ops->adjust_link) {
+       if (!ds->ops->adjust_link && dp->pl) {
                rtnl_lock();
                phylink_disconnect_phy(dp->pl);
                rtnl_unlock();
                phylink_destroy(dp->pl);
+               dp->pl = NULL;
                return;
        }
 
index 088c886..ddc0f92 100644 (file)
@@ -88,12 +88,10 @@ static int dsa_slave_open(struct net_device *dev)
                        goto clear_allmulti;
        }
 
-       err = dsa_port_enable(dp, dev->phydev);
+       err = dsa_port_enable_rt(dp, dev->phydev);
        if (err)
                goto clear_promisc;
 
-       phylink_start(dp->pl);
-
        return 0;
 
 clear_promisc:
@@ -114,9 +112,7 @@ static int dsa_slave_close(struct net_device *dev)
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_port *dp = dsa_slave_to_port(dev);
 
-       phylink_stop(dp->pl);
-
-       dsa_port_disable(dp);
+       dsa_port_disable_rt(dp);
 
        dev_mc_unsync(master, dev);
        dev_uc_unsync(master, dev);
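
The _rt suffix in the DSA port and slave hunks above follows the usual kernel convention for lock-context variants: dsa_port_enable_rt()/dsa_port_disable_rt() assume the caller already holds the rtnl lock (as the slave open/close paths do), while the plain-named wrappers take and release it for setup paths that do not. A minimal sketch of that split with generic names, not the DSA API:

#include <linux/rtnetlink.h>

struct widget;

/* Caller must hold rtnl (e.g. an ndo_open/ndo_stop path). */
static int widget_enable_rt(struct widget *w)
{
        ASSERT_RTNL();
        /* start phylink, program the port, ... */
        return 0;
}

/* Convenience wrapper for contexts that do not hold rtnl yet. */
static int widget_enable(struct widget *w)
{
        int err;

        rtnl_lock();
        err = widget_enable_rt(w);
        rtnl_unlock();

        return err;
}
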
index 2fb6c26..b97ad93 100644 (file)
@@ -298,47 +298,4 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
 }
 EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
 
-/* In the DSA packet_type handler, skb->data points in the middle of the VLAN
- * tag, after tpid and before tci. This is because so far, ETH_HLEN
- * (DMAC, SMAC, EtherType) bytes were pulled.
- * There are 2 bytes of VLAN tag left in skb->data, and upper
- * layers expect the 'real' EtherType to be consumed as well.
- * Coincidentally, a VLAN header is also of the same size as
- * the number of bytes that need to be pulled.
- *
- * skb_mac_header                                      skb->data
- * |                                                       |
- * v                                                       v
- * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
- * +-----------------------+-----------------------+-------+-------+-------+
- * |    Destination MAC    |      Source MAC       |  TPID |  TCI  | EType |
- * +-----------------------+-----------------------+-------+-------+-------+
- * ^                                               |               |
- * |<--VLAN_HLEN-->to                              <---VLAN_HLEN--->
- * from            |
- *       >>>>>>>   v
- *       >>>>>>>   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
- *       >>>>>>>   +-----------------------+-----------------------+-------+
- *       >>>>>>>   |    Destination MAC    |      Source MAC       | EType |
- *                 +-----------------------+-----------------------+-------+
- *                 ^                                                       ^
- * (now part of    |                                                       |
- *  skb->head)     skb_mac_header                                  skb->data
- */
-struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb)
-{
-       u8 *from = skb_mac_header(skb);
-       u8 *dest = from + VLAN_HLEN;
-
-       memmove(dest, from, ETH_HLEN - VLAN_HLEN);
-       skb_pull(skb, VLAN_HLEN);
-       skb_push(skb, ETH_HLEN);
-       skb_reset_mac_header(skb);
-       skb_reset_mac_len(skb);
-       skb_pull_rcsum(skb, ETH_HLEN);
-
-       return skb;
-}
-EXPORT_SYMBOL_GPL(dsa_8021q_remove_header);
-
 MODULE_LICENSE("GPL v2");
index 9c31141..9169b63 100644 (file)
@@ -140,6 +140,8 @@ static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb,
        /* Remove Broadcom tag and update checksum */
        skb_pull_rcsum(skb, BRCM_TAG_LEN);
 
+       skb->offload_fwd_mark = 1;
+
        return skb;
 }
 #endif
index 5366ea4..d553bf3 100644 (file)
@@ -250,14 +250,14 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
 {
        struct sja1105_meta meta = {0};
        int source_port, switch_id;
-       struct vlan_ethhdr *hdr;
+       struct ethhdr *hdr;
        u16 tpid, vid, tci;
        bool is_link_local;
        bool is_tagged;
        bool is_meta;
 
-       hdr = vlan_eth_hdr(skb);
-       tpid = ntohs(hdr->h_vlan_proto);
+       hdr = eth_hdr(skb);
+       tpid = ntohs(hdr->h_proto);
        is_tagged = (tpid == ETH_P_SJA1105);
        is_link_local = sja1105_is_link_local(skb);
        is_meta = sja1105_is_meta_frame(skb);
@@ -266,7 +266,12 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
 
        if (is_tagged) {
                /* Normal traffic path. */
-               tci = ntohs(hdr->h_vlan_TCI);
+               skb_push_rcsum(skb, ETH_HLEN);
+               __skb_vlan_pop(skb, &tci);
+               skb_pull_rcsum(skb, ETH_HLEN);
+               skb_reset_network_header(skb);
+               skb_reset_transport_header(skb);
+
                vid = tci & VLAN_VID_MASK;
                source_port = dsa_8021q_rx_source_port(vid);
                switch_id = dsa_8021q_rx_switch_id(vid);
@@ -295,12 +300,6 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
                return NULL;
        }
 
-       /* Delete/overwrite fake VLAN header, DSA expects to not find
-        * it there, see dsa_switch_rcv: skb_push(skb, ETH_HLEN).
-        */
-       if (is_tagged)
-               skb = dsa_8021q_remove_header(skb);
-
        return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
                                              is_meta);
 }
index aaef484..92599ad 100644 (file)
@@ -107,8 +107,9 @@ int ethnl_set_debug(struct sk_buff *skb, struct genl_info *info)
        if (ret < 0)
                return ret;
        dev = req_info.dev;
+       ret = -EOPNOTSUPP;
        if (!dev->ethtool_ops->get_msglevel || !dev->ethtool_ops->set_msglevel)
-               return -EOPNOTSUPP;
+               goto out_dev;
 
        rtnl_lock();
        ret = ethnl_ops_begin(dev);
@@ -129,6 +130,7 @@ out_ops:
        ethnl_ops_complete(dev);
 out_rtnl:
        rtnl_unlock();
+out_dev:
        dev_put(dev);
        return ret;
 }
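
This hunk, and the matching ones in the linkinfo, linkmodes and wol files below, plugs a reference leak: ethnl_parse_header() hands back the device with a reference held, so returning -EOPNOTSUPP directly skipped the dev_put() at out_dev. The general shape of the fix, reduced to an illustrative function:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Illustrative only: every early exit after the lookup must go through the
 * label that drops the reference taken by dev_get_by_index(). */
static int do_msglevel_op(struct net *net, int ifindex)
{
        struct net_device *dev;
        int ret;

        dev = dev_get_by_index(net, ifindex);  /* takes a reference */
        if (!dev)
                return -ENODEV;

        ret = -EOPNOTSUPP;
        if (!dev->ethtool_ops || !dev->ethtool_ops->get_msglevel)
                goto out_dev;

        ret = 0;                               /* real work would go here */
out_dev:
        dev_put(dev);
        return ret;
}
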
index 5d16cb4..6e9e0b5 100644 (file)
@@ -126,9 +126,10 @@ int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info)
        if (ret < 0)
                return ret;
        dev = req_info.dev;
+       ret = -EOPNOTSUPP;
        if (!dev->ethtool_ops->get_link_ksettings ||
            !dev->ethtool_ops->set_link_ksettings)
-               return -EOPNOTSUPP;
+               goto out_dev;
 
        rtnl_lock();
        ret = ethnl_ops_begin(dev);
@@ -162,6 +163,7 @@ out_ops:
        ethnl_ops_complete(dev);
 out_rtnl:
        rtnl_unlock();
+out_dev:
        dev_put(dev);
        return ret;
 }
index 96f20be..18cc37b 100644 (file)
@@ -338,9 +338,10 @@ int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info)
        if (ret < 0)
                return ret;
        dev = req_info.dev;
+       ret = -EOPNOTSUPP;
        if (!dev->ethtool_ops->get_link_ksettings ||
            !dev->ethtool_ops->set_link_ksettings)
-               return -EOPNOTSUPP;
+               goto out_dev;
 
        rtnl_lock();
        ret = ethnl_ops_begin(dev);
@@ -370,6 +371,7 @@ out_ops:
        ethnl_ops_complete(dev);
 out_rtnl:
        rtnl_unlock();
+out_dev:
        dev_put(dev);
        return ret;
 }
index 180c194..fc9e0b8 100644 (file)
@@ -40,6 +40,7 @@ int ethnl_parse_header(struct ethnl_req_info *req_info,
        struct nlattr *tb[ETHTOOL_A_HEADER_MAX + 1];
        const struct nlattr *devname_attr;
        struct net_device *dev = NULL;
+       u32 flags = 0;
        int ret;
 
        if (!header) {
@@ -50,8 +51,17 @@ int ethnl_parse_header(struct ethnl_req_info *req_info,
                               ethnl_header_policy, extack);
        if (ret < 0)
                return ret;
-       devname_attr = tb[ETHTOOL_A_HEADER_DEV_NAME];
+       if (tb[ETHTOOL_A_HEADER_FLAGS]) {
+               flags = nla_get_u32(tb[ETHTOOL_A_HEADER_FLAGS]);
+               if (flags & ~ETHTOOL_FLAG_ALL) {
+                       NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_HEADER_FLAGS],
+                                           "unrecognized request flags");
+                       nl_set_extack_cookie_u32(extack, ETHTOOL_FLAG_ALL);
+                       return -EOPNOTSUPP;
+               }
+       }
 
+       devname_attr = tb[ETHTOOL_A_HEADER_DEV_NAME];
        if (tb[ETHTOOL_A_HEADER_DEV_INDEX]) {
                u32 ifindex = nla_get_u32(tb[ETHTOOL_A_HEADER_DEV_INDEX]);
 
@@ -90,9 +100,7 @@ int ethnl_parse_header(struct ethnl_req_info *req_info,
        }
 
        req_info->dev = dev;
-       if (tb[ETHTOOL_A_HEADER_FLAGS])
-               req_info->flags = nla_get_u32(tb[ETHTOOL_A_HEADER_FLAGS]);
-
+       req_info->flags = flags;
        return 0;
 }
 
index e1b8a65..55e1eca 100644 (file)
@@ -128,8 +128,9 @@ int ethnl_set_wol(struct sk_buff *skb, struct genl_info *info)
        if (ret < 0)
                return ret;
        dev = req_info.dev;
+       ret = -EOPNOTSUPP;
        if (!dev->ethtool_ops->get_wol || !dev->ethtool_ops->set_wol)
-               return -EOPNOTSUPP;
+               goto out_dev;
 
        rtnl_lock();
        ret = ethnl_ops_begin(dev);
@@ -172,6 +173,7 @@ out_ops:
        ethnl_ops_complete(dev);
 out_rtnl:
        rtnl_unlock();
+out_dev:
        dev_put(dev);
        return ret;
 }
index 3ba7f61..a64bb64 100644 (file)
@@ -482,12 +482,9 @@ int hsr_get_node_data(struct hsr_priv *hsr,
        struct hsr_port *port;
        unsigned long tdiff;
 
-       rcu_read_lock();
        node = find_node_by_addr_A(&hsr->node_db, addr);
-       if (!node) {
-               rcu_read_unlock();
-               return -ENOENT; /* No such entry */
-       }
+       if (!node)
+               return -ENOENT;
 
        ether_addr_copy(addr_b, node->macaddress_B);
 
@@ -522,7 +519,5 @@ int hsr_get_node_data(struct hsr_priv *hsr,
                *addr_b_ifindex = -1;
        }
 
-       rcu_read_unlock();
-
        return 0;
 }
index 8dc0547..fae21c8 100644 (file)
@@ -251,15 +251,16 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
        if (!na)
                goto invalid;
 
-       hsr_dev = __dev_get_by_index(genl_info_net(info),
-                                    nla_get_u32(info->attrs[HSR_A_IFINDEX]));
+       rcu_read_lock();
+       hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
+                                      nla_get_u32(info->attrs[HSR_A_IFINDEX]));
        if (!hsr_dev)
-               goto invalid;
+               goto rcu_unlock;
        if (!is_hsr_master(hsr_dev))
-               goto invalid;
+               goto rcu_unlock;
 
        /* Send reply */
-       skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
        if (!skb_out) {
                res = -ENOMEM;
                goto fail;
@@ -313,12 +314,10 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
        res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
        if (res < 0)
                goto nla_put_failure;
-       rcu_read_lock();
        port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
        if (port)
                res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
                                  port->dev->ifindex);
-       rcu_read_unlock();
        if (res < 0)
                goto nla_put_failure;
 
@@ -328,20 +327,22 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
        res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
        if (res < 0)
                goto nla_put_failure;
-       rcu_read_lock();
        port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
        if (port)
                res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
                                  port->dev->ifindex);
-       rcu_read_unlock();
        if (res < 0)
                goto nla_put_failure;
 
+       rcu_read_unlock();
+
        genlmsg_end(skb_out, msg_head);
        genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
 
        return 0;
 
+rcu_unlock:
+       rcu_read_unlock();
 invalid:
        netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
        return 0;
@@ -351,6 +352,7 @@ nla_put_failure:
        /* Fall through */
 
 fail:
+       rcu_read_unlock();
        return res;
 }
 
@@ -358,16 +360,14 @@ fail:
  */
 static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
 {
-       /* For receiving */
-       struct nlattr *na;
+       unsigned char addr[ETH_ALEN];
        struct net_device *hsr_dev;
-
-       /* For sending */
        struct sk_buff *skb_out;
-       void *msg_head;
        struct hsr_priv *hsr;
-       void *pos;
-       unsigned char addr[ETH_ALEN];
+       bool restart = false;
+       struct nlattr *na;
+       void *pos = NULL;
+       void *msg_head;
        int res;
 
        if (!info)
@@ -377,15 +377,17 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
        if (!na)
                goto invalid;
 
-       hsr_dev = __dev_get_by_index(genl_info_net(info),
-                                    nla_get_u32(info->attrs[HSR_A_IFINDEX]));
+       rcu_read_lock();
+       hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
+                                      nla_get_u32(info->attrs[HSR_A_IFINDEX]));
        if (!hsr_dev)
-               goto invalid;
+               goto rcu_unlock;
        if (!is_hsr_master(hsr_dev))
-               goto invalid;
+               goto rcu_unlock;
 
+restart:
        /* Send reply */
-       skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!skb_out) {
                res = -ENOMEM;
                goto fail;
@@ -399,18 +401,26 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
                goto nla_put_failure;
        }
 
-       res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
-       if (res < 0)
-               goto nla_put_failure;
+       if (!restart) {
+               res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
+               if (res < 0)
+                       goto nla_put_failure;
+       }
 
        hsr = netdev_priv(hsr_dev);
 
-       rcu_read_lock();
-       pos = hsr_get_next_node(hsr, NULL, addr);
+       if (!pos)
+               pos = hsr_get_next_node(hsr, NULL, addr);
        while (pos) {
                res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
                if (res < 0) {
-                       rcu_read_unlock();
+                       if (res == -EMSGSIZE) {
+                               genlmsg_end(skb_out, msg_head);
+                               genlmsg_unicast(genl_info_net(info), skb_out,
+                                               info->snd_portid);
+                               restart = true;
+                               goto restart;
+                       }
                        goto nla_put_failure;
                }
                pos = hsr_get_next_node(hsr, pos, addr);
@@ -422,15 +432,18 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
 
        return 0;
 
+rcu_unlock:
+       rcu_read_unlock();
 invalid:
        netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
        return 0;
 
 nla_put_failure:
-       kfree_skb(skb_out);
+       nlmsg_free(skb_out);
        /* Fall through */
 
 fail:
+       rcu_read_unlock();
        return res;
 }
 
@@ -457,6 +470,7 @@ static struct genl_family hsr_genl_family __ro_after_init = {
        .version = 1,
        .maxattr = HSR_A_MAX,
        .policy = hsr_genl_policy,
+       .netnsok = true,
        .module = THIS_MODULE,
        .ops = hsr_ops,
        .n_ops = ARRAY_SIZE(hsr_ops),
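
The interesting part of the node-list change above is the restart loop: when nla_put() returns -EMSGSIZE the partially filled message is finalized and sent, and the dump continues into a fresh message from the saved cursor instead of failing. The generic shape of that loop, with hypothetical helpers standing in for the genetlink calls:

#include <linux/errno.h>
#include <linux/types.h>

struct dump_ctx;
int start_message(struct dump_ctx *c, bool restarted);
void finish_and_send(struct dump_ctx *c);
void *first_item(struct dump_ctx *c);
void *next_item(struct dump_ctx *c, void *pos);
int put_item(struct dump_ctx *c, void *pos);

int dump_all(struct dump_ctx *c)
{
        bool restarted = false;
        void *pos = NULL;
        int err;

restart:
        err = start_message(c, restarted);
        if (err)
                return err;

        if (!pos)
                pos = first_item(c);
        while (pos) {
                err = put_item(c, pos);
                if (err == -EMSGSIZE) {
                        finish_and_send(c);     /* ship what already fits */
                        restarted = true;
                        goto restart;           /* same pos, fresh message */
                }
                if (err)
                        return err;
                pos = next_item(c, pos);
        }

        finish_and_send(c);
        return 0;
}

The other half of the hunk moves the whole lookup under rcu_read_lock() and switches the allocations to GFP_ATOMIC, since dev_get_by_index_rcu() only pins the device for as long as the RCU read side is held.
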
index fbfd0db..a9104d4 100644 (file)
@@ -145,16 +145,16 @@ int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
        if (!port)
                return -ENOMEM;
 
+       port->hsr = hsr;
+       port->dev = dev;
+       port->type = type;
+
        if (type != HSR_PT_MASTER) {
                res = hsr_portdev_setup(dev, port);
                if (res)
                        goto fail_dev_setup;
        }
 
-       port->hsr = hsr;
-       port->dev = dev;
-       port->type = type;
-
        list_add_tail_rcu(&port->port_list, &hsr->ports);
        synchronize_rcu();
 
index 2c7a38d..0672b2f 100644 (file)
@@ -21,7 +21,13 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
        [IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, },
        [IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, },
        [IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_BCN_ORD] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_SF_ORD] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_PAN_COORD] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_BAT_EXT] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_COORD_REALIGN] = { .type = NLA_U8, },
        [IEEE802154_ATTR_PAGE] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_DEV_TYPE] = { .type = NLA_U8, },
        [IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, },
        [IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, },
        [IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, },
index f96bd48..6490b84 100644 (file)
@@ -303,6 +303,7 @@ config SYN_COOKIES
 
 config NET_IPVTI
        tristate "Virtual (secure) IP: tunneling"
+       depends on IPV6 || IPV6=n
        select INET_TUNNEL
        select NET_IP_TUNNEL
        select XFRM
index 574972b..2bf3abe 100644 (file)
@@ -184,7 +184,6 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
 {
        const struct tcp_congestion_ops *utcp_ca;
        struct tcp_congestion_ops *tcp_ca;
-       size_t tcp_ca_name_len;
        int prog_fd;
        u32 moff;
 
@@ -199,13 +198,11 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
                tcp_ca->flags = utcp_ca->flags;
                return 1;
        case offsetof(struct tcp_congestion_ops, name):
-               tcp_ca_name_len = strnlen(utcp_ca->name, sizeof(utcp_ca->name));
-               if (!tcp_ca_name_len ||
-                   tcp_ca_name_len == sizeof(utcp_ca->name))
+               if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
+                                    sizeof(tcp_ca->name)) <= 0)
                        return -EINVAL;
                if (tcp_ca_find(utcp_ca->name))
                        return -EEXIST;
-               memcpy(tcp_ca->name, utcp_ca->name, sizeof(tcp_ca->name));
                return 1;
        }
 
index 577db1d..213be9c 100644 (file)
@@ -997,7 +997,9 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
                        return -ENOENT;
                }
 
+               rcu_read_lock();
                err = fib_table_dump(tb, skb, cb, &filter);
+               rcu_read_unlock();
                return skb->len ? : err;
        }
 
index 5fd6e8e..66fdbfe 100644 (file)
@@ -56,7 +56,9 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
 }
 EXPORT_SYMBOL_GPL(gre_del_protocol);
 
-/* Fills in tpi and returns header length to be pulled. */
+/* Fills in tpi and returns header length to be pulled.
+ * Note that caller must use pskb_may_pull() before pulling GRE header.
+ */
 int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                     bool *csum_err, __be16 proto, int nhs)
 {
@@ -110,8 +112,14 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
         * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
         */
        if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
+               u8 _val, *val;
+
+               val = skb_header_pointer(skb, nhs + hdr_len,
+                                        sizeof(_val), &_val);
+               if (!val)
+                       return -EINVAL;
                tpi->proto = proto;
-               if ((*(u8 *)options & 0xF0) != 0x40)
+               if ((*val & 0xF0) != 0x40)
                        hdr_len += 4;
        }
        tpi->hdr_len = hdr_len;
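
The WCCP fix above stops dereferencing a byte that was never pulled into the linear area; instead it reads it through skb_header_pointer(), which returns a pointer only when the requested range really exists (copying into the caller's scratch buffer if the packet data is not linear) and NULL otherwise. A minimal flat-buffer analogue of that contract, not the kernel helper itself:

    #include <stdio.h>
    #include <string.h>

    /* Return a pointer covering buf[off .. off+len), or NULL if the range is
     * out of bounds.  A flat buffer never really needs the scratch copy; it is
     * kept only to mirror the skb_header_pointer() calling convention, where
     * the data may live in non-linear fragments. */
    static const void *header_pointer(const unsigned char *buf, size_t buflen,
                                      size_t off, size_t len, void *scratch)
    {
        if (len > buflen || off > buflen - len)
            return NULL;
        memcpy(scratch, buf + off, len);
        return scratch;
    }

    int main(void)
    {
        unsigned char pkt[8] = { 0x00, 0x00, 0x08, 0x83, 0x45, 0x00, 0x00, 0x1c };
        unsigned char _val;
        unsigned char scratch4[4];
        const unsigned char *val;

        /* Read the one byte after a 4-byte base header, as the WCCP check does. */
        val = header_pointer(pkt, sizeof(pkt), 4, sizeof(_val), &_val);
        if (!val)
            return 1;
        printf("version nibble ok: %s\n", (*val & 0xF0) == 0x40 ? "yes" : "no");

        /* An out-of-range read fails instead of touching memory past the packet. */
        if (!header_pointer(pkt, sizeof(pkt), 7, sizeof(scratch4), scratch4))
            printf("truncated packet rejected\n");
        return 0;
    }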
index a4db79b..d545fb9 100644 (file)
@@ -482,8 +482,28 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
                }
                spin_unlock_bh(&queue->fastopenq.lock);
        }
+
 out:
        release_sock(sk);
+       if (newsk && mem_cgroup_sockets_enabled) {
+               int amt;
+
+               /* atomically get the memory usage, set and charge the
+                * newsk->sk_memcg.
+                */
+               lock_sock(newsk);
+
+               /* The socket has not been accepted yet, no need to look at
+                * newsk->sk_wmem_queued.
+                */
+               amt = sk_mem_pages(newsk->sk_forward_alloc +
+                                  atomic_read(&newsk->sk_rmem_alloc));
+               mem_cgroup_sk_alloc(newsk);
+               if (newsk->sk_memcg && amt)
+                       mem_cgroup_charge_skmem(newsk->sk_memcg, amt);
+
+               release_sock(newsk);
+       }
        if (req)
                reqsk_put(req);
        return newsk;
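
The charge amount above is computed with sk_mem_pages(), which in effect rounds the socket's current byte usage up to whole accounting pages so the memcg charge matches what the child socket already consumed. A tiny worked example of that rounding, assuming 4 KiB pages (the real quantum is the kernel's page size):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL    /* assumed; the kernel uses its native page size */

    static unsigned long mem_pages(unsigned long bytes)
    {
        return (bytes + PAGE_SIZE - 1) / PAGE_SIZE;     /* round up */
    }

    int main(void)
    {
        /* e.g. 3000 bytes of forward_alloc plus 9000 bytes queued on rmem */
        printf("%lu page(s) to charge\n", mem_pages(3000 + 9000));  /* -> 3 */
        return 0;
    }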
index f11e997..8c83775 100644 (file)
@@ -100,13 +100,9 @@ static size_t inet_sk_attr_size(struct sock *sk,
                aux = handler->idiag_get_aux_size(sk, net_admin);
 
        return    nla_total_size(sizeof(struct tcp_info))
-               + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
-               + nla_total_size(1) /* INET_DIAG_TOS */
-               + nla_total_size(1) /* INET_DIAG_TCLASS */
-               + nla_total_size(4) /* INET_DIAG_MARK */
-               + nla_total_size(4) /* INET_DIAG_CLASS_ID */
-               + nla_total_size(sizeof(struct inet_diag_meminfo))
                + nla_total_size(sizeof(struct inet_diag_msg))
+               + inet_diag_msg_attrs_size()
+               + nla_total_size(sizeof(struct inet_diag_meminfo))
                + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
                + nla_total_size(TCP_CA_NAME_MAX)
                + nla_total_size(sizeof(struct tcpvegas_info))
@@ -147,6 +143,24 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
        if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark))
                goto errout;
 
+       if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
+           ext & (1 << (INET_DIAG_TCLASS - 1))) {
+               u32 classid = 0;
+
+#ifdef CONFIG_SOCK_CGROUP_DATA
+               classid = sock_cgroup_classid(&sk->sk_cgrp_data);
+#endif
+               /* Fallback to socket priority if class id isn't set.
+                * Classful qdiscs use it as direct reference to class.
+                * For cgroup2 classid is always zero.
+                */
+               if (!classid)
+                       classid = sk->sk_priority;
+
+               if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
+                       goto errout;
+       }
+
        r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
        r->idiag_inode = sock_i_ino(sk);
 
@@ -284,24 +298,6 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                        goto errout;
        }
 
-       if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
-           ext & (1 << (INET_DIAG_TCLASS - 1))) {
-               u32 classid = 0;
-
-#ifdef CONFIG_SOCK_CGROUP_DATA
-               classid = sock_cgroup_classid(&sk->sk_cgrp_data);
-#endif
-               /* Fallback to socket priority if class id isn't set.
-                * Classful qdiscs use it as direct reference to class.
-                * For cgroup2 classid is always zero.
-                */
-               if (!classid)
-                       classid = sk->sk_priority;
-
-               if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
-                       goto errout;
-       }
-
 out:
        nlmsg_end(skb, nlh);
        return 0;
index 8274f98..029b24e 100644 (file)
@@ -1153,6 +1153,24 @@ static int ipgre_netlink_parms(struct net_device *dev,
        if (data[IFLA_GRE_FWMARK])
                *fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
 
+       return 0;
+}
+
+static int erspan_netlink_parms(struct net_device *dev,
+                               struct nlattr *data[],
+                               struct nlattr *tb[],
+                               struct ip_tunnel_parm *parms,
+                               __u32 *fwmark)
+{
+       struct ip_tunnel *t = netdev_priv(dev);
+       int err;
+
+       err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
+       if (err)
+               return err;
+       if (!data)
+               return 0;
+
        if (data[IFLA_GRE_ERSPAN_VER]) {
                t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
 
@@ -1276,45 +1294,70 @@ static void ipgre_tap_setup(struct net_device *dev)
        ip_tunnel_setup(dev, gre_tap_net_id);
 }
 
-static int ipgre_newlink(struct net *src_net, struct net_device *dev,
-                        struct nlattr *tb[], struct nlattr *data[],
-                        struct netlink_ext_ack *extack)
+static int
+ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
 {
-       struct ip_tunnel_parm p;
        struct ip_tunnel_encap ipencap;
-       __u32 fwmark = 0;
-       int err;
 
        if (ipgre_netlink_encap_parms(data, &ipencap)) {
                struct ip_tunnel *t = netdev_priv(dev);
-               err = ip_tunnel_encap_setup(t, &ipencap);
+               int err = ip_tunnel_encap_setup(t, &ipencap);
 
                if (err < 0)
                        return err;
        }
 
+       return 0;
+}
+
+static int ipgre_newlink(struct net *src_net, struct net_device *dev,
+                        struct nlattr *tb[], struct nlattr *data[],
+                        struct netlink_ext_ack *extack)
+{
+       struct ip_tunnel_parm p;
+       __u32 fwmark = 0;
+       int err;
+
+       err = ipgre_newlink_encap_setup(dev, data);
+       if (err)
+               return err;
+
        err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
        if (err < 0)
                return err;
        return ip_tunnel_newlink(dev, tb, &p, fwmark);
 }
 
+static int erspan_newlink(struct net *src_net, struct net_device *dev,
+                         struct nlattr *tb[], struct nlattr *data[],
+                         struct netlink_ext_ack *extack)
+{
+       struct ip_tunnel_parm p;
+       __u32 fwmark = 0;
+       int err;
+
+       err = ipgre_newlink_encap_setup(dev, data);
+       if (err)
+               return err;
+
+       err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
+       if (err)
+               return err;
+       return ip_tunnel_newlink(dev, tb, &p, fwmark);
+}
+
 static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
                            struct nlattr *data[],
                            struct netlink_ext_ack *extack)
 {
        struct ip_tunnel *t = netdev_priv(dev);
-       struct ip_tunnel_encap ipencap;
        __u32 fwmark = t->fwmark;
        struct ip_tunnel_parm p;
        int err;
 
-       if (ipgre_netlink_encap_parms(data, &ipencap)) {
-               err = ip_tunnel_encap_setup(t, &ipencap);
-
-               if (err < 0)
-                       return err;
-       }
+       err = ipgre_newlink_encap_setup(dev, data);
+       if (err)
+               return err;
 
        err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
        if (err < 0)
@@ -1327,8 +1370,34 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
        t->parms.i_flags = p.i_flags;
        t->parms.o_flags = p.o_flags;
 
-       if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
-               ipgre_link_update(dev, !tb[IFLA_MTU]);
+       ipgre_link_update(dev, !tb[IFLA_MTU]);
+
+       return 0;
+}
+
+static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
+                            struct nlattr *data[],
+                            struct netlink_ext_ack *extack)
+{
+       struct ip_tunnel *t = netdev_priv(dev);
+       __u32 fwmark = t->fwmark;
+       struct ip_tunnel_parm p;
+       int err;
+
+       err = ipgre_newlink_encap_setup(dev, data);
+       if (err)
+               return err;
+
+       err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
+       if (err < 0)
+               return err;
+
+       err = ip_tunnel_changelink(dev, tb, &p, fwmark);
+       if (err < 0)
+               return err;
+
+       t->parms.i_flags = p.i_flags;
+       t->parms.o_flags = p.o_flags;
 
        return 0;
 }
@@ -1519,8 +1588,8 @@ static struct rtnl_link_ops erspan_link_ops __read_mostly = {
        .priv_size      = sizeof(struct ip_tunnel),
        .setup          = erspan_setup,
        .validate       = erspan_validate,
-       .newlink        = ipgre_newlink,
-       .changelink     = ipgre_changelink,
+       .newlink        = erspan_newlink,
+       .changelink     = erspan_changelink,
        .dellink        = ip_tunnel_dellink,
        .get_size       = ipgre_get_size,
        .fill_info      = ipgre_fill_info,
index 37cddd1..1b4e6f2 100644 (file)
@@ -187,17 +187,39 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
        int mtu;
 
        if (!dst) {
-               struct rtable *rt;
-
-               fl->u.ip4.flowi4_oif = dev->ifindex;
-               fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
-               rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
-               if (IS_ERR(rt)) {
+               switch (skb->protocol) {
+               case htons(ETH_P_IP): {
+                       struct rtable *rt;
+
+                       fl->u.ip4.flowi4_oif = dev->ifindex;
+                       fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+                       rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
+                       if (IS_ERR(rt)) {
+                               dev->stats.tx_carrier_errors++;
+                               goto tx_error_icmp;
+                       }
+                       dst = &rt->dst;
+                       skb_dst_set(skb, dst);
+                       break;
+               }
+#if IS_ENABLED(CONFIG_IPV6)
+               case htons(ETH_P_IPV6):
+                       fl->u.ip6.flowi6_oif = dev->ifindex;
+                       fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+                       dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
+                       if (dst->error) {
+                               dst_release(dst);
+                               dst = NULL;
+                               dev->stats.tx_carrier_errors++;
+                               goto tx_error_icmp;
+                       }
+                       skb_dst_set(skb, dst);
+                       break;
+#endif
+               default:
                        dev->stats.tx_carrier_errors++;
                        goto tx_error_icmp;
                }
-               dst = &rt->dst;
-               skb_dst_set(skb, dst);
        }
 
        dst_hold(dst);
index e35736b..a93e7d1 100644 (file)
@@ -100,8 +100,9 @@ static int raw_diag_dump_one(struct sk_buff *in_skb,
        if (IS_ERR(sk))
                return PTR_ERR(sk);
 
-       rep = nlmsg_new(sizeof(struct inet_diag_msg) +
-                       sizeof(struct inet_diag_meminfo) + 64,
+       rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) +
+                       inet_diag_msg_attrs_size() +
+                       nla_total_size(sizeof(struct inet_diag_meminfo)) + 64,
                        GFP_KERNEL);
        if (!rep) {
                sock_put(sk);
index eb2d805..dc77c30 100644 (file)
@@ -2948,8 +2948,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                        err = -EPERM;
                else if (tp->repair_queue == TCP_SEND_QUEUE)
                        WRITE_ONCE(tp->write_seq, val);
-               else if (tp->repair_queue == TCP_RECV_QUEUE)
+               else if (tp->repair_queue == TCP_RECV_QUEUE) {
                        WRITE_ONCE(tp->rcv_nxt, val);
+                       WRITE_ONCE(tp->copied_seq, val);
+               }
                else
                        err = -EINVAL;
                break;
index 306e25d..2f45cde 100644 (file)
@@ -1109,6 +1109,10 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
 
                if (unlikely(!skb))
                        return -ENOBUFS;
+               /* retransmit skbs might have a non-zero value in skb->dev
+                * because skb->dev is aliased with skb->rbnode.rb_left
+                */
+               skb->dev = NULL;
        }
 
        inet = inet_sk(sk);
@@ -3037,8 +3041,12 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 
                tcp_skb_tsorted_save(skb) {
                        nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
-                       err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
-                                    -ENOBUFS;
+                       if (nskb) {
+                               nskb->dev = NULL;
+                               err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC);
+                       } else {
+                               err = -ENOBUFS;
+                       }
                } tcp_skb_tsorted_restore(skb);
 
                if (!err) {
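
The skb->dev = NULL assignments in the two hunks above exist because, inside struct sk_buff, the dev pointer shares storage with the rbnode links used while the skb sits on the TCP retransmit queue, so a clone taken from that queue can carry a stale, non-NULL looking dev. A self-contained illustration of that aliasing hazard with an anonymous union; the field names are illustrative, not the sk_buff layout.

    #include <stdio.h>

    struct node {
        struct node *left;
        struct node *right;
    };

    struct pkt {
        union {                 /* same storage, two meanings */
            void *dev;
            struct node rb;
        };
        int len;
    };

    int main(void)
    {
        static struct node some_left_child;
        struct pkt p = { .dev = NULL, .len = 100 };

        /* While the packet sits on a tree, the union holds tree links ... */
        p.rb.left = &some_left_child;

        /* ... so reading 'dev' now yields a bogus non-NULL pointer. */
        printf("dev looks %s\n", p.dev ? "set (stale!)" : "NULL");

        /* The fix: clear the aliased field before using the object as a packet. */
        p.dev = NULL;
        printf("dev looks %s\n", p.dev ? "set (stale!)" : "NULL");
        return 0;
    }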
index 910555a..dccd228 100644 (file)
@@ -64,8 +64,9 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
                goto out;
 
        err = -ENOMEM;
-       rep = nlmsg_new(sizeof(struct inet_diag_msg) +
-                       sizeof(struct inet_diag_meminfo) + 64,
+       rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) +
+                       inet_diag_msg_attrs_size() +
+                       nla_total_size(sizeof(struct inet_diag_meminfo)) + 64,
                        GFP_KERNEL);
        if (!rep)
                goto out;
index cb493e1..46d614b 100644 (file)
@@ -1226,11 +1226,13 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
 }
 
 static void
-cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt)
+cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
+                    bool del_rt, bool del_peer)
 {
        struct fib6_info *f6i;
 
-       f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len,
+       f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
+                                       ifp->prefix_len,
                                        ifp->idev->dev, 0, RTF_DEFAULT, true);
        if (f6i) {
                if (del_rt)
@@ -1293,7 +1295,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
 
        if (action != CLEANUP_PREFIX_RT_NOP) {
                cleanup_prefix_route(ifp, expires,
-                       action == CLEANUP_PREFIX_RT_DEL);
+                       action == CLEANUP_PREFIX_RT_DEL, false);
        }
 
        /* clean up prefsrc entries */
@@ -3345,6 +3347,10 @@ static void addrconf_dev_config(struct net_device *dev)
            (dev->type != ARPHRD_NONE) &&
            (dev->type != ARPHRD_RAWIP)) {
                /* Alas, we support only Ethernet autoconfiguration. */
+               idev = __in6_dev_get(dev);
+               if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP &&
+                   dev->flags & IFF_MULTICAST)
+                       ipv6_mc_up(idev);
                return;
        }
 
@@ -4586,12 +4592,14 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
 }
 
 static int modify_prefix_route(struct inet6_ifaddr *ifp,
-                              unsigned long expires, u32 flags)
+                              unsigned long expires, u32 flags,
+                              bool modify_peer)
 {
        struct fib6_info *f6i;
        u32 prio;
 
-       f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len,
+       f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
+                                       ifp->prefix_len,
                                        ifp->idev->dev, 0, RTF_DEFAULT, true);
        if (!f6i)
                return -ENOENT;
@@ -4602,7 +4610,8 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
                ip6_del_rt(dev_net(ifp->idev->dev), f6i);
 
                /* add new one */
-               addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
+               addrconf_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
+                                     ifp->prefix_len,
                                      ifp->rt_priority, ifp->idev->dev,
                                      expires, flags, GFP_KERNEL);
        } else {
@@ -4624,6 +4633,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
        unsigned long timeout;
        bool was_managetempaddr;
        bool had_prefixroute;
+       bool new_peer = false;
 
        ASSERT_RTNL();
 
@@ -4655,6 +4665,13 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
                cfg->preferred_lft = timeout;
        }
 
+       if (cfg->peer_pfx &&
+           memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) {
+               if (!ipv6_addr_any(&ifp->peer_addr))
+                       cleanup_prefix_route(ifp, expires, true, true);
+               new_peer = true;
+       }
+
        spin_lock_bh(&ifp->lock);
        was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
        had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
@@ -4670,6 +4687,9 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
        if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
                ifp->rt_priority = cfg->rt_priority;
 
+       if (new_peer)
+               ifp->peer_addr = *cfg->peer_pfx;
+
        spin_unlock_bh(&ifp->lock);
        if (!(ifp->flags&IFA_F_TENTATIVE))
                ipv6_ifa_notify(0, ifp);
@@ -4678,7 +4698,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
                int rc = -ENOENT;
 
                if (had_prefixroute)
-                       rc = modify_prefix_route(ifp, expires, flags);
+                       rc = modify_prefix_route(ifp, expires, flags, false);
 
                /* prefix route could have been deleted; if so restore it */
                if (rc == -ENOENT) {
@@ -4686,6 +4706,15 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
                                              ifp->rt_priority, ifp->idev->dev,
                                              expires, flags, GFP_KERNEL);
                }
+
+               if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
+                       rc = modify_prefix_route(ifp, expires, flags, true);
+
+               if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
+                       addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
+                                             ifp->rt_priority, ifp->idev->dev,
+                                             expires, flags, GFP_KERNEL);
+               }
        } else if (had_prefixroute) {
                enum cleanup_prefix_rt_t action;
                unsigned long rt_expires;
@@ -4696,7 +4725,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
 
                if (action != CLEANUP_PREFIX_RT_NOP) {
                        cleanup_prefix_route(ifp, rt_expires,
-                               action == CLEANUP_PREFIX_RT_DEL);
+                               action == CLEANUP_PREFIX_RT_DEL, false);
                }
        }
 
@@ -5983,9 +6012,9 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
                if (ifp->idev->cnf.forwarding)
                        addrconf_join_anycast(ifp);
                if (!ipv6_addr_any(&ifp->peer_addr))
-                       addrconf_prefix_route(&ifp->peer_addr, 128, 0,
-                                             ifp->idev->dev, 0, 0,
-                                             GFP_ATOMIC);
+                       addrconf_prefix_route(&ifp->peer_addr, 128,
+                                             ifp->rt_priority, ifp->idev->dev,
+                                             0, 0, GFP_ATOMIC);
                break;
        case RTM_DELADDR:
                if (ifp->idev->cnf.forwarding)
index 524006a..cc6180e 100644 (file)
@@ -311,7 +311,7 @@ static int vti6_rcv(struct sk_buff *skb)
 
                if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                        rcu_read_unlock();
-                       return 0;
+                       goto discard;
                }
 
                ipv6h = ipv6_hdr(skb);
@@ -450,15 +450,33 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        int mtu;
 
        if (!dst) {
-               fl->u.ip6.flowi6_oif = dev->ifindex;
-               fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
-               dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
-               if (dst->error) {
-                       dst_release(dst);
-                       dst = NULL;
+               switch (skb->protocol) {
+               case htons(ETH_P_IP): {
+                       struct rtable *rt;
+
+                       fl->u.ip4.flowi4_oif = dev->ifindex;
+                       fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+                       rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
+                       if (IS_ERR(rt))
+                               goto tx_err_link_failure;
+                       dst = &rt->dst;
+                       skb_dst_set(skb, dst);
+                       break;
+               }
+               case htons(ETH_P_IPV6):
+                       fl->u.ip6.flowi6_oif = dev->ifindex;
+                       fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+                       dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
+                       if (dst->error) {
+                               dst_release(dst);
+                               dst = NULL;
+                               goto tx_err_link_failure;
+                       }
+                       skb_dst_set(skb, dst);
+                       break;
+               default:
                        goto tx_err_link_failure;
                }
-               skb_dst_set(skb, dst);
        }
 
        dst_hold(dst);
index ab7f124..8c52efe 100644 (file)
@@ -268,7 +268,7 @@ static int seg6_do_srh(struct sk_buff *skb)
                skb_mac_header_rebuild(skb);
                skb_push(skb, skb->mac_len);
 
-               err = seg6_do_srh_encap(skb, tinfo->srh, NEXTHDR_NONE);
+               err = seg6_do_srh_encap(skb, tinfo->srh, IPPROTO_ETHERNET);
                if (err)
                        return err;
 
index 7cbc197..8165802 100644 (file)
@@ -282,7 +282,7 @@ static int input_action_end_dx2(struct sk_buff *skb,
        struct net_device *odev;
        struct ethhdr *eth;
 
-       if (!decap_and_validate(skb, NEXTHDR_NONE))
+       if (!decap_and_validate(skb, IPPROTO_ETHERNET))
                goto drop;
 
        if (!pskb_may_pull(skb, ETH_HLEN))
index e11bdb0..25b7ebd 100644 (file)
@@ -78,7 +78,7 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const
 
        hlist_for_each_entry_rcu(x6spi,
                             &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
-                            list_byaddr) {
+                            list_byaddr, lockdep_is_held(&xfrm6_tunnel_spi_lock)) {
                if (xfrm6_addr_equal(&x6spi->addr, saddr))
                        return x6spi;
        }
index c80b1e1..3419ed6 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2007      Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
  */
 
 #include <linux/debugfs.h>
@@ -78,6 +78,7 @@ static const char * const sta_flag_names[] = {
        FLAG(MPSP_OWNER),
        FLAG(MPSP_RECIPIENT),
        FLAG(PS_DELIVER),
+       FLAG(USES_ENCRYPTION),
 #undef FLAG
 };
 
index 0f889b9..efc1acc 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright 2018-2019  Intel Corporation
+ * Copyright 2018-2020  Intel Corporation
  */
 
 #include <linux/if_ether.h>
@@ -262,22 +262,29 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
                          sta ? sta->sta.addr : bcast_addr, ret);
 }
 
-int ieee80211_set_tx_key(struct ieee80211_key *key)
+static int _ieee80211_set_tx_key(struct ieee80211_key *key, bool force)
 {
        struct sta_info *sta = key->sta;
        struct ieee80211_local *local = key->local;
 
        assert_key_lock(local);
 
+       set_sta_flag(sta, WLAN_STA_USES_ENCRYPTION);
+
        sta->ptk_idx = key->conf.keyidx;
 
-       if (!ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT))
+       if (force || !ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT))
                clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
        ieee80211_check_fast_xmit(sta);
 
        return 0;
 }
 
+int ieee80211_set_tx_key(struct ieee80211_key *key)
+{
+       return _ieee80211_set_tx_key(key, false);
+}
+
 static void ieee80211_pairwise_rekey(struct ieee80211_key *old,
                                     struct ieee80211_key *new)
 {
@@ -441,11 +448,8 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
                if (pairwise) {
                        rcu_assign_pointer(sta->ptk[idx], new);
                        if (new &&
-                           !(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX)) {
-                               sta->ptk_idx = idx;
-                               clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
-                               ieee80211_check_fast_xmit(sta);
-                       }
+                           !(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX))
+                               _ieee80211_set_tx_key(new, true);
                } else {
                        rcu_assign_pointer(sta->gtk[idx], new);
                }
index d699833..38a0383 100644 (file)
@@ -1152,7 +1152,8 @@ int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
                }
        }
 
-       if (!(mpath->flags & MESH_PATH_RESOLVING))
+       if (!(mpath->flags & MESH_PATH_RESOLVING) &&
+           mesh_path_sel_is_hwmp(sdata))
                mesh_queue_preq(mpath, PREQ_Q_F_START);
 
        if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
index 0f5f406..e3572be 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
  */
 
 #include <linux/module.h>
@@ -1049,6 +1049,11 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
        might_sleep();
        lockdep_assert_held(&local->sta_mtx);
 
+       while (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
+               ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+               WARN_ON_ONCE(ret);
+       }
+
        /* now keys can no longer be reached */
        ieee80211_free_sta_keys(local, sta);
 
index c00e285..552eed3 100644 (file)
@@ -98,6 +98,7 @@ enum ieee80211_sta_info_flags {
        WLAN_STA_MPSP_OWNER,
        WLAN_STA_MPSP_RECIPIENT,
        WLAN_STA_PS_DELIVER,
+       WLAN_STA_USES_ENCRYPTION,
 
        NUM_WLAN_STA_FLAGS,
 };
index 87def9c..d9cca6d 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007      Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018, 2020 Intel Corporation
  *
  * Transmit and frame generation functions.
  */
@@ -590,10 +590,13 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 
-       if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT))
+       if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) {
                tx->key = NULL;
-       else if (tx->sta &&
-                (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
+               return TX_CONTINUE;
+       }
+
+       if (tx->sta &&
+           (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
                tx->key = key;
        else if (ieee80211_is_group_privacy_action(tx->skb) &&
                (key = rcu_dereference(tx->sdata->default_multicast_key)))
@@ -654,6 +657,9 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
                if (!skip_hw && tx->key &&
                    tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
                        info->control.hw_key = &tx->key->conf;
+       } else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta &&
+                  test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
+               return TX_DROP;
        }
 
        return TX_CONTINUE;
@@ -3598,8 +3604,25 @@ begin:
        tx.skb = skb;
        tx.sdata = vif_to_sdata(info->control.vif);
 
-       if (txq->sta)
+       if (txq->sta) {
                tx.sta = container_of(txq->sta, struct sta_info, sta);
+               /*
+                * Drop unicast frames to unauthorised stations unless they are
+                * EAPOL frames from the local station.
+                */
+               if (unlikely(!ieee80211_vif_is_mesh(&tx.sdata->vif) &&
+                            tx.sdata->vif.type != NL80211_IFTYPE_OCB &&
+                            !is_multicast_ether_addr(hdr->addr1) &&
+                            !test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) &&
+                            (!(info->control.flags &
+                               IEEE80211_TX_CTRL_PORT_CTRL_PROTO) ||
+                             !ether_addr_equal(tx.sdata->vif.addr,
+                                               hdr->addr2)))) {
+                       I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
+                       ieee80211_free_txskb(&local->hw, skb);
+                       goto begin;
+               }
+       }
 
        /*
         * The key can be removed while the packet was queued, so need to call
@@ -5126,6 +5149,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
        struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb;
        struct ethhdr *ehdr;
+       u32 ctrl_flags = 0;
        u32 flags;
 
        /* Only accept CONTROL_PORT_PROTOCOL configured in CONNECT/ASSOCIATE
@@ -5135,6 +5159,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
            proto != cpu_to_be16(ETH_P_PREAUTH))
                return -EINVAL;
 
+       if (proto == sdata->control_port_protocol)
+               ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+
        if (unencrypted)
                flags = IEEE80211_TX_INTFL_DONT_ENCRYPT;
        else
@@ -5160,7 +5187,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
        skb_reset_mac_header(skb);
 
        local_bh_disable();
-       __ieee80211_subif_start_xmit(skb, skb->dev, flags, 0);
+       __ieee80211_subif_start_xmit(skb, skb->dev, flags, ctrl_flags);
        local_bh_enable();
 
        return 0;
index 45acd87..fd2c315 100644 (file)
@@ -334,6 +334,8 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
        struct mptcp_sock *msk;
        unsigned int ack_size;
        bool ret = false;
+       bool can_ack;
+       u64 ack_seq;
        u8 tcp_fin;
 
        if (skb) {
@@ -360,9 +362,22 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
                ret = true;
        }
 
+       /* For passive sockets the msk sets 'can_ack' only after accept(), even
+        * though the first subflow may already have the remote key handy
+        */
+       can_ack = true;
        opts->ext_copy.use_ack = 0;
        msk = mptcp_sk(subflow->conn);
-       if (!msk || !READ_ONCE(msk->can_ack)) {
+       if (likely(msk && READ_ONCE(msk->can_ack))) {
+               ack_seq = msk->ack_seq;
+       } else if (subflow->can_ack) {
+               mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
+               ack_seq++;
+       } else {
+               can_ack = false;
+       }
+
+       if (unlikely(!can_ack)) {
                *size = ALIGN(dss_size, 4);
                return ret;
        }
@@ -375,7 +390,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
 
        dss_size += ack_size;
 
-       opts->ext_copy.data_ack = msk->ack_seq;
+       opts->ext_copy.data_ack = ack_seq;
        opts->ext_copy.ack64 = 1;
        opts->ext_copy.use_ack = 1;
 
index 410809c..4912069 100644 (file)
@@ -411,7 +411,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                *pos = cpu + 1;
                return per_cpu_ptr(net->ct.stat, cpu);
        }
-
+       (*pos)++;
        return NULL;
 }
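
This one-line change (and the matching ones in the synproxy, xt_recent and x_tables hunks further down) makes the seq_file ->next() callback advance *pos even when it returns NULL; if the position is left untouched the seq_file core sees no progress and can re-emit the last record. A tiny userspace model of the contract, iterating an array instead of per-CPU statistics:

    #include <stdio.h>

    static int table[] = { 10, 20, 30 };
    #define N (int)(sizeof(table) / sizeof(table[0]))

    /* ->next() analogue: always advance *pos, return NULL when exhausted. */
    static int *seq_next(long *pos)
    {
        (*pos)++;               /* unconditional, as in the fix above */
        if (*pos >= N)
            return NULL;
        return &table[*pos];
    }

    int main(void)
    {
        long pos = 0;
        int *v = &table[0];     /* ->start() analogue */

        while (v) {
            printf("pos=%ld val=%d\n", pos, *v);
            v = seq_next(&pos);
        }
        printf("stopped at pos=%ld\n", pos);    /* 3, one past the end */
        return 0;
    }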
 
index 8af28e1..70ebeba 100644 (file)
@@ -554,6 +554,9 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
        nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
        nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
        nf_flow_table_offload_flush(flow_table);
+       if (nf_flowtable_hw_offload(flow_table))
+               nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
+                                     flow_table);
        rhashtable_destroy(&flow_table->rhashtable);
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_free);
index 9e563fd..ba775ae 100644 (file)
@@ -146,11 +146,13 @@ static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
 
        if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
            (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
-            nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
+            nf_flow_snat_ip(flow, skb, ip_hdr(skb), thoff, dir) < 0))
                return -1;
+
+       iph = ip_hdr(skb);
        if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
            (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
-            nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
+            nf_flow_dnat_ip(flow, skb, ip_hdr(skb), thoff, dir) < 0))
                return -1;
 
        return 0;
@@ -189,6 +191,7 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
        if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
                return -1;
 
+       iph = ip_hdr(skb);
        ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
 
        tuple->src_v4.s_addr    = iph->saddr;
@@ -426,11 +429,13 @@ static int nf_flow_nat_ipv6(const struct flow_offload *flow,
 
        if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
            (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
-            nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
+            nf_flow_snat_ipv6(flow, skb, ipv6_hdr(skb), thoff, dir) < 0))
                return -1;
+
+       ip6h = ipv6_hdr(skb);
        if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
            (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
-            nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
+            nf_flow_dnat_ipv6(flow, skb, ipv6_hdr(skb), thoff, dir) < 0))
                return -1;
 
        return 0;
@@ -459,6 +464,7 @@ static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
        if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
                return -1;
 
+       ip6h = ipv6_hdr(skb);
        ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
 
        tuple->src_v6           = ip6h->saddr;
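
These hunks re-read ip_hdr()/ipv6_hdr() after the NAT helpers run because those helpers may make the packet writable, which can move skb->data and leave the previously cached header pointer dangling. The same rule applies to any buffer a callee may reallocate; here is a small sketch with realloc() standing in for the skb operation (names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf {
        unsigned char *data;
        size_t len;
    };

    /* May grow (and therefore move) the storage, like a pull/make-writable call. */
    static int ensure_len(struct buf *b, size_t need)
    {
        if (b->len >= need)
            return 0;
        unsigned char *p = realloc(b->data, need);
        if (!p)
            return -1;
        memset(p + b->len, 0, need - b->len);
        b->data = p;
        b->len = need;
        return 0;
    }

    int main(void)
    {
        struct buf b = { .data = calloc(20, 1), .len = 20 };
        if (!b.data)
            return 1;

        unsigned char *hdr = b.data;        /* cached "header" pointer */

        if (ensure_len(&b, 4096) < 0)       /* may have moved b.data */
            return 1;

        hdr = b.data;                       /* reload before using it again */
        hdr[0] = 0x45;

        printf("first byte: 0x%02x\n", b.data[0]);
        free(b.data);
        return 0;
    }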
index 06f00cd..f2c22c6 100644 (file)
@@ -87,6 +87,7 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
        default:
                return -EOPNOTSUPP;
        }
+       mask->control.addr_type = 0xffff;
        match->dissector.used_keys |= BIT(key->control.addr_type);
        mask->basic.n_proto = 0xffff;
 
index b0930d4..b9cbe1e 100644 (file)
@@ -267,7 +267,7 @@ static void *synproxy_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                *pos = cpu + 1;
                return per_cpu_ptr(snet->stats, cpu);
        }
-
+       (*pos)++;
        return NULL;
 }
 
index d1318bd..d11f1a7 100644 (file)
@@ -1405,6 +1405,11 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
                                              lockdep_commit_lock_is_held(net));
                if (nft_dump_stats(skb, stats))
                        goto nla_put_failure;
+
+               if ((chain->flags & NFT_CHAIN_HW_OFFLOAD) &&
+                   nla_put_be32(skb, NFTA_CHAIN_FLAGS,
+                                htonl(NFT_CHAIN_HW_OFFLOAD)))
+                       goto nla_put_failure;
        }
 
        if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
@@ -5077,6 +5082,11 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                                err = -EBUSY;
                        else if (!(nlmsg_flags & NLM_F_EXCL))
                                err = 0;
+               } else if (err == -ENOTEMPTY) {
+                       /* ENOTEMPTY reports overlapping between this element
+                        * and an existing one.
+                        */
+                       err = -EEXIST;
                }
                goto err_element_clash;
        }
@@ -6300,8 +6310,13 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
                goto err4;
 
        err = nft_register_flowtable_net_hooks(ctx.net, table, flowtable);
-       if (err < 0)
+       if (err < 0) {
+               list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+                       list_del_rcu(&hook->list);
+                       kfree_rcu(hook, rcu);
+               }
                goto err4;
+       }
 
        err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
        if (err < 0)
@@ -7378,13 +7393,8 @@ static void nf_tables_module_autoload(struct net *net)
        list_splice_init(&net->nft.module_list, &module_list);
        mutex_unlock(&net->nft.commit_mutex);
        list_for_each_entry_safe(req, next, &module_list, list) {
-               if (req->done) {
-                       list_del(&req->list);
-                       kfree(req);
-               } else {
-                       request_module("%s", req->module);
-                       req->done = true;
-               }
+               request_module("%s", req->module);
+               req->done = true;
        }
        mutex_lock(&net->nft.commit_mutex);
        list_splice(&module_list, &net->nft.module_list);
@@ -8167,6 +8177,7 @@ static void __net_exit nf_tables_exit_net(struct net *net)
        __nft_release_tables(net);
        mutex_unlock(&net->nft.commit_mutex);
        WARN_ON_ONCE(!list_empty(&net->nft.tables));
+       WARN_ON_ONCE(!list_empty(&net->nft.module_list));
 }
 
 static struct pernet_operations nf_tables_net_ops = {
index de3a959..a5f294a 100644 (file)
@@ -742,6 +742,8 @@ static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
        [NFCTH_NAME] = { .type = NLA_NUL_STRING,
                         .len = NF_CT_HELPER_NAME_LEN-1 },
        [NFCTH_QUEUE_NUM] = { .type = NLA_U32, },
+       [NFCTH_PRIV_DATA_LEN] = { .type = NLA_U32, },
+       [NFCTH_STATUS] = { .type = NLA_U32, },
 };
 
 static const struct nfnl_callback nfnl_cthelper_cb[NFNL_MSG_CTHELPER_MAX] = {
index ff9ac8a..eac4a90 100644 (file)
@@ -89,6 +89,7 @@ static const struct nft_chain_type nft_chain_nat_inet = {
        .name           = "nat",
        .type           = NFT_CHAIN_T_NAT,
        .family         = NFPROTO_INET,
+       .owner          = THIS_MODULE,
        .hook_mask      = (1 << NF_INET_PRE_ROUTING) |
                          (1 << NF_INET_LOCAL_IN) |
                          (1 << NF_INET_LOCAL_OUT) |
index aba11c2..3087e23 100644 (file)
@@ -28,6 +28,9 @@ static void nft_fwd_netdev_eval(const struct nft_expr *expr,
        struct nft_fwd_netdev *priv = nft_expr_priv(expr);
        int oif = regs->data[priv->sreg_dev];
 
+       /* This is used by ifb only. */
+       skb_set_redirected(pkt->skb, true);
+
        nf_fwd_netdev_egress(pkt, oif);
        regs->verdict.code = NF_STOLEN;
 }
@@ -190,6 +193,13 @@ nla_put_failure:
        return -1;
 }
 
+static int nft_fwd_validate(const struct nft_ctx *ctx,
+                           const struct nft_expr *expr,
+                           const struct nft_data **data)
+{
+       return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS));
+}
+
 static struct nft_expr_type nft_fwd_netdev_type;
 static const struct nft_expr_ops nft_fwd_neigh_netdev_ops = {
        .type           = &nft_fwd_netdev_type,
@@ -197,6 +207,7 @@ static const struct nft_expr_ops nft_fwd_neigh_netdev_ops = {
        .eval           = nft_fwd_neigh_eval,
        .init           = nft_fwd_neigh_init,
        .dump           = nft_fwd_neigh_dump,
+       .validate       = nft_fwd_validate,
 };
 
 static const struct nft_expr_ops nft_fwd_netdev_ops = {
@@ -205,6 +216,7 @@ static const struct nft_expr_ops nft_fwd_netdev_ops = {
        .eval           = nft_fwd_netdev_eval,
        .init           = nft_fwd_netdev_init,
        .dump           = nft_fwd_netdev_dump,
+       .validate       = nft_fwd_validate,
        .offload        = nft_fwd_netdev_offload,
 };
 
index 1993af3..a7de3a5 100644 (file)
@@ -129,6 +129,7 @@ static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
        [NFTA_PAYLOAD_LEN]              = { .type = NLA_U32 },
        [NFTA_PAYLOAD_CSUM_TYPE]        = { .type = NLA_U32 },
        [NFTA_PAYLOAD_CSUM_OFFSET]      = { .type = NLA_U32 },
+       [NFTA_PAYLOAD_CSUM_FLAGS]       = { .type = NLA_U32 },
 };
 
 static int nft_payload_init(const struct nft_ctx *ctx,
index 4fc0c92..ef7e8ad 100644 (file)
@@ -1098,21 +1098,41 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
        struct nft_pipapo_field *f;
        int i, bsize_max, err = 0;
 
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END))
+               end = (const u8 *)nft_set_ext_key_end(ext)->data;
+       else
+               end = start;
+
        dup = pipapo_get(net, set, start, genmask);
-       if (PTR_ERR(dup) == -ENOENT) {
-               if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END)) {
-                       end = (const u8 *)nft_set_ext_key_end(ext)->data;
-                       dup = pipapo_get(net, set, end, nft_genmask_next(net));
-               } else {
-                       end = start;
+       if (!IS_ERR(dup)) {
+               /* Check if we already have the same exact entry */
+               const struct nft_data *dup_key, *dup_end;
+
+               dup_key = nft_set_ext_key(&dup->ext);
+               if (nft_set_ext_exists(&dup->ext, NFT_SET_EXT_KEY_END))
+                       dup_end = nft_set_ext_key_end(&dup->ext);
+               else
+                       dup_end = dup_key;
+
+               if (!memcmp(start, dup_key->data, sizeof(*dup_key->data)) &&
+                   !memcmp(end, dup_end->data, sizeof(*dup_end->data))) {
+                       *ext2 = &dup->ext;
+                       return -EEXIST;
                }
+
+               return -ENOTEMPTY;
+       }
+
+       if (PTR_ERR(dup) == -ENOENT) {
+               /* Look for partially overlapping entries */
+               dup = pipapo_get(net, set, end, nft_genmask_next(net));
        }
 
        if (PTR_ERR(dup) != -ENOENT) {
                if (IS_ERR(dup))
                        return PTR_ERR(dup);
                *ext2 = &dup->ext;
-               return -EEXIST;
+               return -ENOTEMPTY;
        }
 
        /* Validate */
index 5000b93..8617fc1 100644 (file)
@@ -33,6 +33,11 @@ static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
               (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
 }
 
+static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
+{
+       return !nft_rbtree_interval_end(rbe);
+}
+
 static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
                             const struct nft_rbtree_elem *interval)
 {
@@ -64,7 +69,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
                        if (interval &&
                            nft_rbtree_equal(set, this, interval) &&
                            nft_rbtree_interval_end(rbe) &&
-                           !nft_rbtree_interval_end(interval))
+                           nft_rbtree_interval_start(interval))
                                continue;
                        interval = rbe;
                } else if (d > 0)
@@ -89,7 +94,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
 
        if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
            nft_set_elem_active(&interval->ext, genmask) &&
-           !nft_rbtree_interval_end(interval)) {
+           nft_rbtree_interval_start(interval)) {
                *ext = &interval->ext;
                return true;
        }
@@ -208,8 +213,43 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
        u8 genmask = nft_genmask_next(net);
        struct nft_rbtree_elem *rbe;
        struct rb_node *parent, **p;
+       bool overlap = false;
        int d;
 
+       /* Detect overlaps as we descend the tree. Set the flag in these cases:
+        *
+        * a1. |__ _ _?  >|__ _ _  (insert start after existing start)
+        * a2. _ _ __>|  ?_ _ __|  (insert end before existing end)
+        * a3. _ _ ___|  ?_ _ _>|  (insert end after existing end)
+        * a4. >|__ _ _   _ _ __|  (insert start before existing end)
+        *
+        * and clear it later on, as we eventually reach the points indicated by
+        * '?' above, in the cases described below. We'll always meet these
+        * later, locally, due to tree ordering, and overlaps for the intervals
+        * that are the closest together are always evaluated last.
+        *
+        * b1. |__ _ _!  >|__ _ _  (insert start after existing end)
+        * b2. _ _ __>|  !_ _ __|  (insert end before existing start)
+        * b3. !_____>|            (insert end after existing start)
+        *
+        * Case a4. resolves to b1.:
+        * - if the inserted start element is the leftmost, because the '0'
+        *   element in the tree serves as end element
+        * - otherwise, if an existing end is found. Note that end elements are
+        *   always inserted after corresponding start elements.
+        *
+        * For a new, rightmost pair of elements, we'll hit cases b1. and b3.,
+        * in that order.
+        *
+        * The flag is also cleared in two special cases:
+        *
+        * b4. |__ _ _!|<_ _ _   (insert start right before existing end)
+        * b5. |__ _ >|!__ _ _   (insert end right after existing start)
+        *
+        * which always happen as last step and imply that no further
+        * overlapping is possible.
+        */
+
        parent = NULL;
        p = &priv->root.rb_node;
        while (*p != NULL) {
@@ -218,17 +258,42 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                d = memcmp(nft_set_ext_key(&rbe->ext),
                           nft_set_ext_key(&new->ext),
                           set->klen);
-               if (d < 0)
+               if (d < 0) {
                        p = &parent->rb_left;
-               else if (d > 0)
+
+                       if (nft_rbtree_interval_start(new)) {
+                               overlap = nft_rbtree_interval_start(rbe) &&
+                                         nft_set_elem_active(&rbe->ext,
+                                                             genmask);
+                       } else {
+                               overlap = nft_rbtree_interval_end(rbe) &&
+                                         nft_set_elem_active(&rbe->ext,
+                                                             genmask);
+                       }
+               } else if (d > 0) {
                        p = &parent->rb_right;
-               else {
+
+                       if (nft_rbtree_interval_end(new)) {
+                               overlap = nft_rbtree_interval_end(rbe) &&
+                                         nft_set_elem_active(&rbe->ext,
+                                                             genmask);
+                       } else if (nft_rbtree_interval_end(rbe) &&
+                                  nft_set_elem_active(&rbe->ext, genmask)) {
+                               overlap = true;
+                       }
+               } else {
                        if (nft_rbtree_interval_end(rbe) &&
-                           !nft_rbtree_interval_end(new)) {
+                           nft_rbtree_interval_start(new)) {
                                p = &parent->rb_left;
-                       } else if (!nft_rbtree_interval_end(rbe) &&
+
+                               if (nft_set_elem_active(&rbe->ext, genmask))
+                                       overlap = false;
+                       } else if (nft_rbtree_interval_start(rbe) &&
                                   nft_rbtree_interval_end(new)) {
                                p = &parent->rb_right;
+
+                               if (nft_set_elem_active(&rbe->ext, genmask))
+                                       overlap = false;
                        } else if (nft_set_elem_active(&rbe->ext, genmask)) {
                                *ext = &rbe->ext;
                                return -EEXIST;
@@ -237,6 +302,10 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                        }
                }
        }
+
+       if (overlap)
+               return -ENOTEMPTY;
+
        rb_link_node_rcu(&new->node, parent, p);
        rb_insert_color(&new->node, &priv->root);
        return 0;
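
The descent logic above rejects an insertion with -ENOTEMPTY when the new start/end pair would cut into an existing active interval. Stripped of the tree mechanics, the condition being guarded against is the ordinary interval-overlap predicate; below is a hedged standalone version for half-open ranges (nftables element semantics are richer than this, the snippet only illustrates the predicate):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Half-open ranges [start, end): they overlap iff each one starts
     * before the other one ends. */
    static bool ranges_overlap(uint32_t a_start, uint32_t a_end,
                               uint32_t b_start, uint32_t b_end)
    {
        return a_start < b_end && b_start < a_end;
    }

    int main(void)
    {
        /* existing element: [10, 20) */
        printf("%d\n", ranges_overlap(10, 20, 15, 30));  /* 1: partial overlap  */
        printf("%d\n", ranges_overlap(10, 20, 20, 30));  /* 0: adjacent is fine */
        printf("%d\n", ranges_overlap(10, 20, 12, 15));  /* 1: nested           */
        return 0;
    }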
@@ -317,10 +386,10 @@ static void *nft_rbtree_deactivate(const struct net *net,
                        parent = parent->rb_right;
                else {
                        if (nft_rbtree_interval_end(rbe) &&
-                           !nft_rbtree_interval_end(this)) {
+                           nft_rbtree_interval_start(this)) {
                                parent = parent->rb_left;
                                continue;
-                       } else if (!nft_rbtree_interval_end(rbe) &&
+                       } else if (nft_rbtree_interval_start(rbe) &&
                                   nft_rbtree_interval_end(this)) {
                                parent = parent->rb_right;
                                continue;
index 4c3f2e2..764e886 100644 (file)
@@ -339,6 +339,8 @@ static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] =
        [NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, },
        [NFTA_TUNNEL_KEY_TOS]   = { .type = NLA_U8, },
        [NFTA_TUNNEL_KEY_TTL]   = { .type = NLA_U8, },
+       [NFTA_TUNNEL_KEY_SPORT] = { .type = NLA_U16, },
+       [NFTA_TUNNEL_KEY_DPORT] = { .type = NLA_U16, },
        [NFTA_TUNNEL_KEY_OPTS]  = { .type = NLA_NESTED, },
 };
 
index e27c6c5..cd2b034 100644 (file)
@@ -1551,6 +1551,9 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
        uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
        struct nf_mttg_trav *trav = seq->private;
 
+       if (ppos != NULL)
+               ++(*ppos);
+
        switch (trav->class) {
        case MTTG_TRAV_INIT:
                trav->class = MTTG_TRAV_NFP_UNSPEC;
@@ -1576,9 +1579,6 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
        default:
                return NULL;
        }
-
-       if (ppos != NULL)
-               ++*ppos;
        return trav;
 }
 
index 0a97080..225a7ab 100644 (file)
@@ -492,12 +492,12 @@ static void *recent_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        const struct recent_entry *e = v;
        const struct list_head *head = e->list.next;
 
+       (*pos)++;
        while (head == &t->iphash[st->bucket]) {
                if (++st->bucket >= ip_list_hash_size)
                        return NULL;
                head = t->iphash[st->bucket].next;
        }
-       (*pos)++;
        return list_entry(head, struct recent_entry, list);
 }
 
index edf3e28..2f23479 100644 (file)
@@ -2392,19 +2392,14 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
        if (nlk_has_extack && extack && extack->_msg)
                tlvlen += nla_total_size(strlen(extack->_msg) + 1);
 
-       if (err) {
-               if (!(nlk->flags & NETLINK_F_CAP_ACK))
-                       payload += nlmsg_len(nlh);
-               else
-                       flags |= NLM_F_CAPPED;
-               if (nlk_has_extack && extack && extack->bad_attr)
-                       tlvlen += nla_total_size(sizeof(u32));
-       } else {
+       if (err && !(nlk->flags & NETLINK_F_CAP_ACK))
+               payload += nlmsg_len(nlh);
+       else
                flags |= NLM_F_CAPPED;
-
-               if (nlk_has_extack && extack && extack->cookie_len)
-                       tlvlen += nla_total_size(extack->cookie_len);
-       }
+       if (err && nlk_has_extack && extack && extack->bad_attr)
+               tlvlen += nla_total_size(sizeof(u32));
+       if (nlk_has_extack && extack && extack->cookie_len)
+               tlvlen += nla_total_size(extack->cookie_len);
 
        if (tlvlen)
                flags |= NLM_F_ACK_TLVS;
@@ -2427,20 +2422,16 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
                        WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG,
                                               extack->_msg));
                }
-               if (err) {
-                       if (extack->bad_attr &&
-                           !WARN_ON((u8 *)extack->bad_attr < in_skb->data ||
-                                    (u8 *)extack->bad_attr >= in_skb->data +
-                                                              in_skb->len))
-                               WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS,
-                                                   (u8 *)extack->bad_attr -
-                                                   in_skb->data));
-               } else {
-                       if (extack->cookie_len)
-                               WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE,
-                                               extack->cookie_len,
-                                               extack->cookie));
-               }
+               if (err && extack->bad_attr &&
+                   !WARN_ON((u8 *)extack->bad_attr < in_skb->data ||
+                            (u8 *)extack->bad_attr >= in_skb->data +
+                                                      in_skb->len))
+                       WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS,
+                                           (u8 *)extack->bad_attr -
+                                           (u8 *)nlh));
+               if (extack->cookie_len)
+                       WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE,
+                                       extack->cookie_len, extack->cookie));
        }
 
        nlmsg_end(skb, rep);
index 6f1b096..43811b5 100644 (file)
@@ -181,13 +181,20 @@ exit:
 void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
                          struct sk_buff *skb)
 {
-       u8 gate = hdev->pipes[pipe].gate;
        u8 status = NFC_HCI_ANY_OK;
        struct hci_create_pipe_resp *create_info;
        struct hci_delete_pipe_noti *delete_info;
        struct hci_all_pipe_cleared_noti *cleared_info;
+       u8 gate;
 
-       pr_debug("from gate %x pipe %x cmd %x\n", gate, pipe, cmd);
+       pr_debug("from pipe %x cmd %x\n", pipe, cmd);
+
+       if (pipe >= NFC_HCI_MAX_PIPES) {
+               status = NFC_HCI_ANY_E_NOK;
+               goto exit;
+       }
+
+       gate = hdev->pipes[pipe].gate;
 
        switch (cmd) {
        case NFC_HCI_ADM_NOTIFY_PIPE_CREATED:
@@ -375,8 +382,14 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
                            struct sk_buff *skb)
 {
        int r = 0;
-       u8 gate = hdev->pipes[pipe].gate;
+       u8 gate;
+
+       if (pipe >= NFC_HCI_MAX_PIPES) {
+               pr_err("Discarded event %x to invalid pipe %x\n", event, pipe);
+               goto exit;
+       }
 
+       gate = hdev->pipes[pipe].gate;
        if (gate == NFC_HCI_INVALID_GATE) {
                pr_err("Discarded event %x to unopened pipe %x\n", event, pipe);
                goto exit;
index eee0ddd..e894254 100644 (file)
@@ -32,6 +32,7 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
        [NFC_ATTR_DEVICE_NAME] = { .type = NLA_STRING,
                                .len = NFC_DEVICE_NAME_MAXSIZE },
        [NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 },
+       [NFC_ATTR_TARGET_INDEX] = { .type = NLA_U32 },
        [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 },
        [NFC_ATTR_RF_MODE] = { .type = NLA_U8 },
        [NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 },
@@ -43,7 +44,10 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
        [NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED },
        [NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING,
                                     .len = NFC_FIRMWARE_NAME_MAXSIZE },
+       [NFC_ATTR_SE_INDEX] = { .type = NLA_U32 },
        [NFC_ATTR_SE_APDU] = { .type = NLA_BINARY },
+       [NFC_ATTR_VENDOR_ID] = { .type = NLA_U32 },
+       [NFC_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 },
        [NFC_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
 
 };
index c047afd..07a7dd1 100644 (file)
@@ -645,6 +645,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
        [OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
+       [OVS_PACKET_ATTR_HASH] = { .type = NLA_U64 },
 };
 
 static const struct genl_ops dp_packet_genl_ops[] = {
index 30c6879..29bd405 100644 (file)
@@ -2173,6 +2173,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        struct timespec64 ts;
        __u32 ts_status;
        bool is_drop_n_account = false;
+       unsigned int slot_id = 0;
        bool do_vnet = false;
 
        /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
@@ -2274,6 +2275,20 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                                        TP_STATUS_KERNEL, (macoff+snaplen));
        if (!h.raw)
                goto drop_n_account;
+
+       if (po->tp_version <= TPACKET_V2) {
+               slot_id = po->rx_ring.head;
+               if (test_bit(slot_id, po->rx_ring.rx_owner_map))
+                       goto drop_n_account;
+               __set_bit(slot_id, po->rx_ring.rx_owner_map);
+       }
+
+       if (do_vnet &&
+           virtio_net_hdr_from_skb(skb, h.raw + macoff -
+                                   sizeof(struct virtio_net_hdr),
+                                   vio_le(), true, 0))
+               goto drop_n_account;
+
        if (po->tp_version <= TPACKET_V2) {
                packet_increment_rx_head(po, &po->rx_ring);
        /*
@@ -2286,12 +2301,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                        status |= TP_STATUS_LOSING;
        }
 
-       if (do_vnet &&
-           virtio_net_hdr_from_skb(skb, h.raw + macoff -
-                                   sizeof(struct virtio_net_hdr),
-                                   vio_le(), true, 0))
-               goto drop_n_account;
-
        po->stats.stats1.tp_packets++;
        if (copy_skb) {
                status |= TP_STATUS_COPY;
@@ -2379,7 +2388,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 #endif
 
        if (po->tp_version <= TPACKET_V2) {
+               spin_lock(&sk->sk_receive_queue.lock);
                __packet_set_status(po, h.raw, status);
+               __clear_bit(slot_id, po->rx_ring.rx_owner_map);
+               spin_unlock(&sk->sk_receive_queue.lock);
                sk->sk_data_ready(sk);
        } else {
                prb_clear_blk_fill_status(&po->rx_ring);
@@ -4276,6 +4288,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 {
        struct pgv *pg_vec = NULL;
        struct packet_sock *po = pkt_sk(sk);
+       unsigned long *rx_owner_map = NULL;
        int was_running, order = 0;
        struct packet_ring_buffer *rb;
        struct sk_buff_head *rb_queue;
@@ -4361,6 +4374,12 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                        }
                        break;
                default:
+                       if (!tx_ring) {
+                               rx_owner_map = bitmap_alloc(req->tp_frame_nr,
+                                       GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+                               if (!rx_owner_map)
+                                       goto out_free_pg_vec;
+                       }
                        break;
                }
        }
@@ -4390,6 +4409,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                err = 0;
                spin_lock_bh(&rb_queue->lock);
                swap(rb->pg_vec, pg_vec);
+               if (po->tp_version <= TPACKET_V2)
+                       swap(rb->rx_owner_map, rx_owner_map);
                rb->frame_max = (req->tp_frame_nr - 1);
                rb->head = 0;
                rb->frame_size = req->tp_frame_size;
@@ -4421,6 +4442,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
        }
 
 out_free_pg_vec:
+       bitmap_free(rx_owner_map);
        if (pg_vec)
                free_pg_vec(pg_vec, order, req->tp_block_nr);
 out:
index 82fb2b1..907f4cd 100644 (file)
@@ -70,7 +70,10 @@ struct packet_ring_buffer {
 
        unsigned int __percpu   *pending_refcnt;
 
-       struct tpacket_kbdq_core        prb_bdqc;
+       union {
+               unsigned long                   *rx_owner_map;
+               struct tpacket_kbdq_core        prb_bdqc;
+       };
 };
 
 extern struct mutex fanout_mutex;
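
The af_packet hunks above close a ring-buffer race by tracking, per rx ring slot, whether the kernel is still filling that frame before its status is published. The sketch below is only a hedged illustration of that ownership-bitmap pattern: the struct and helper names are ours, not from the patch, and it assumes one bit per frame indexed by the ring head.

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>

/* Illustrative sketch (not the patch itself): claim a slot before
 * filling its frame, refuse slots whose previous fill has not been
 * published yet, and release ownership under the same lock that
 * publishes the frame status. */
struct slot_ring {
	unsigned long	*owner_map;	/* one bit per ring frame */
	spinlock_t	 lock;		/* also guards status publication */
};

static bool slot_claim(struct slot_ring *r, unsigned int slot)
{
	if (test_bit(slot, r->owner_map))
		return false;		/* previous fill not yet published */
	__set_bit(slot, r->owner_map);
	return true;
}

static void slot_release(struct slot_ring *r, unsigned int slot)
{
	spin_lock(&r->lock);
	/* the frame status would be written here, then ownership dropped */
	__clear_bit(slot, r->owner_map);
	spin_unlock(&r->lock);
}
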
index fe42f98..15ee92d 100644 (file)
@@ -285,7 +285,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
                                           gfp_t gfp,
                                           rxrpc_notify_rx_t notify_rx,
                                           bool upgrade,
-                                          bool intr,
+                                          enum rxrpc_interruptibility interruptibility,
                                           unsigned int debug_id)
 {
        struct rxrpc_conn_parameters cp;
@@ -310,7 +310,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
        memset(&p, 0, sizeof(p));
        p.user_call_ID = user_call_ID;
        p.tx_total_len = tx_total_len;
-       p.intr = intr;
+       p.interruptibility = interruptibility;
 
        memset(&cp, 0, sizeof(cp));
        cp.local                = rx->local;
@@ -371,45 +371,18 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
  * rxrpc_kernel_check_life - Check to see whether a call is still alive
  * @sock: The socket the call is on
  * @call: The call to check
- * @_life: Where to store the life value
  *
- * Allow a kernel service to find out whether a call is still alive - ie. we're
- * getting ACKs from the server.  Passes back in *_life a number representing
- * the life state which can be compared to that returned by a previous call and
- * return true if the call is still alive.
- *
- * If the life state stalls, rxrpc_kernel_probe_life() should be called and
- * then 2RTT waited.
+ * Allow a kernel service to find out whether a call is still alive -
+ * ie. whether it has completed.
  */
 bool rxrpc_kernel_check_life(const struct socket *sock,
-                            const struct rxrpc_call *call,
-                            u32 *_life)
+                            const struct rxrpc_call *call)
 {
-       *_life = call->acks_latest;
        return call->state != RXRPC_CALL_COMPLETE;
 }
 EXPORT_SYMBOL(rxrpc_kernel_check_life);
 
 /**
- * rxrpc_kernel_probe_life - Poke the peer to see if it's still alive
- * @sock: The socket the call is on
- * @call: The call to check
- *
- * In conjunction with rxrpc_kernel_check_life(), allow a kernel service to
- * find out whether a call is still alive by pinging it.  This should cause the
- * life state to be bumped in about 2*RTT.
- *
- * The must be called in TASK_RUNNING state on pain of might_sleep() objecting.
- */
-void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
-{
-       rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
-                         rxrpc_propose_ack_ping_for_check_life);
-       rxrpc_send_ack_packet(call, true, NULL);
-}
-EXPORT_SYMBOL(rxrpc_kernel_probe_life);
-
-/**
  * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call.
  * @sock: The socket the call is on
  * @call: The call to query
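
With the _life out-parameter gone, rxrpc_kernel_check_life() now only reports whether the call has reached RXRPC_CALL_COMPLETE. A minimal caller-side sketch against the new two-argument signature (the wrapper name is hypothetical, not part of the patch):

#include <net/af_rxrpc.h>

/* Hypothetical wrapper for a kernel service using the new signature:
 * true while the call has not yet completed. */
static bool my_call_still_alive(struct socket *rxrpc_sock,
				struct rxrpc_call *call)
{
	return rxrpc_kernel_check_life(rxrpc_sock, call);
}
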
index 7d730c4..3eb1ab4 100644 (file)
@@ -489,7 +489,6 @@ enum rxrpc_call_flag {
        RXRPC_CALL_BEGAN_RX_TIMER,      /* We began the expect_rx_by timer */
        RXRPC_CALL_RX_HEARD,            /* The peer responded at least once to this call */
        RXRPC_CALL_RX_UNDERRUN,         /* Got data underrun */
-       RXRPC_CALL_IS_INTR,             /* The call is interruptible */
        RXRPC_CALL_DISCONNECTED,        /* The call has been disconnected */
 };
 
@@ -598,6 +597,7 @@ struct rxrpc_call {
        atomic_t                usage;
        u16                     service_id;     /* service ID */
        u8                      security_ix;    /* Security type */
+       enum rxrpc_interruptibility interruptibility; /* At what point call may be interrupted */
        u32                     call_id;        /* call ID on connection  */
        u32                     cid;            /* connection ID plus channel index */
        int                     debug_id;       /* debug ID for printks */
@@ -675,7 +675,6 @@ struct rxrpc_call {
 
        /* transmission-phase ACK management */
        ktime_t                 acks_latest_ts; /* Timestamp of latest ACK received */
-       rxrpc_serial_t          acks_latest;    /* serial number of latest ACK received */
        rxrpc_seq_t             acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
        rxrpc_seq_t             acks_lost_top;  /* tx_top at the time lost-ack ping sent */
        rxrpc_serial_t          acks_lost_ping; /* Serial number of probe ACK */
@@ -721,7 +720,7 @@ struct rxrpc_call_params {
                u32             normal;         /* Max time since last call packet (msec) */
        } timeouts;
        u8                      nr_timeouts;    /* Number of timeouts specified */
-       bool                    intr;           /* The call is interruptible */
+       enum rxrpc_interruptibility interruptibility; /* How interruptible is the call? */
 };
 
 struct rxrpc_send_params {
index c9f34b0..f079702 100644 (file)
@@ -237,8 +237,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
                return call;
        }
 
-       if (p->intr)
-               __set_bit(RXRPC_CALL_IS_INTR, &call->flags);
+       call->interruptibility = p->interruptibility;
        call->tx_total_len = p->tx_total_len;
        trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
                         atomic_read(&call->usage),
index ea7d4c2..f2a1a5d 100644 (file)
@@ -655,13 +655,20 @@ static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
 
                add_wait_queue_exclusive(&call->waitq, &myself);
                for (;;) {
-                       if (test_bit(RXRPC_CALL_IS_INTR, &call->flags))
+                       switch (call->interruptibility) {
+                       case RXRPC_INTERRUPTIBLE:
+                       case RXRPC_PREINTERRUPTIBLE:
                                set_current_state(TASK_INTERRUPTIBLE);
-                       else
+                               break;
+                       case RXRPC_UNINTERRUPTIBLE:
+                       default:
                                set_current_state(TASK_UNINTERRUPTIBLE);
+                               break;
+                       }
                        if (call->call_id)
                                break;
-                       if (test_bit(RXRPC_CALL_IS_INTR, &call->flags) &&
+                       if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
+                            call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
                            signal_pending(current)) {
                                ret = -ERESTARTSYS;
                                break;
index ef10fbf..69e09d6 100644 (file)
@@ -882,7 +882,6 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
            before(prev_pkt, call->ackr_prev_seq))
                goto out;
        call->acks_latest_ts = skb->tstamp;
-       call->acks_latest = sp->hdr.serial;
 
        call->ackr_first_seq = first_soft_ack;
        call->ackr_prev_seq = prev_pkt;
index 813fd68..0fcf157 100644 (file)
 #include "ar-internal.h"
 
 /*
+ * Return true if there's sufficient Tx queue space.
+ */
+static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win)
+{
+       unsigned int win_size =
+               min_t(unsigned int, call->tx_winsize,
+                     call->cong_cwnd + call->cong_extra);
+       rxrpc_seq_t tx_win = READ_ONCE(call->tx_hard_ack);
+
+       if (_tx_win)
+               *_tx_win = tx_win;
+       return call->tx_top - tx_win < win_size;
+}
+
+/*
  * Wait for space to appear in the Tx queue or a signal to occur.
  */
 static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
@@ -26,9 +41,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
 {
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
-               if (call->tx_top - call->tx_hard_ack <
-                   min_t(unsigned int, call->tx_winsize,
-                         call->cong_cwnd + call->cong_extra))
+               if (rxrpc_check_tx_space(call, NULL))
                        return 0;
 
                if (call->state >= RXRPC_CALL_COMPLETE)
@@ -49,7 +62,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
  * Wait for space to appear in the Tx queue uninterruptibly, but with
  * a timeout of 2*RTT if no progress was made and a signal occurred.
  */
-static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
+static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
                                            struct rxrpc_call *call)
 {
        rxrpc_seq_t tx_start, tx_win;
@@ -58,8 +71,8 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
 
        rtt = READ_ONCE(call->peer->rtt);
        rtt2 = nsecs_to_jiffies64(rtt) * 2;
-       if (rtt2 < 1)
-               rtt2 = 1;
+       if (rtt2 < 2)
+               rtt2 = 2;
 
        timeout = rtt2;
        tx_start = READ_ONCE(call->tx_hard_ack);
@@ -68,16 +81,13 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
                set_current_state(TASK_UNINTERRUPTIBLE);
 
                tx_win = READ_ONCE(call->tx_hard_ack);
-               if (call->tx_top - tx_win <
-                   min_t(unsigned int, call->tx_winsize,
-                         call->cong_cwnd + call->cong_extra))
+               if (rxrpc_check_tx_space(call, &tx_win))
                        return 0;
 
                if (call->state >= RXRPC_CALL_COMPLETE)
                        return call->error;
 
-               if (test_bit(RXRPC_CALL_IS_INTR, &call->flags) &&
-                   timeout == 0 &&
+               if (timeout == 0 &&
                    tx_win == tx_start && signal_pending(current))
                        return -EINTR;
 
@@ -92,6 +102,26 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
 }
 
 /*
+ * Wait for space to appear in the Tx queue uninterruptibly.
+ */
+static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
+                                           struct rxrpc_call *call,
+                                           long *timeo)
+{
+       for (;;) {
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               if (rxrpc_check_tx_space(call, NULL))
+                       return 0;
+
+               if (call->state >= RXRPC_CALL_COMPLETE)
+                       return call->error;
+
+               trace_rxrpc_transmit(call, rxrpc_transmit_wait);
+               *timeo = schedule_timeout(*timeo);
+       }
+}
+
+/*
  * wait for space to appear in the transmit/ACK window
  * - caller holds the socket locked
  */
@@ -108,10 +138,19 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
 
        add_wait_queue(&call->waitq, &myself);
 
-       if (waitall)
-               ret = rxrpc_wait_for_tx_window_nonintr(rx, call);
-       else
-               ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo);
+       switch (call->interruptibility) {
+       case RXRPC_INTERRUPTIBLE:
+               if (waitall)
+                       ret = rxrpc_wait_for_tx_window_waitall(rx, call);
+               else
+                       ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo);
+               break;
+       case RXRPC_PREINTERRUPTIBLE:
+       case RXRPC_UNINTERRUPTIBLE:
+       default:
+               ret = rxrpc_wait_for_tx_window_nonintr(rx, call, timeo);
+               break;
+       }
 
        remove_wait_queue(&call->waitq, &myself);
        set_current_state(TASK_RUNNING);
@@ -302,9 +341,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
 
                        _debug("alloc");
 
-                       if (call->tx_top - call->tx_hard_ack >=
-                           min_t(unsigned int, call->tx_winsize,
-                                 call->cong_cwnd + call->cong_extra)) {
+                       if (!rxrpc_check_tx_space(call, NULL)) {
                                ret = -EAGAIN;
                                if (msg->msg_flags & MSG_DONTWAIT)
                                        goto maybe_error;
@@ -619,7 +656,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                .call.tx_total_len      = -1,
                .call.user_call_ID      = 0,
                .call.nr_timeouts       = 0,
-               .call.intr              = true,
+               .call.interruptibility  = RXRPC_INTERRUPTIBLE,
                .abort_code             = 0,
                .command                = RXRPC_CMD_SEND_DATA,
                .exclusive              = false,
index f685c0d..41114b4 100644 (file)
@@ -739,7 +739,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
        if (params)
-               kfree_rcu(params, rcu);
+               call_rcu(&params->rcu, tcf_ct_params_free);
        if (res == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
 
index 1ad300e..83dd82f 100644 (file)
@@ -284,10 +284,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
 
        /* mirror is always swallowed */
        if (is_redirect) {
-               skb2->tc_redirected = 1;
-               skb2->tc_from_ingress = skb2->tc_at_ingress;
-               if (skb2->tc_from_ingress)
-                       skb2->tstamp = 0;
+               skb_set_redirected(skb2, skb2->tc_at_ingress);
+
                /* let's the caller reinsert the packet, if possible */
                if (use_reinsert) {
                        res->ingress = want_ingress;
index 6f8786b..5efa3e7 100644 (file)
@@ -534,8 +534,8 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
                        fp = &b->ht[h];
                        for (pfp = rtnl_dereference(*fp); pfp;
                             fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
-                               if (pfp == f) {
-                                       *fp = f->next;
+                               if (pfp == fold) {
+                                       rcu_assign_pointer(*fp, fold->next);
                                        break;
                                }
                        }
index 09b7dc5..9904299 100644 (file)
@@ -261,8 +261,10 @@ static void tcindex_partial_destroy_work(struct work_struct *work)
                                              struct tcindex_data,
                                              rwork);
 
+       rtnl_lock();
        kfree(p->perfect);
        kfree(p);
+       rtnl_unlock();
 }
 
 static void tcindex_free_perfect_hash(struct tcindex_data *cp)
@@ -357,6 +359,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 
                if (tcindex_alloc_perfect_hash(net, cp) < 0)
                        goto errout;
+               cp->alloc_hash = cp->hash;
                for (i = 0; i < min(cp->hash, p->hash); i++)
                        cp->perfect[i].res = p->perfect[i].res;
                balloc = 1;
index b2905b0..2eaac2f 100644 (file)
@@ -181,6 +181,11 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
        s64 credits;
        int len;
 
+       /* The previous packet is still being sent */
+       if (now < q->last) {
+               qdisc_watchdog_schedule_ns(&q->watchdog, q->last);
+               return NULL;
+       }
        if (q->credits < 0) {
                credits = timediff_to_credits(now - q->last, q->idleslope);
 
@@ -212,7 +217,12 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
        credits += q->credits;
 
        q->credits = max_t(s64, credits, q->locredit);
-       q->last = now;
+       /* Estimate of the transmission of the last byte of the packet in ns */
+       if (unlikely(atomic64_read(&q->port_rate) == 0))
+               q->last = now;
+       else
+               q->last = now + div64_s64(len * NSEC_PER_SEC,
+                                         atomic64_read(&q->port_rate));
 
        return skb;
 }
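
The dequeue path above now advances q->last to the estimated moment the final byte of the packet leaves the port. Assuming port_rate is in bytes per second (as the surrounding sch_cbs code suggests), the arithmetic reduces to the self-contained sketch below; the helper name is ours, not from the patch:

#include <linux/math64.h>
#include <linux/time64.h>

/* Sketch of the estimate: a len-byte packet takes roughly
 * len * NSEC_PER_SEC / port_rate nanoseconds to drain, so the "last
 * transmission" timestamp is now plus that delta.  A zero rate falls
 * back to `now`, mirroring the unlikely() guard in the hunk. */
static s64 cbs_last_byte_tx_time_ns(s64 now, int len, s64 port_rate)
{
	if (port_rate == 0)
		return now;
	return now + div64_s64((s64)len * NSEC_PER_SEC, port_rate);
}
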
index a5a2954..371ad84 100644 (file)
@@ -744,6 +744,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
        [TCA_FQ_FLOW_MAX_RATE]          = { .type = NLA_U32 },
        [TCA_FQ_BUCKETS_LOG]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_REFILL_DELAY]      = { .type = NLA_U32 },
+       [TCA_FQ_ORPHAN_MASK]            = { .type = NLA_U32 },
        [TCA_FQ_LOW_RATE_THRESHOLD]     = { .type = NLA_U32 },
        [TCA_FQ_CE_THRESHOLD]           = { .type = NLA_U32 },
 };
index 660fc45..b1eb12d 100644 (file)
@@ -564,8 +564,10 @@ static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
                prio = skb->priority;
                tc = netdev_get_prio_tc_map(dev, prio);
 
-               if (!(gate_mask & BIT(tc)))
+               if (!(gate_mask & BIT(tc))) {
+                       skb = NULL;
                        continue;
+               }
 
                len = qdisc_pkt_len(skb);
                guard = ktime_add_ns(taprio_get_time(q),
@@ -575,13 +577,17 @@ static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
                 * guard band ...
                 */
                if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
-                   ktime_after(guard, entry->close_time))
+                   ktime_after(guard, entry->close_time)) {
+                       skb = NULL;
                        continue;
+               }
 
                /* ... and no budget. */
                if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
-                   atomic_sub_return(len, &entry->budget) < 0)
+                   atomic_sub_return(len, &entry->budget) < 0) {
+                       skb = NULL;
                        continue;
+               }
 
                skb = child->ops->dequeue(child);
                if (unlikely(!skb))
@@ -768,6 +774,7 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
        [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
        [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
        [TCA_TAPRIO_ATTR_FLAGS]                      = { .type = NLA_U32 },
+       [TCA_TAPRIO_ATTR_TXTIME_DELAY]               = { .type = NLA_U32 },
 };
 
 static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
index 8a15146..1069d7a 100644 (file)
@@ -237,15 +237,11 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
                addrcnt++;
 
        return    nla_total_size(sizeof(struct sctp_info))
-               + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
-               + nla_total_size(1) /* INET_DIAG_TOS */
-               + nla_total_size(1) /* INET_DIAG_TCLASS */
-               + nla_total_size(4) /* INET_DIAG_MARK */
-               + nla_total_size(4) /* INET_DIAG_CLASS_ID */
                + nla_total_size(addrlen * asoc->peer.transport_count)
                + nla_total_size(addrlen * addrcnt)
-               + nla_total_size(sizeof(struct inet_diag_meminfo))
                + nla_total_size(sizeof(struct inet_diag_msg))
+               + inet_diag_msg_attrs_size()
+               + nla_total_size(sizeof(struct inet_diag_meminfo))
                + 64;
 }
 
index d6ba186..05b825b 100644 (file)
@@ -582,6 +582,7 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
        smc_smcr_terminate_all(smcibdev);
        smc_ib_cleanup_per_ibdev(smcibdev);
        ib_unregister_event_handler(&smcibdev->event_handler);
+       cancel_work_sync(&smcibdev->port_event_work);
        kfree(smcibdev);
 }
 
index b79a05d..2eecf15 100644 (file)
@@ -1707,7 +1707,8 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
 
 int __sys_accept4_file(struct file *file, unsigned file_flags,
                       struct sockaddr __user *upeer_sockaddr,
-                      int __user *upeer_addrlen, int flags)
+                      int __user *upeer_addrlen, int flags,
+                      unsigned long nofile)
 {
        struct socket *sock, *newsock;
        struct file *newfile;
@@ -1738,7 +1739,7 @@ int __sys_accept4_file(struct file *file, unsigned file_flags,
         */
        __module_get(newsock->ops->owner);
 
-       newfd = get_unused_fd_flags(flags);
+       newfd = __get_unused_fd_flags(flags, nofile);
        if (unlikely(newfd < 0)) {
                err = newfd;
                sock_release(newsock);
@@ -1807,7 +1808,8 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
        f = fdget(fd);
        if (f.file) {
                ret = __sys_accept4_file(f.file, 0, upeer_sockaddr,
-                                               upeer_addrlen, flags);
+                                               upeer_addrlen, flags,
+                                               rlimit(RLIMIT_NOFILE));
                if (f.flags)
                        fput(f.file);
        }
index 7c35094..bb98624 100644 (file)
@@ -116,6 +116,7 @@ const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
        [TIPC_NLA_PROP_PRIO]            = { .type = NLA_U32 },
        [TIPC_NLA_PROP_TOL]             = { .type = NLA_U32 },
        [TIPC_NLA_PROP_WIN]             = { .type = NLA_U32 },
+       [TIPC_NLA_PROP_MTU]             = { .type = NLA_U32 },
        [TIPC_NLA_PROP_BROADCAST]       = { .type = NLA_U32 },
        [TIPC_NLA_PROP_BROADCAST_RATIO] = { .type = NLA_U32 }
 };
index 5b19e9f..f0af23c 100644 (file)
@@ -470,6 +470,8 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED },
        [NL80211_ATTR_STA_PLINK_STATE] =
                NLA_POLICY_MAX(NLA_U8, NUM_NL80211_PLINK_STATES - 1),
+       [NL80211_ATTR_MEASUREMENT_DURATION] = { .type = NLA_U16 },
+       [NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY] = { .type = NLA_FLAG },
        [NL80211_ATTR_MESH_PEER_AID] =
                NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID),
        [NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 },
@@ -531,6 +533,8 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_MDID] = { .type = NLA_U16 },
        [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
                                  .len = IEEE80211_MAX_DATA_LEN },
+       [NL80211_ATTR_CRIT_PROT_ID] = { .type = NLA_U16 },
+       [NL80211_ATTR_MAX_CRIT_PROT_DURATION] = { .type = NLA_U16 },
        [NL80211_ATTR_PEER_AID] =
                NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID),
        [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 },
@@ -561,6 +565,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
                NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_UPS - 1),
        [NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 },
        [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 },
+       [NL80211_ATTR_OPER_CLASS] = { .type = NLA_U8 },
        [NL80211_ATTR_MAC_MASK] = {
                .type = NLA_EXACT_LEN_WARN,
                .len = ETH_ALEN
@@ -16411,7 +16416,7 @@ void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac,
                goto nla_put_failure;
 
        if ((sta_opmode->changed & STA_OPMODE_MAX_BW_CHANGED) &&
-           nla_put_u8(msg, NL80211_ATTR_CHANNEL_WIDTH, sta_opmode->bw))
+           nla_put_u32(msg, NL80211_ATTR_CHANNEL_WIDTH, sta_opmode->bw))
                goto nla_put_failure;
 
        if ((sta_opmode->changed & STA_OPMODE_N_SS_CHANGED) &&
index aef240f..328402a 100644 (file)
@@ -2022,7 +2022,11 @@ void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev,
 
        spin_lock_bh(&rdev->bss_lock);
 
-       if (WARN_ON(cbss->pub.channel == chan))
+       /*
+        * Some APs use CSA also for bandwidth changes, i.e., without actually
+        * changing the control channel, so no need to update in such a case.
+        */
+       if (cbss->pub.channel == chan)
                goto done;
 
        /* use transmitting bss */
index 50f567a..e2db468 100644 (file)
@@ -78,8 +78,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
        int err;
        unsigned long flags;
        struct xfrm_state *x;
-       struct sk_buff *skb2, *nskb;
        struct softnet_data *sd;
+       struct sk_buff *skb2, *nskb, *pskb = NULL;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;
@@ -168,14 +168,14 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
                } else {
                        if (skb == skb2)
                                skb = nskb;
-
-                       if (!skb)
-                               return NULL;
+                       else
+                               pskb->next = nskb;
 
                        continue;
                }
 
                skb_push(skb2, skb2->data - skb_mac_header(skb2));
+               pskb = skb2;
        }
 
        return skb;
@@ -383,6 +383,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
                return xfrm_dev_feat_change(dev);
 
        case NETDEV_DOWN:
+       case NETDEV_UNREGISTER:
                return xfrm_dev_down(dev);
        }
        return NOTIFY_DONE;
index dbda08e..8a4af86 100644 (file)
@@ -434,7 +434,9 @@ EXPORT_SYMBOL(xfrm_policy_destroy);
 
 static void xfrm_policy_kill(struct xfrm_policy *policy)
 {
+       write_lock_bh(&policy->lock);
        policy->walk.dead = 1;
+       write_unlock_bh(&policy->lock);
 
        atomic_inc(&policy->genid);
 
index b88ba45..e6cfaa6 100644 (file)
@@ -110,7 +110,8 @@ static inline int verify_sec_ctx_len(struct nlattr **attrs)
                return 0;
 
        uctx = nla_data(rt);
-       if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
+       if (uctx->len > nla_len(rt) ||
+           uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
                return -EINVAL;
 
        return 0;
@@ -2275,6 +2276,9 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
        err = verify_newpolicy_info(&ua->policy);
        if (err)
                goto free_state;
+       err = verify_sec_ctx_len(attrs);
+       if (err)
+               goto free_state;
 
        /*   build an XP */
        xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
index 85334dc..496d11c 100644 (file)
@@ -44,3 +44,10 @@ $(error-if,$(success, $(LD) -v | grep -q gold), gold linker '$(LD)' not supporte
 
 # gcc version including patch level
 gcc-version := $(shell,$(srctree)/scripts/gcc-version.sh $(CC))
+
+# machine bit flags
+#  $(m32-flag): -m32 if the compiler supports it, or an empty string otherwise.
+#  $(m64-flag): -m64 if the compiler supports it, or an empty string otherwise.
+cc-option-bit = $(if-success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null,$(1))
+m32-flag := $(cc-option-bit,-m32)
+m64-flag := $(cc-option-bit,-m64)
index ecddf83..ca08f2f 100644 (file)
@@ -48,6 +48,7 @@ KBUILD_CFLAGS += -Wno-initializer-overrides
 KBUILD_CFLAGS += -Wno-format
 KBUILD_CFLAGS += -Wno-sign-compare
 KBUILD_CFLAGS += -Wno-format-zero-length
+KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
 endif
 
 endif
index 5c6c3fd..b3b7270 100644 (file)
@@ -23,7 +23,6 @@ LINECOMMENT   "//".*\n
 #include "srcpos.h"
 #include "dtc-parser.tab.h"
 
-YYLTYPE yylloc;
 extern bool treesource_error;
 
 /* CAUTION: this will stop working if we ever use yyless() or yyunput() */
index 548330e..feb3d55 100755 (executable)
@@ -94,7 +94,7 @@ if (defined $opt{'o'}) {
 #
 while ( <$module_symvers> ) {
        chomp;
-       my (undef, $symbol, $namespace, $module, $gpl) = split('\t');
+       my (undef, $symbol, $module, $gpl, $namespace) = split('\t');
        $SYMBOL { $symbol } =  [ $module , "0" , $symbol, $gpl];
 }
 close($module_symvers);
index 0133dfa..3e8dea6 100644 (file)
@@ -195,13 +195,13 @@ static struct sym_entry *read_symbol(FILE *in)
                return NULL;
        }
 
-       if (is_ignored_symbol(name, type))
-               return NULL;
-
-       /* Ignore most absolute/undefined (?) symbols. */
        if (strcmp(name, "_text") == 0)
                _text = addr;
 
+       /* Ignore most absolute/undefined (?) symbols. */
+       if (is_ignored_symbol(name, type))
+               return NULL;
+
        check_symbol_range(name, addr, text_ranges, ARRAY_SIZE(text_ranges));
        check_symbol_range(name, addr, &percpu_range, 1);
 
index 054405b..d3c237b 100644 (file)
@@ -145,6 +145,13 @@ int main(void)
        DEVID(i2c_device_id);
        DEVID_FIELD(i2c_device_id, name);
 
+       DEVID(i3c_device_id);
+       DEVID_FIELD(i3c_device_id, match_flags);
+       DEVID_FIELD(i3c_device_id, dcr);
+       DEVID_FIELD(i3c_device_id, manuf_id);
+       DEVID_FIELD(i3c_device_id, part_id);
+       DEVID_FIELD(i3c_device_id, extra_info);
+
        DEVID(spi_device_id);
        DEVID_FIELD(spi_device_id, name);
 
index c91eba7..f81cbe0 100644 (file)
@@ -919,6 +919,24 @@ static int do_i2c_entry(const char *filename, void *symval,
        return 1;
 }
 
+static int do_i3c_entry(const char *filename, void *symval,
+                       char *alias)
+{
+       DEF_FIELD(symval, i3c_device_id, match_flags);
+       DEF_FIELD(symval, i3c_device_id, dcr);
+       DEF_FIELD(symval, i3c_device_id, manuf_id);
+       DEF_FIELD(symval, i3c_device_id, part_id);
+       DEF_FIELD(symval, i3c_device_id, extra_info);
+
+       strcpy(alias, "i3c:");
+       ADD(alias, "dcr", match_flags & I3C_MATCH_DCR, dcr);
+       ADD(alias, "manuf", match_flags & I3C_MATCH_MANUF, manuf_id);
+       ADD(alias, "part", match_flags & I3C_MATCH_PART, part_id);
+       ADD(alias, "ext", match_flags & I3C_MATCH_EXTRA_INFO, extra_info);
+
+       return 1;
+}
+
 /* Looks like: spi:S */
 static int do_spi_entry(const char *filename, void *symval,
                        char *alias)
@@ -1386,6 +1404,7 @@ static const struct devtable devtable[] = {
        {"vmbus", SIZE_hv_vmbus_device_id, do_vmbus_entry},
        {"rpmsg", SIZE_rpmsg_device_id, do_rpmsg_entry},
        {"i2c", SIZE_i2c_device_id, do_i2c_entry},
+       {"i3c", SIZE_i3c_device_id, do_i3c_entry},
        {"spi", SIZE_spi_device_id, do_spi_entry},
        {"dmi", SIZE_dmi_system_id, do_dmi_entry},
        {"platform", SIZE_platform_device_id, do_platform_entry},
index 7edfdb2..55a0a2e 100644 (file)
@@ -308,7 +308,8 @@ static const char *sec_name(struct elf_info *elf, int secindex)
 
 static void *sym_get_data(const struct elf_info *info, const Elf_Sym *sym)
 {
-       Elf_Shdr *sechdr = &info->sechdrs[sym->st_shndx];
+       unsigned int secindex = get_secindex(info, sym);
+       Elf_Shdr *sechdr = &info->sechdrs[secindex];
        unsigned long offset;
 
        offset = sym->st_value;
@@ -2427,7 +2428,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
 }
 
 /* parse Module.symvers file. line format:
- * 0x12345678<tab>symbol<tab>module[[<tab>export]<tab>something]
+ * 0x12345678<tab>symbol<tab>module<tab>export<tab>namespace
  **/
 static void read_dump(const char *fname, unsigned int kernel)
 {
@@ -2440,7 +2441,7 @@ static void read_dump(const char *fname, unsigned int kernel)
                return;
 
        while ((line = get_next_line(&pos, file, size))) {
-               char *symname, *namespace, *modname, *d, *export, *end;
+               char *symname, *namespace, *modname, *d, *export;
                unsigned int crc;
                struct module *mod;
                struct symbol *s;
@@ -2448,16 +2449,16 @@ static void read_dump(const char *fname, unsigned int kernel)
                if (!(symname = strchr(line, '\t')))
                        goto fail;
                *symname++ = '\0';
-               if (!(namespace = strchr(symname, '\t')))
-                       goto fail;
-               *namespace++ = '\0';
-               if (!(modname = strchr(namespace, '\t')))
+               if (!(modname = strchr(symname, '\t')))
                        goto fail;
                *modname++ = '\0';
-               if ((export = strchr(modname, '\t')) != NULL)
-                       *export++ = '\0';
-               if (export && ((end = strchr(export, '\t')) != NULL))
-                       *end = '\0';
+               if (!(export = strchr(modname, '\t')))
+                       goto fail;
+               *export++ = '\0';
+               if (!(namespace = strchr(export, '\t')))
+                       goto fail;
+               *namespace++ = '\0';
+
                crc = strtoul(line, &d, 16);
                if (*symname == '\0' || *modname == '\0' || *d != '\0')
                        goto fail;
@@ -2508,9 +2509,9 @@ static void write_dump(const char *fname)
                                namespace = symbol->namespace;
                                buf_printf(&buf, "0x%08x\t%s\t%s\t%s\t%s\n",
                                           symbol->crc, symbol->name,
-                                          namespace ? namespace : "",
                                           symbol->module->name,
-                                          export_str(symbol->export));
+                                          export_str(symbol->export),
+                                          namespace ? namespace : "");
                        }
                        symbol = symbol->next;
                }
index 255cef1..2ca4eb3 100755 (executable)
@@ -8,13 +8,14 @@ my $input_file = "MAINTAINERS";
 my $output_file = "MAINTAINERS.new";
 my $output_section = "SECTION.new";
 my $help = 0;
-
+my $order = 0;
 my $P = $0;
 
 if (!GetOptions(
                'input=s' => \$input_file,
                'output=s' => \$output_file,
                'section=s' => \$output_section,
+               'order!' => \$order,
                'h|help|usage' => \$help,
            )) {
     die "$P: invalid argument - use --help if necessary\n";
@@ -32,6 +33,22 @@ usage: $P [options] <pattern matching regexes>
   --input => MAINTAINERS file to read (default: MAINTAINERS)
   --output => sorted MAINTAINERS file to write (default: MAINTAINERS.new)
   --section => new sorted MAINTAINERS file to write to (default: SECTION.new)
+  --order => Use the preferred section content output ordering (default: 0)
+    Preferred ordering of section output is:
+      M:  Person acting as a maintainer
+      R:  Person acting as a patch reviewer
+      L:  Mailing list where patches should be sent
+      S:  Maintenance status
+      W:  URI for general information
+      Q:  URI for patchwork tracking
+      B:  URI for bug tracking/submission
+      C:  URI for chat
+      P:  URI or file for subsystem specific coding styles
+      T:  SCM tree type and location
+      F:  File and directory pattern
+      X:  File and directory exclusion pattern
+      N:  File glob
+      K:  Keyword - patch content regex
 
 If <pattern match regexes> exist, then the sections that match the
 regexes are not written to the output file but are written to the
@@ -56,7 +73,7 @@ sub by_category($$) {
 
 sub by_pattern($$) {
     my ($a, $b) = @_;
-    my $preferred_order = 'MRPLSWTQBCFXNK';
+    my $preferred_order = 'MRLSWQBCPTFXNK';
 
     my $a1 = uc(substr($a, 0, 1));
     my $b1 = uc(substr($b, 0, 1));
@@ -105,8 +122,14 @@ sub alpha_output {
                print $file $separator;
            }
            print $file $key . "\n";
-           foreach my $pattern (sort by_pattern split('\n', %$hashref{$key})) {
-               print $file ($pattern . "\n");
+           if ($order) {
+               foreach my $pattern (sort by_pattern split('\n', %$hashref{$key})) {
+                   print $file ($pattern . "\n");
+               }
+           } else {
+               foreach my $pattern (split('\n', %$hashref{$key})) {
+                   print $file ($pattern . "\n");
+               }
            }
        }
     }
index 718bf72..e959b3c 100644 (file)
@@ -382,7 +382,7 @@ int key_payload_reserve(struct key *key, size_t datalen)
                spin_lock(&key->user->lock);
 
                if (delta > 0 &&
-                   (key->user->qnbytes + delta >= maxbytes ||
+                   (key->user->qnbytes + delta > maxbytes ||
                     key->user->qnbytes + delta < key->user->qnbytes)) {
                        ret = -EDQUOT;
                }
index 9b898c9..d1a3dea 100644 (file)
@@ -937,8 +937,8 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
                                key_quota_root_maxbytes : key_quota_maxbytes;
 
                        spin_lock(&newowner->lock);
-                       if (newowner->qnkeys + 1 >= maxkeys ||
-                           newowner->qnbytes + key->quotalen >= maxbytes ||
+                       if (newowner->qnkeys + 1 > maxkeys ||
+                           newowner->qnbytes + key->quotalen > maxbytes ||
                            newowner->qnbytes + key->quotalen <
                            newowner->qnbytes)
                                goto quota_overrun;
index 240e470..752d078 100644 (file)
@@ -111,7 +111,7 @@ int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames)
                while (plugin->next) {
                        if (plugin->dst_frames)
                                frames = plugin->dst_frames(plugin, frames);
-                       if (snd_BUG_ON((snd_pcm_sframes_t)frames <= 0))
+                       if ((snd_pcm_sframes_t)frames <= 0)
                                return -ENXIO;
                        plugin = plugin->next;
                        err = snd_pcm_plugin_alloc(plugin, frames);
@@ -123,7 +123,7 @@ int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames)
                while (plugin->prev) {
                        if (plugin->src_frames)
                                frames = plugin->src_frames(plugin, frames);
-                       if (snd_BUG_ON((snd_pcm_sframes_t)frames <= 0))
+                       if ((snd_pcm_sframes_t)frames <= 0)
                                return -ENXIO;
                        plugin = plugin->prev;
                        err = snd_pcm_plugin_alloc(plugin, frames);
@@ -209,6 +209,8 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p
        if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
                plugin = snd_pcm_plug_last(plug);
                while (plugin && drv_frames > 0) {
+                       if (drv_frames > plugin->buf_frames)
+                               drv_frames = plugin->buf_frames;
                        plugin_prev = plugin->prev;
                        if (plugin->src_frames)
                                drv_frames = plugin->src_frames(plugin, drv_frames);
@@ -220,6 +222,8 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p
                        plugin_next = plugin->next;
                        if (plugin->dst_frames)
                                drv_frames = plugin->dst_frames(plugin, drv_frames);
+                       if (drv_frames > plugin->buf_frames)
+                               drv_frames = plugin->buf_frames;
                        plugin = plugin_next;
                }
        } else
@@ -248,11 +252,15 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc
                                if (frames < 0)
                                        return frames;
                        }
+                       if (frames > plugin->buf_frames)
+                               frames = plugin->buf_frames;
                        plugin = plugin_next;
                }
        } else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
                plugin = snd_pcm_plug_last(plug);
                while (plugin) {
+                       if (frames > plugin->buf_frames)
+                               frames = plugin->buf_frames;
                        plugin_prev = plugin->prev;
                        if (plugin->src_frames) {
                                frames = plugin->src_frames(plugin, frames);
index a88c235..2ddfe22 100644 (file)
@@ -602,6 +602,7 @@ send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq
                len = snd_seq_oss_timer_start(dp->timer);
        if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
                snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev);
+               snd_midi_event_reset_decode(mdev->coder);
        } else {
                len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev);
                if (len > 0)
index 626d87c..77d7037 100644 (file)
@@ -81,6 +81,7 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
                        if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
                                continue;
                        snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)snd_rawmidi_receive, vmidi->substream);
+                       snd_midi_event_reset_decode(vmidi->parser);
                } else {
                        len = snd_midi_event_decode(vmidi->parser, msg, sizeof(msg), ev);
                        if (len > 0)
index 0ac06ff..63e1a56 100644 (file)
@@ -8051,6 +8051,8 @@ static int patch_alc269(struct hda_codec *codec)
                spec->gen.mixer_nid = 0;
                break;
        case 0x10ec0225:
+               codec->power_save_node = 1;
+               /* fall through */
        case 0x10ec0295:
        case 0x10ec0299:
                spec->codec_variant = ALC269_TYPE_ALC225;
@@ -8610,6 +8612,8 @@ enum {
        ALC669_FIXUP_ACER_ASPIRE_ETHOS,
        ALC669_FIXUP_ACER_ASPIRE_ETHOS_HEADSET,
        ALC671_FIXUP_HP_HEADSET_MIC2,
+       ALC662_FIXUP_ACER_X2660G_HEADSET_MODE,
+       ALC662_FIXUP_ACER_NITRO_HEADSET_MODE,
 };
 
 static const struct hda_fixup alc662_fixups[] = {
@@ -8955,6 +8959,25 @@ static const struct hda_fixup alc662_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc671_fixup_hp_headset_mic2,
        },
+       [ALC662_FIXUP_ACER_X2660G_HEADSET_MODE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1a, 0x02a1113c }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC662_FIXUP_USI_FUNC
+       },
+       [ALC662_FIXUP_ACER_NITRO_HEADSET_MODE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1a, 0x01a11140 }, /* use as headset mic, without its own jack detect */
+                       { 0x1b, 0x0221144f },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC662_FIXUP_USI_FUNC
+       },
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -8966,6 +8989,8 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
+       SND_PCI_QUIRK(0x1025, 0x123c, "Acer Nitro N50-600", ALC662_FIXUP_ACER_NITRO_HEADSET_MODE),
+       SND_PCI_QUIRK(0x1025, 0x124e, "Acer 2660G", ALC662_FIXUP_ACER_X2660G_HEADSET_MODE),
        SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05fe, "Dell XPS 15", ALC668_FIXUP_DELL_XPS13),
index b5a3f75..4f09668 100644 (file)
@@ -305,7 +305,7 @@ static void line6_data_received(struct urb *urb)
                                line6_midibuf_read(mb, line6->buffer_message,
                                                LINE6_MIDI_MESSAGE_MAXLEN);
 
-                       if (done == 0)
+                       if (done <= 0)
                                break;
 
                        line6->message_length = done;
index 8d6eefa..6a70463 100644 (file)
@@ -159,7 +159,7 @@ int line6_midibuf_read(struct midi_buffer *this, unsigned char *data,
                        int midi_length_prev =
                            midibuf_message_length(this->command_prev);
 
-                       if (midi_length_prev > 0) {
+                       if (midi_length_prev > 1) {
                                midi_length = midi_length_prev - 1;
                                repeat = 1;
                        } else
index ce3c594..637189e 100644 (file)
@@ -1,18 +1,18 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #if defined(__i386__) || defined(__x86_64__)
-#include "../../arch/x86/include/uapi/asm/errno.h"
+#include "../../../arch/x86/include/uapi/asm/errno.h"
 #elif defined(__powerpc__)
-#include "../../arch/powerpc/include/uapi/asm/errno.h"
+#include "../../../arch/powerpc/include/uapi/asm/errno.h"
 #elif defined(__sparc__)
-#include "../../arch/sparc/include/uapi/asm/errno.h"
+#include "../../../arch/sparc/include/uapi/asm/errno.h"
 #elif defined(__alpha__)
-#include "../../arch/alpha/include/uapi/asm/errno.h"
+#include "../../../arch/alpha/include/uapi/asm/errno.h"
 #elif defined(__mips__)
-#include "../../arch/mips/include/uapi/asm/errno.h"
+#include "../../../arch/mips/include/uapi/asm/errno.h"
 #elif defined(__ia64__)
-#include "../../arch/ia64/include/uapi/asm/errno.h"
+#include "../../../arch/ia64/include/uapi/asm/errno.h"
 #elif defined(__xtensa__)
-#include "../../arch/xtensa/include/uapi/asm/errno.h"
+#include "../../../arch/xtensa/include/uapi/asm/errno.h"
 #else
 #include <asm-generic/errno.h>
 #endif
index 1521073..8533bf0 100644 (file)
@@ -74,6 +74,8 @@ enum {
 #define IPPROTO_UDPLITE                IPPROTO_UDPLITE
   IPPROTO_MPLS = 137,          /* MPLS in IP (RFC 4023)                */
 #define IPPROTO_MPLS           IPPROTO_MPLS
+  IPPROTO_ETHERNET = 143,      /* Ethernet-within-IPv6 Encapsulation   */
+#define IPPROTO_ETHERNET       IPPROTO_ETHERNET
   IPPROTO_RAW = 255,           /* Raw IP packets                       */
 #define IPPROTO_RAW            IPPROTO_RAW
   IPPROTO_MPTCP = 262,         /* Multipath TCP connection             */
index 7902a56..b8fc7d9 100644 (file)
@@ -35,7 +35,7 @@ endif
 # Only pass canonical directory names as the output directory:
 #
 ifneq ($(O),)
-  FULL_O := $(shell readlink -f $(O) || echo $(O))
+  FULL_O := $(shell cd $(PWD); readlink -f $(O) || echo $(O))
 endif
 
 #
index 8d6821d..27653be 100644 (file)
 #include <linux/zalloc.h>
 #include <time.h>
 
-#include "../../util/cpumap.h"
-#include "../../util/event.h"
-#include "../../util/evsel.h"
-#include "../../util/evlist.h"
-#include "../../util/session.h"
+#include "../../../util/cpumap.h"
+#include "../../../util/event.h"
+#include "../../../util/evsel.h"
+#include "../../../util/evlist.h"
+#include "../../../util/session.h"
 #include <internal/lib.h> // page_size
-#include "../../util/pmu.h"
-#include "../../util/debug.h"
-#include "../../util/auxtrace.h"
-#include "../../util/record.h"
-#include "../../util/arm-spe.h"
+#include "../../../util/pmu.h"
+#include "../../../util/debug.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/record.h"
+#include "../../../util/arm-spe.h"
 
 #define KiB(x) ((x) * 1024)
 #define MiB(x) ((x) * 1024 * 1024)
index 2864e2e..2833e10 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-#include "../../util/perf_regs.h"
+#include "../../../util/perf_regs.h"
 
 const struct sample_reg sample_reg_masks[] = {
        SMPL_REG_END
index e9c436e..0a52429 100644 (file)
@@ -4,8 +4,8 @@
 #include <regex.h>
 #include <linux/zalloc.h>
 
-#include "../../util/perf_regs.h"
-#include "../../util/debug.h"
+#include "../../../util/perf_regs.h"
+#include "../../../util/debug.h"
 
 #include <linux/kernel.h>
 
index 7abc9fd..3da506e 100644 (file)
@@ -7,13 +7,13 @@
 #include <errno.h>
 #include <stdbool.h>
 
-#include "../../util/header.h"
-#include "../../util/debug.h"
-#include "../../util/pmu.h"
-#include "../../util/auxtrace.h"
-#include "../../util/intel-pt.h"
-#include "../../util/intel-bts.h"
-#include "../../util/evlist.h"
+#include "../../../util/header.h"
+#include "../../../util/debug.h"
+#include "../../../util/pmu.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/intel-pt.h"
+#include "../../../util/intel-bts.h"
+#include "../../../util/evlist.h"
 
 static
 struct auxtrace_record *auxtrace_record__init_intel(struct evlist *evlist,
index ac45015..047dc00 100644 (file)
@@ -3,12 +3,12 @@
 #include <linux/string.h>
 #include <linux/zalloc.h>
 
-#include "../../util/event.h"
-#include "../../util/synthetic-events.h"
-#include "../../util/machine.h"
-#include "../../util/tool.h"
-#include "../../util/map.h"
-#include "../../util/debug.h"
+#include "../../../util/event.h"
+#include "../../../util/synthetic-events.h"
+#include "../../../util/machine.h"
+#include "../../../util/tool.h"
+#include "../../../util/map.h"
+#include "../../../util/debug.h"
 
 #if defined(__x86_64__)
 
index aa6deb4..578c8c5 100644 (file)
@@ -7,8 +7,8 @@
 #include <string.h>
 #include <regex.h>
 
-#include "../../util/debug.h"
-#include "../../util/header.h"
+#include "../../../util/debug.h"
+#include "../../../util/header.h"
 
 static inline void
 cpuid(unsigned int op, unsigned int *a, unsigned int *b, unsigned int *c,
index 26cee10..09f9380 100644 (file)
 #include <linux/log2.h>
 #include <linux/zalloc.h>
 
-#include "../../util/cpumap.h"
-#include "../../util/event.h"
-#include "../../util/evsel.h"
-#include "../../util/evlist.h"
-#include "../../util/mmap.h"
-#include "../../util/session.h"
-#include "../../util/pmu.h"
-#include "../../util/debug.h"
-#include "../../util/record.h"
-#include "../../util/tsc.h"
-#include "../../util/auxtrace.h"
-#include "../../util/intel-bts.h"
+#include "../../../util/cpumap.h"
+#include "../../../util/event.h"
+#include "../../../util/evsel.h"
+#include "../../../util/evlist.h"
+#include "../../../util/mmap.h"
+#include "../../../util/session.h"
+#include "../../../util/pmu.h"
+#include "../../../util/debug.h"
+#include "../../../util/record.h"
+#include "../../../util/tsc.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/intel-bts.h"
 #include <internal/lib.h> // page_size
 
 #define KiB(x) ((x) * 1024)
index 7eea4fd..1643aed 100644 (file)
 #include <linux/zalloc.h>
 #include <cpuid.h>
 
-#include "../../util/session.h"
-#include "../../util/event.h"
-#include "../../util/evlist.h"
-#include "../../util/evsel.h"
-#include "../../util/evsel_config.h"
-#include "../../util/cpumap.h"
-#include "../../util/mmap.h"
+#include "../../../util/session.h"
+#include "../../../util/event.h"
+#include "../../../util/evlist.h"
+#include "../../../util/evsel.h"
+#include "../../../util/evsel_config.h"
+#include "../../../util/cpumap.h"
+#include "../../../util/mmap.h"
 #include <subcmd/parse-options.h>
-#include "../../util/parse-events.h"
-#include "../../util/pmu.h"
-#include "../../util/debug.h"
-#include "../../util/auxtrace.h"
-#include "../../util/record.h"
-#include "../../util/target.h"
-#include "../../util/tsc.h"
+#include "../../../util/parse-events.h"
+#include "../../../util/pmu.h"
+#include "../../../util/debug.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/record.h"
+#include "../../../util/target.h"
+#include "../../../util/tsc.h"
 #include <internal/lib.h> // page_size
-#include "../../util/intel-pt.h"
+#include "../../../util/intel-pt.h"
 
 #define KiB(x) ((x) * 1024)
 #define MiB(x) ((x) * 1024 * 1024)
index e17e080..31679c3 100644 (file)
@@ -5,9 +5,9 @@
 #include <stdlib.h>
 
 #include <internal/lib.h> // page_size
-#include "../../util/machine.h"
-#include "../../util/map.h"
-#include "../../util/symbol.h"
+#include "../../../util/machine.h"
+#include "../../../util/map.h"
+#include "../../../util/symbol.h"
 #include <linux/ctype.h>
 
 #include <symbol/kallsyms.h>
index c218b83..fca81b3 100644 (file)
@@ -5,10 +5,10 @@
 #include <linux/kernel.h>
 #include <linux/zalloc.h>
 
-#include "../../perf-sys.h"
-#include "../../util/perf_regs.h"
-#include "../../util/debug.h"
-#include "../../util/event.h"
+#include "../../../perf-sys.h"
+#include "../../../util/perf_regs.h"
+#include "../../../util/debug.h"
+#include "../../../util/event.h"
 
 const struct sample_reg sample_reg_masks[] = {
        SMPL_REG(AX, PERF_REG_X86_AX),
index e33ef5b..d48d608 100644 (file)
@@ -4,9 +4,9 @@
 #include <linux/stddef.h>
 #include <linux/perf_event.h>
 
-#include "../../util/intel-pt.h"
-#include "../../util/intel-bts.h"
-#include "../../util/pmu.h"
+#include "../../../util/intel-pt.h"
+#include "../../../util/intel-bts.h"
+#include "../../../util/pmu.h"
 
 struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
 {
index fddb3ce..4aa6de1 100644 (file)
@@ -2,6 +2,10 @@
 #ifndef BENCH_H
 #define BENCH_H
 
+#include <sys/time.h>
+
+extern struct timeval bench__start, bench__end, bench__runtime;
+
 /*
  * The madvise transparent hugepage constants were added in glibc
  * 2.13. For compatibility with older versions of glibc, define these
index bb617e5..cadc18d 100644 (file)
@@ -35,7 +35,6 @@
 
 static unsigned int nthreads = 0;
 static unsigned int nsecs    = 8;
-struct timeval start, end, runtime;
 static bool done, __verbose, randomize;
 
 /*
@@ -94,8 +93,8 @@ static void toggle_done(int sig __maybe_unused,
 {
        /* inform all threads that we're done for the day */
        done = true;
-       gettimeofday(&end, NULL);
-       timersub(&end, &start, &runtime);
+       gettimeofday(&bench__end, NULL);
+       timersub(&bench__end, &bench__start, &bench__runtime);
 }
 
 static void nest_epollfd(void)
@@ -313,6 +312,7 @@ int bench_epoll_ctl(int argc, const char **argv)
                exit(EXIT_FAILURE);
        }
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
@@ -361,7 +361,7 @@ int bench_epoll_ctl(int argc, const char **argv)
 
        threads_starting = nthreads;
 
-       gettimeofday(&start, NULL);
+       gettimeofday(&bench__start, NULL);
 
        do_threads(worker, cpu);
 
index 7af6944..f938c58 100644 (file)
@@ -90,7 +90,6 @@
 
 static unsigned int nthreads = 0;
 static unsigned int nsecs    = 8;
-struct timeval start, end, runtime;
 static bool wdone, done, __verbose, randomize, nonblocking;
 
 /*
@@ -276,8 +275,8 @@ static void toggle_done(int sig __maybe_unused,
 {
        /* inform all threads that we're done for the day */
        done = true;
-       gettimeofday(&end, NULL);
-       timersub(&end, &start, &runtime);
+       gettimeofday(&bench__end, NULL);
+       timersub(&bench__end, &bench__start, &bench__runtime);
 }
 
 static void print_summary(void)
@@ -287,7 +286,7 @@ static void print_summary(void)
 
        printf("\nAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
               avg, rel_stddev_stats(stddev, avg),
-              (int) runtime.tv_sec);
+              (int)bench__runtime.tv_sec);
 }
 
 static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
@@ -427,6 +426,7 @@ int bench_epoll_wait(int argc, const char **argv)
                exit(EXIT_FAILURE);
        }
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
@@ -479,7 +479,7 @@ int bench_epoll_wait(int argc, const char **argv)
 
        threads_starting = nthreads;
 
-       gettimeofday(&start, NULL);
+       gettimeofday(&bench__start, NULL);
 
        do_threads(worker, cpu);
 
@@ -519,7 +519,7 @@ int bench_epoll_wait(int argc, const char **argv)
                qsort(worker, nthreads, sizeof(struct worker), cmpworker);
 
        for (i = 0; i < nthreads; i++) {
-               unsigned long t = worker[i].ops/runtime.tv_sec;
+               unsigned long t = worker[i].ops / bench__runtime.tv_sec;
 
                update_stats(&throughput_stats, t);
 
index 8ba0c33..65eebe0 100644 (file)
@@ -37,7 +37,7 @@ static unsigned int nfutexes = 1024;
 static bool fshared = false, done = false, silent = false;
 static int futex_flag = 0;
 
-struct timeval start, end, runtime;
+struct timeval bench__start, bench__end, bench__runtime;
 static pthread_mutex_t thread_lock;
 static unsigned int threads_starting;
 static struct stats throughput_stats;
@@ -103,8 +103,8 @@ static void toggle_done(int sig __maybe_unused,
 {
        /* inform all threads that we're done for the day */
        done = true;
-       gettimeofday(&end, NULL);
-       timersub(&end, &start, &runtime);
+       gettimeofday(&bench__end, NULL);
+       timersub(&bench__end, &bench__start, &bench__runtime);
 }
 
 static void print_summary(void)
@@ -114,7 +114,7 @@ static void print_summary(void)
 
        printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
               !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
-              (int) runtime.tv_sec);
+              (int)bench__runtime.tv_sec);
 }
 
 int bench_futex_hash(int argc, const char **argv)
@@ -137,6 +137,7 @@ int bench_futex_hash(int argc, const char **argv)
        if (!cpu)
                goto errmem;
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
@@ -161,7 +162,7 @@ int bench_futex_hash(int argc, const char **argv)
 
        threads_starting = nthreads;
        pthread_attr_init(&thread_attr);
-       gettimeofday(&start, NULL);
+       gettimeofday(&bench__start, NULL);
        for (i = 0; i < nthreads; i++) {
                worker[i].tid = i;
                worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex));
@@ -204,7 +205,7 @@ int bench_futex_hash(int argc, const char **argv)
        pthread_mutex_destroy(&thread_lock);
 
        for (i = 0; i < nthreads; i++) {
-               unsigned long t = worker[i].ops/runtime.tv_sec;
+               unsigned long t = worker[i].ops / bench__runtime.tv_sec;
                update_stats(&throughput_stats, t);
                if (!silent) {
                        if (nfutexes == 1)
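
The recurring edits in these bench files (one shared set of bench__start/bench__end/bench__runtime timevals plus a memset() of struct sigaction before every sigaction() call) reduce to a single pattern: keep the timing state in one place and never hand the kernel a partially initialized sigaction. A minimal, self-contained sketch of that pattern follows; the handler and the globals are stand-ins for the demo, not perf's own code.

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/time.h>
    #include <unistd.h>

    static struct timeval bench_start, bench_end, bench_runtime;
    static volatile sig_atomic_t done;

    static void toggle_done(int sig, siginfo_t *info, void *uc)
    {
            (void)sig; (void)info; (void)uc;
            done = 1;
            gettimeofday(&bench_end, NULL);
            timersub(&bench_end, &bench_start, &bench_runtime);
    }

    int main(void)
    {
            struct sigaction act;

            memset(&act, 0, sizeof(act));   /* no stack garbage in sa_flags etc. */
            sigfillset(&act.sa_mask);
            act.sa_flags = SA_SIGINFO;
            act.sa_sigaction = toggle_done;
            if (sigaction(SIGINT, &act, NULL))
                    return 1;

            gettimeofday(&bench_start, NULL);
            while (!done)
                    pause();                /* Ctrl-C ends the "benchmark" */

            printf("ran for %ld.%06ld s\n",
                   (long)bench_runtime.tv_sec, (long)bench_runtime.tv_usec);
            return 0;
    }

Zeroing the whole struct first matters because sigaction() reads fields (sa_flags in particular) that the code never sets explicitly.
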
index d0cae81..89fd8f3 100644 (file)
@@ -37,7 +37,6 @@ static bool silent = false, multi = false;
 static bool done = false, fshared = false;
 static unsigned int nthreads = 0;
 static int futex_flag = 0;
-struct timeval start, end, runtime;
 static pthread_mutex_t thread_lock;
 static unsigned int threads_starting;
 static struct stats throughput_stats;
@@ -64,7 +63,7 @@ static void print_summary(void)
 
        printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
               !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
-              (int) runtime.tv_sec);
+              (int)bench__runtime.tv_sec);
 }
 
 static void toggle_done(int sig __maybe_unused,
@@ -73,8 +72,8 @@ static void toggle_done(int sig __maybe_unused,
 {
        /* inform all threads that we're done for the day */
        done = true;
-       gettimeofday(&end, NULL);
-       timersub(&end, &start, &runtime);
+       gettimeofday(&bench__end, NULL);
+       timersub(&bench__end, &bench__start, &bench__runtime);
 }
 
 static void *workerfn(void *arg)
@@ -161,6 +160,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
        if (!cpu)
                err(EXIT_FAILURE, "calloc");
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
@@ -185,7 +185,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
 
        threads_starting = nthreads;
        pthread_attr_init(&thread_attr);
-       gettimeofday(&start, NULL);
+       gettimeofday(&bench__start, NULL);
 
        create_threads(worker, thread_attr, cpu);
        pthread_attr_destroy(&thread_attr);
@@ -211,7 +211,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
        pthread_mutex_destroy(&thread_lock);
 
        for (i = 0; i < nthreads; i++) {
-               unsigned long t = worker[i].ops/runtime.tv_sec;
+               unsigned long t = worker[i].ops / bench__runtime.tv_sec;
 
                update_stats(&throughput_stats, t);
                if (!silent)
index a00a689..7a15c2e 100644 (file)
@@ -128,6 +128,7 @@ int bench_futex_requeue(int argc, const char **argv)
        if (!cpu)
                err(EXIT_FAILURE, "cpu_map__new");
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
index a053cf2..cd2b81a 100644 (file)
@@ -234,6 +234,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
                exit(EXIT_FAILURE);
        }
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
index df81009..2dfcef3 100644 (file)
@@ -43,7 +43,7 @@ static bool done = false, silent = false, fshared = false;
 static pthread_mutex_t thread_lock;
 static pthread_cond_t thread_parent, thread_worker;
 static struct stats waketime_stats, wakeup_stats;
-static unsigned int ncpus, threads_starting, nthreads = 0;
+static unsigned int threads_starting, nthreads = 0;
 static int futex_flag = 0;
 
 static const struct option options[] = {
@@ -136,12 +136,13 @@ int bench_futex_wake(int argc, const char **argv)
        if (!cpu)
                err(EXIT_FAILURE, "calloc");
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
 
        if (!nthreads)
-               nthreads = ncpus;
+               nthreads = cpu->nr;
 
        worker = calloc(nthreads, sizeof(*worker));
        if (!worker)
index f8b6ae5..c03c36f 100644 (file)
@@ -1312,7 +1312,8 @@ static int cycles_printf(struct hist_entry *he, struct hist_entry *pair,
        end_line = map__srcline(he->ms.map, bi->sym->start + bi->end,
                                he->ms.sym);
 
-       if ((start_line != SRCLINE_UNKNOWN) && (end_line != SRCLINE_UNKNOWN)) {
+       if ((strncmp(start_line, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0) &&
+           (strncmp(end_line, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0)) {
                scnprintf(buf, sizeof(buf), "[%s -> %s] %4ld",
                          start_line, end_line, block_he->diff.cycles);
        } else {
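
This hunk (and the matching block-info one further down) stops comparing srcline strings by pointer and compares their contents instead, presumably because the "unknown" marker can now come back as a freshly allocated copy rather than the shared literal. A tiny stand-alone illustration of why the pointer test stops working; the names here are local stand-ins, not perf's API.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static const char SRCLINE_UNKNOWN[] = "??:0";   /* stand-in for perf's marker */

    static char *fake_srcline(void)
    {
            /* newer helpers hand back an allocated copy, not the marker itself */
            return strdup(SRCLINE_UNKNOWN);
    }

    int main(void)
    {
            char *line = fake_srcline();

            /* pointer identity: always "different", even though the text matches */
            printf("pointer check: %s\n",
                   line != SRCLINE_UNKNOWN ? "treated as known (wrong)" : "unknown");

            /* content check: what the diff switches to */
            printf("content check: %s\n",
                   strncmp(line, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) == 0 ?
                   "unknown (right)" : "treated as known");

            free(line);
            return 0;
    }
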
index f6dd1a6..d2539b7 100644 (file)
@@ -684,7 +684,9 @@ repeat:
        delay_msecs = top->delay_secs * MSEC_PER_SEC;
        set_term_quiet_input(&save);
        /* trash return*/
-       getc(stdin);
+       clearerr(stdin);
+       if (poll(&stdin_poll, 1, 0) > 0)
+               getc(stdin);
 
        while (!done) {
                perf_top__print_sym_table(top);
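
The builtin-top hunk only reads the "trash return" byte when one is actually pending, so a non-interactive stdin no longer blocks the UI. A self-contained sketch of the same poll()-with-zero-timeout idea; the pollfd is declared locally here for the demo, whereas the real patch keeps it elsewhere in the file.

    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            struct pollfd stdin_poll = { .fd = STDIN_FILENO, .events = POLLIN };

            clearerr(stdin);
            if (poll(&stdin_poll, 1, 0) > 0)        /* zero timeout: never blocks */
                    getc(stdin);                    /* swallow the pending byte */
            else
                    puts("nothing buffered on stdin, skipping the read");
            return 0;
    }
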
index 079c77b..27b4da8 100644 (file)
@@ -1082,10 +1082,9 @@ static int process_one_file(const char *fpath, const struct stat *sb,
  */
 int main(int argc, char *argv[])
 {
-       int rc;
+       int rc, ret = 0;
        int maxfds;
        char ldirname[PATH_MAX];
-
        const char *arch;
        const char *output_file;
        const char *start_dirname;
@@ -1156,7 +1155,8 @@ int main(int argc, char *argv[])
                /* Make build fail */
                fclose(eventsfp);
                free_arch_std_events();
-               return 1;
+               ret = 1;
+               goto out_free_mapfile;
        } else if (rc) {
                goto empty_map;
        }
@@ -1174,14 +1174,17 @@ int main(int argc, char *argv[])
                /* Make build fail */
                fclose(eventsfp);
                free_arch_std_events();
-               return 1;
+               ret = 1;
        }
 
-       return 0;
+
+       goto out_free_mapfile;
 
 empty_map:
        fclose(eventsfp);
        create_empty_mapping(output_file);
        free_arch_std_events();
-       return 0;
+out_free_mapfile:
+       free(mapfile);
+       return ret;
 }
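
The jevents change above routes both the failure and success paths through a single out_free_mapfile label so the mapfile string is always released. A minimal sketch of that single-exit cleanup shape, using placeholder resources rather than jevents' own.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            int ret = 0;
            char *mapfile = strdup("mapfile.csv");  /* placeholder resource */
            FILE *eventsfp = tmpfile();

            if (!mapfile || !eventsfp) {
                    ret = 1;
                    goto out_free;
            }

            /* ... work that can fail sets ret and jumps to out_free ... */

    out_free:
            if (eventsfp)
                    fclose(eventsfp);
            free(mapfile);          /* free(NULL) is a no-op */
            return ret;
    }
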
index d0b9353..489b506 100644 (file)
@@ -19,7 +19,7 @@
 #include "../perf-sys.h"
 #include "cloexec.h"
 
-volatile long the_var;
+static volatile long the_var;
 
 static noinline int test_function(void)
 {
index c4b030b..fbbb6d6 100644 (file)
@@ -295,7 +295,8 @@ static int block_range_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
        end_line = map__srcline(he->ms.map, bi->sym->start + bi->end,
                                he->ms.sym);
 
-       if ((start_line != SRCLINE_UNKNOWN) && (end_line != SRCLINE_UNKNOWN)) {
+       if ((strncmp(start_line, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0) &&
+           (strncmp(end_line, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0)) {
                scnprintf(buf, sizeof(buf), "[%s -> %s]",
                          start_line, end_line);
        } else {
index 6242a92..4154f94 100644 (file)
@@ -343,11 +343,11 @@ static const char *normalize_arch(char *arch)
 
 const char *perf_env__arch(struct perf_env *env)
 {
-       struct utsname uts;
        char *arch_name;
 
        if (!env || !env->arch) { /* Assume local operation */
-               if (uname(&uts) < 0)
+               static struct utsname uts = { .machine[0] = '\0', };
+               if (uts.machine[0] == '\0' && uname(&uts) < 0)
                        return NULL;
                arch_name = uts.machine;
        } else
index a08ca27..b342f74 100644 (file)
@@ -89,7 +89,7 @@ static inline bool replace_android_lib(const char *filename, char *newfilename)
                return true;
        }
 
-       if (!strncmp(filename, "/system/lib/", 11)) {
+       if (!strncmp(filename, "/system/lib/", 12)) {
                char *ndk, *app;
                const char *arch;
                size_t ndk_length;
@@ -431,7 +431,7 @@ int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
 
        if (map && map->dso) {
                char *srcline = map__srcline(map, addr, NULL);
-               if (srcline != SRCLINE_UNKNOWN)
+               if (strncmp(srcline, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0)
                        ret = fprintf(fp, "%s%s", prefix, srcline);
                free_srcline(srcline);
        }
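
The first map.c hunk lengthens the "/system/lib/" comparison from 11 to 12 bytes; with the short length the trailing slash was never checked, so sibling paths such as "/system/lib64/..." also matched. A toy demonstration (prefix and path invented for the demo), including the sizeof-based length that keeps literal and count in sync:

    #include <stdio.h>
    #include <string.h>

    #define ANDROID_LIB_PREFIX "/system/lib/"       /* literal under test */

    static int is_system_lib(const char *filename, size_t n)
    {
            return strncmp(filename, ANDROID_LIB_PREFIX, n) == 0;
    }

    int main(void)
    {
            const char *f = "/system/lib64/libfoo.so";      /* invented path */

            printf("compare 11 bytes (old): %d\n", is_system_lib(f, 11));
            printf("compare full prefix   : %d\n",
                   is_system_lib(f, sizeof(ANDROID_LIB_PREFIX) - 1));
            return 0;
    }
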
index c01ba6f..a7dc0b0 100644 (file)
@@ -257,21 +257,15 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
                                path = zalloc(sizeof(*path));
                                if (!path)
                                        return NULL;
-                               path->system = malloc(MAX_EVENT_LENGTH);
-                               if (!path->system) {
+                               if (asprintf(&path->system, "%.*s", MAX_EVENT_LENGTH, sys_dirent->d_name) < 0) {
                                        free(path);
                                        return NULL;
                                }
-                               path->name = malloc(MAX_EVENT_LENGTH);
-                               if (!path->name) {
+                               if (asprintf(&path->name, "%.*s", MAX_EVENT_LENGTH, evt_dirent->d_name) < 0) {
                                        zfree(&path->system);
                                        free(path);
                                        return NULL;
                                }
-                               strncpy(path->system, sys_dirent->d_name,
-                                       MAX_EVENT_LENGTH);
-                               strncpy(path->name, evt_dirent->d_name,
-                                       MAX_EVENT_LENGTH);
                                return path;
                        }
                }
@@ -1219,7 +1213,7 @@ static int config_attr(struct perf_event_attr *attr,
 static int get_config_terms(struct list_head *head_config,
                            struct list_head *head_terms __maybe_unused)
 {
-#define ADD_CONFIG_TERM(__type)                                        \
+#define ADD_CONFIG_TERM(__type, __weak)                                \
        struct perf_evsel_config_term *__t;                     \
                                                                \
        __t = zalloc(sizeof(*__t));                             \
@@ -1228,18 +1222,18 @@ static int get_config_terms(struct list_head *head_config,
                                                                \
        INIT_LIST_HEAD(&__t->list);                             \
        __t->type       = PERF_EVSEL__CONFIG_TERM_ ## __type;   \
-       __t->weak       = term->weak;                           \
+       __t->weak       = __weak;                               \
        list_add_tail(&__t->list, head_terms)
 
-#define ADD_CONFIG_TERM_VAL(__type, __name, __val)             \
+#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)     \
 do {                                                           \
-       ADD_CONFIG_TERM(__type);                                \
+       ADD_CONFIG_TERM(__type, __weak);                        \
        __t->val.__name = __val;                                \
 } while (0)
 
-#define ADD_CONFIG_TERM_STR(__type, __val)                     \
+#define ADD_CONFIG_TERM_STR(__type, __val, __weak)             \
 do {                                                           \
-       ADD_CONFIG_TERM(__type);                                \
+       ADD_CONFIG_TERM(__type, __weak);                        \
        __t->val.str = strdup(__val);                           \
        if (!__t->val.str) {                                    \
                zfree(&__t);                                    \
@@ -1253,62 +1247,62 @@ do {                                                            \
        list_for_each_entry(term, head_config, list) {
                switch (term->type_term) {
                case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
-                       ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num);
+                       ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
-                       ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num);
+                       ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_TIME:
-                       ADD_CONFIG_TERM_VAL(TIME, time, term->val.num);
+                       ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
-                       ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str);
+                       ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
-                       ADD_CONFIG_TERM_STR(BRANCH, term->val.str);
+                       ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
                        ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
-                                           term->val.num);
+                                           term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_INHERIT:
                        ADD_CONFIG_TERM_VAL(INHERIT, inherit,
-                                           term->val.num ? 1 : 0);
+                                           term->val.num ? 1 : 0, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
                        ADD_CONFIG_TERM_VAL(INHERIT, inherit,
-                                           term->val.num ? 0 : 1);
+                                           term->val.num ? 0 : 1, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
                        ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
-                                           term->val.num);
+                                           term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
                        ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
-                                           term->val.num);
+                                           term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
                        ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
-                                           term->val.num ? 1 : 0);
+                                           term->val.num ? 1 : 0, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
                        ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
-                                           term->val.num ? 0 : 1);
+                                           term->val.num ? 0 : 1, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
-                       ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str);
+                       ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_PERCORE:
                        ADD_CONFIG_TERM_VAL(PERCORE, percore,
-                                           term->val.num ? true : false);
+                                           term->val.num ? true : false, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
                        ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
-                                           term->val.num ? 1 : 0);
+                                           term->val.num ? 1 : 0, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
                        ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
-                                           term->val.num);
+                                           term->val.num, term->weak);
                        break;
                default:
                        break;
@@ -1345,7 +1339,7 @@ static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
        }
 
        if (bits)
-               ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits);
+               ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);
 
 #undef ADD_CONFIG_TERM
        return 0;
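
The tracepoint_id_to_path() hunk above replaces a malloc()+strncpy() pair with asprintf() and a "%.*s" precision, which allocates exactly what is needed and is always NUL-terminated (strncpy() can leave the buffer unterminated at exactly MAX_EVENT_LENGTH bytes). A small sketch of the same idiom; MAX_EVENT_LENGTH and the event name are stand-in values.

    #define _GNU_SOURCE             /* asprintf() */
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_EVENT_LENGTH 512    /* stand-in limit */

    static char *bounded_copy(const char *name)
    {
            char *out;

            /* "%.*s" caps the copy at MAX_EVENT_LENGTH bytes, NUL always added */
            if (asprintf(&out, "%.*s", MAX_EVENT_LENGTH, name) < 0)
                    return NULL;
            return out;             /* caller frees */
    }

    int main(void)
    {
            char *s = bounded_copy("sched:sched_switch");

            if (!s)
                    return 1;
            printf("%s\n", s);
            free(s);
            return 0;
    }
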
index 0f5fda1..8c85294 100644 (file)
@@ -206,6 +206,9 @@ static struct strlist *__probe_file__get_namelist(int fd, bool include_group)
                } else
                        ret = strlist__add(sl, tev.event);
                clear_probe_trace_event(&tev);
+               /* Skip if there is same name multi-probe event in the list */
+               if (ret == -EEXIST)
+                       ret = 0;
                if (ret < 0)
                        break;
        }
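
The probe_file hunk treats -EEXIST as "already present" rather than as a fatal error, so same-name multi-probe events no longer abort the listing loop. A toy version of that pattern; the fixed-size list below stands in for perf's strlist.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static const char *seen[16];
    static int nr_seen;

    static int list_add(const char *name)
    {
            for (int i = 0; i < nr_seen; i++)
                    if (!strcmp(seen[i], name))
                            return -EEXIST;         /* same-name multi-probe event */
            if (nr_seen == 16)
                    return -ENOMEM;
            seen[nr_seen++] = name;
            return 0;
    }

    int main(void)
    {
            const char *events[] = { "probe:open", "probe:open", "probe:close" };

            for (size_t i = 0; i < 3; i++) {
                    int ret = list_add(events[i]);

                    if (ret == -EEXIST)             /* skip duplicates, keep going */
                            ret = 0;
                    if (ret < 0)
                            return 1;
            }
            printf("%d unique events\n", nr_seen);
            return 0;
    }
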
index 1c817ad..e4cff49 100644 (file)
@@ -637,14 +637,19 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
                return -EINVAL;
        }
 
-       /* Try to get actual symbol name from symtab */
-       symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
+       if (dwarf_entrypc(sp_die, &eaddr) == 0) {
+               /* If the DIE has entrypc, use it. */
+               symbol = dwarf_diename(sp_die);
+       } else {
+               /* Try to get actual symbol name and address from symtab */
+               symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
+               eaddr = sym.st_value;
+       }
        if (!symbol) {
                pr_warning("Failed to find symbol at 0x%lx\n",
                           (unsigned long)paddr);
                return -ENOENT;
        }
-       eaddr = sym.st_value;
 
        tp->offset = (unsigned long)(paddr - eaddr);
        tp->address = (unsigned long)paddr;
index aa344a1..8a065a6 100644 (file)
@@ -2,11 +2,13 @@ from os import getenv
 from subprocess import Popen, PIPE
 from re import sub
 
+cc = getenv("CC")
+cc_is_clang = b"clang version" in Popen([cc, "-v"], stderr=PIPE).stderr.readline()
+
 def clang_has_option(option):
-    return [o for o in Popen(['clang', option], stderr=PIPE).stderr.readlines() if b"unknown argument" in o] == [ ]
+    return [o for o in Popen([cc, option], stderr=PIPE).stderr.readlines() if b"unknown argument" in o] == [ ]
 
-cc = getenv("CC")
-if cc == "clang":
+if cc_is_clang:
     from distutils.sysconfig import get_config_vars
     vars = get_config_vars()
     for var in ('CFLAGS', 'OPT'):
@@ -40,7 +42,7 @@ class install_lib(_install_lib):
 cflags = getenv('CFLAGS', '').split()
 # switch off several checks (need to be at the end of cflags list)
 cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls' ]
-if cc != "clang":
+if not cc_is_clang:
     cflags += ['-Wno-cast-function-type' ]
 
 src_perf  = getenv('srctree') + '/tools/perf'
index 1077013..26bc6a0 100644 (file)
@@ -1622,7 +1622,12 @@ int dso__load(struct dso *dso, struct map *map)
                goto out;
        }
 
-       if (dso->kernel) {
+       kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
+               dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
+               dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
+               dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
+
+       if (dso->kernel && !kmod) {
                if (dso->kernel == DSO_TYPE_KERNEL)
                        ret = dso__load_kernel_sym(dso, map);
                else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
@@ -1650,12 +1655,6 @@ int dso__load(struct dso *dso, struct map *map)
        if (!name)
                goto out;
 
-       kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
-               dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
-               dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
-               dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
-
-
        /*
         * Read the build id if possible. This is required for
         * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
index 33dc34d..20f4634 100644 (file)
@@ -82,7 +82,7 @@ static struct pci_access *pci_acc;
 static struct pci_dev *amd_fam14h_pci_dev;
 static int nbp1_entered;
 
-struct timespec start_time;
+static struct timespec start_time;
 static unsigned long long timediff;
 
 #ifdef DEBUG
index 3c4cee1..a65f7d0 100644 (file)
@@ -19,7 +19,7 @@ struct cpuidle_monitor cpuidle_sysfs_monitor;
 
 static unsigned long long **previous_count;
 static unsigned long long **current_count;
-struct timespec start_time;
+static struct timespec start_time;
 static unsigned long long timediff;
 
 static int cpuidle_get_count_percent(unsigned int id, double *percent,
index 6d44fec..7c77045 100644 (file)
@@ -27,6 +27,8 @@ struct cpuidle_monitor *all_monitors[] = {
 0
 };
 
+int cpu_count;
+
 static struct cpuidle_monitor *monitors[MONITORS_MAX];
 static unsigned int avail_monitors;
 
index 5b5eb1d..c559d31 100644 (file)
@@ -25,7 +25,7 @@
 #endif
 #define CSTATE_DESC_LEN 60
 
-int cpu_count;
+extern int cpu_count;
 
 /* Hard to define the right names ...: */
 enum power_range_e {
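
Taken together, the cpupower hunks (file-private timespecs made static, a single int cpu_count definition in the monitor, an extern declaration in the header) look like the usual one-definition cleanup that newer compilers enforce with -fno-common. A sketch of that layout with invented file names; the build command is in the trailing comment.

    /* shared.h — declaration only, no storage */
    #ifndef SHARED_H
    #define SHARED_H
    extern int cpu_count;
    #endif

    /* state.c — the one and only definition */
    #include "shared.h"
    int cpu_count;

    /* main.c — any number of users may include shared.h */
    #include <stdio.h>
    #include "shared.h"

    int main(void)
    {
            cpu_count = 4;
            printf("cpu_count = %d\n", cpu_count);
            return 0;
    }

    /* build: cc -fno-common state.c main.c -o demo */
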
index 13f1e8b..2b65512 100644 (file)
@@ -16,7 +16,7 @@ override CFLAGS +=    -D_FORTIFY_SOURCE=2
 
 %: %.c
        @mkdir -p $(BUILD_OUTPUT)
-       $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@ $(LDFLAGS)
+       $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@ $(LDFLAGS) -lcap
 
 .PHONY : clean
 clean :
index 31c1ca0..33b3708 100644 (file)
@@ -30,7 +30,7 @@
 #include <sched.h>
 #include <time.h>
 #include <cpuid.h>
-#include <linux/capability.h>
+#include <sys/capability.h>
 #include <errno.h>
 #include <math.h>
 
@@ -304,6 +304,10 @@ int *irqs_per_cpu;         /* indexed by cpu_num */
 
 void setup_all_buffers(void);
 
+char *sys_lpi_file;
+char *sys_lpi_file_sysfs = "/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us";
+char *sys_lpi_file_debugfs = "/sys/kernel/debug/pmc_core/slp_s0_residency_usec";
+
 int cpu_is_not_present(int cpu)
 {
        return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
@@ -2916,8 +2920,6 @@ int snapshot_gfx_mhz(void)
  *
  * record snapshot of
  * /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us
- *
- * return 1 if config change requires a restart, else return 0
  */
 int snapshot_cpu_lpi_us(void)
 {
@@ -2941,17 +2943,14 @@ int snapshot_cpu_lpi_us(void)
 /*
  * snapshot_sys_lpi()
  *
- * record snapshot of
- * /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us
- *
- * return 1 if config change requires a restart, else return 0
+ * record snapshot of sys_lpi_file
  */
 int snapshot_sys_lpi_us(void)
 {
        FILE *fp;
        int retval;
 
-       fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r");
+       fp = fopen_or_die(sys_lpi_file, "r");
 
        retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us);
        if (retval != 1) {
@@ -3151,28 +3150,42 @@ void check_dev_msr()
                        err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
 }
 
-void check_permissions()
+/*
+ * check for CAP_SYS_RAWIO
+ * return 0 on success
+ * return 1 on fail
+ */
+int check_for_cap_sys_rawio(void)
 {
-       struct __user_cap_header_struct cap_header_data;
-       cap_user_header_t cap_header = &cap_header_data;
-       struct __user_cap_data_struct cap_data_data;
-       cap_user_data_t cap_data = &cap_data_data;
-       extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
-       int do_exit = 0;
-       char pathname[32];
+       cap_t caps;
+       cap_flag_value_t cap_flag_value;
 
-       /* check for CAP_SYS_RAWIO */
-       cap_header->pid = getpid();
-       cap_header->version = _LINUX_CAPABILITY_VERSION;
-       if (capget(cap_header, cap_data) < 0)
-               err(-6, "capget(2) failed");
+       caps = cap_get_proc();
+       if (caps == NULL)
+               err(-6, "cap_get_proc\n");
 
-       if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) {
-               do_exit++;
+       if (cap_get_flag(caps, CAP_SYS_RAWIO, CAP_EFFECTIVE, &cap_flag_value))
+               err(-6, "cap_get\n");
+
+       if (cap_flag_value != CAP_SET) {
                warnx("capget(CAP_SYS_RAWIO) failed,"
                        " try \"# setcap cap_sys_rawio=ep %s\"", progname);
+               return 1;
        }
 
+       if (cap_free(caps) == -1)
+               err(-6, "cap_free\n");
+
+       return 0;
+}
+void check_permissions(void)
+{
+       int do_exit = 0;
+       char pathname[32];
+
+       /* check for CAP_SYS_RAWIO */
+       do_exit += check_for_cap_sys_rawio();
+
        /* test file permissions */
        sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
        if (euidaccess(pathname, R_OK)) {
@@ -3265,6 +3278,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
        case INTEL_FAM6_ATOM_GOLDMONT:  /* BXT */
        case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
        case INTEL_FAM6_ATOM_GOLDMONT_D:        /* DNV */
+       case INTEL_FAM6_ATOM_TREMONT:   /* EHL */
                pkg_cstate_limits = glm_pkg_cstate_limits;
                break;
        default:
@@ -3336,6 +3350,17 @@ int is_skx(unsigned int family, unsigned int model)
        }
        return 0;
 }
+int is_ehl(unsigned int family, unsigned int model)
+{
+       if (!genuine_intel)
+               return 0;
+
+       switch (model) {
+       case INTEL_FAM6_ATOM_TREMONT:
+               return 1;
+       }
+       return 0;
+}
 
 int has_turbo_ratio_limit(unsigned int family, unsigned int model)
 {
@@ -3478,6 +3503,23 @@ dump_cstate_pstate_config_info(unsigned int family, unsigned int model)
        dump_nhm_cst_cfg();
 }
 
+static void dump_sysfs_file(char *path)
+{
+       FILE *input;
+       char cpuidle_buf[64];
+
+       input = fopen(path, "r");
+       if (input == NULL) {
+               if (debug)
+                       fprintf(outf, "NSFOD %s\n", path);
+               return;
+       }
+       if (!fgets(cpuidle_buf, sizeof(cpuidle_buf), input))
+               err(1, "%s: failed to read file", path);
+       fclose(input);
+
+       fprintf(outf, "%s: %s", strrchr(path, '/') + 1, cpuidle_buf);
+}
 static void
 dump_sysfs_cstate_config(void)
 {
@@ -3491,6 +3533,15 @@ dump_sysfs_cstate_config(void)
        if (!DO_BIC(BIC_sysfs))
                return;
 
+       if (access("/sys/devices/system/cpu/cpuidle", R_OK)) {
+               fprintf(outf, "cpuidle not loaded\n");
+               return;
+       }
+
+       dump_sysfs_file("/sys/devices/system/cpu/cpuidle/current_driver");
+       dump_sysfs_file("/sys/devices/system/cpu/cpuidle/current_governor");
+       dump_sysfs_file("/sys/devices/system/cpu/cpuidle/current_governor_ro");
+
        for (state = 0; state < 10; ++state) {
 
                sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name",
@@ -3894,6 +3945,20 @@ void rapl_probe_intel(unsigned int family, unsigned int model)
                else
                        BIC_PRESENT(BIC_PkgWatt);
                break;
+       case INTEL_FAM6_ATOM_TREMONT:   /* EHL */
+               do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO;
+               if (rapl_joules) {
+                       BIC_PRESENT(BIC_Pkg_J);
+                       BIC_PRESENT(BIC_Cor_J);
+                       BIC_PRESENT(BIC_RAM_J);
+                       BIC_PRESENT(BIC_GFX_J);
+               } else {
+                       BIC_PRESENT(BIC_PkgWatt);
+                       BIC_PRESENT(BIC_CorWatt);
+                       BIC_PRESENT(BIC_RAMWatt);
+                       BIC_PRESENT(BIC_GFXWatt);
+               }
+               break;
        case INTEL_FAM6_SKYLAKE_L:      /* SKL */
        case INTEL_FAM6_CANNONLAKE_L:   /* CNL */
                do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO;
@@ -4295,6 +4360,7 @@ int has_snb_msrs(unsigned int family, unsigned int model)
        case INTEL_FAM6_ATOM_GOLDMONT:          /* BXT */
        case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
        case INTEL_FAM6_ATOM_GOLDMONT_D:        /* DNV */
+       case INTEL_FAM6_ATOM_TREMONT:           /* EHL */
                return 1;
        }
        return 0;
@@ -4324,6 +4390,7 @@ int has_c8910_msrs(unsigned int family, unsigned int model)
        case INTEL_FAM6_CANNONLAKE_L:   /* CNL */
        case INTEL_FAM6_ATOM_GOLDMONT:  /* BXT */
        case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+       case INTEL_FAM6_ATOM_TREMONT:   /* EHL */
                return 1;
        }
        return 0;
@@ -4610,14 +4677,24 @@ unsigned int intel_model_duplicates(unsigned int model)
        case INTEL_FAM6_SKYLAKE:
        case INTEL_FAM6_KABYLAKE_L:
        case INTEL_FAM6_KABYLAKE:
+       case INTEL_FAM6_COMETLAKE_L:
+       case INTEL_FAM6_COMETLAKE:
                return INTEL_FAM6_SKYLAKE_L;
 
        case INTEL_FAM6_ICELAKE_L:
        case INTEL_FAM6_ICELAKE_NNPI:
+       case INTEL_FAM6_TIGERLAKE_L:
+       case INTEL_FAM6_TIGERLAKE:
                return INTEL_FAM6_CANNONLAKE_L;
 
        case INTEL_FAM6_ATOM_TREMONT_D:
                return INTEL_FAM6_ATOM_GOLDMONT_D;
+
+       case INTEL_FAM6_ATOM_TREMONT_L:
+               return INTEL_FAM6_ATOM_TREMONT;
+
+       case INTEL_FAM6_ICELAKE_X:
+               return INTEL_FAM6_SKYLAKE_X;
        }
        return model;
 }
@@ -4872,7 +4949,8 @@ void process_cpuid()
        do_slm_cstates = is_slm(family, model);
        do_knl_cstates  = is_knl(family, model);
 
-       if (do_slm_cstates || do_knl_cstates || is_cnl(family, model))
+       if (do_slm_cstates || do_knl_cstates || is_cnl(family, model) ||
+           is_ehl(family, model))
                BIC_NOT_PRESENT(BIC_CPU_c3);
 
        if (!quiet)
@@ -4907,10 +4985,16 @@ void process_cpuid()
        else
                BIC_NOT_PRESENT(BIC_CPU_LPI);
 
-       if (!access("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", R_OK))
+       if (!access(sys_lpi_file_sysfs, R_OK)) {
+               sys_lpi_file = sys_lpi_file_sysfs;
                BIC_PRESENT(BIC_SYS_LPI);
-       else
+       } else if (!access(sys_lpi_file_debugfs, R_OK)) {
+               sys_lpi_file = sys_lpi_file_debugfs;
+               BIC_PRESENT(BIC_SYS_LPI);
+       } else {
+               sys_lpi_file_sysfs = NULL;
                BIC_NOT_PRESENT(BIC_SYS_LPI);
+       }
 
        if (!quiet)
                decode_misc_feature_control();
@@ -5306,7 +5390,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 19.08.31"
+       fprintf(outf, "turbostat version 20.03.20"
                " - Len Brown <lenb@kernel.org>\n");
 }
 
@@ -5323,9 +5407,9 @@ int add_counter(unsigned int msr_num, char *path, char *name,
        }
 
        msrp->msr_num = msr_num;
-       strncpy(msrp->name, name, NAME_BYTES);
+       strncpy(msrp->name, name, NAME_BYTES - 1);
        if (path)
-               strncpy(msrp->path, path, PATH_BYTES);
+               strncpy(msrp->path, path, PATH_BYTES - 1);
        msrp->width = width;
        msrp->type = type;
        msrp->format = format;
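
turbostat's permission check now goes through libcap (matching the Makefile's new -lcap and the switch to <sys/capability.h>) instead of driving capget(2) by hand. A minimal stand-alone probe using the same three libcap calls; error handling is trimmed to keep the demo short.

    #include <stdio.h>
    #include <sys/capability.h>     /* link with -lcap */

    static int have_cap_sys_rawio(void)
    {
            cap_flag_value_t val = CAP_CLEAR;
            cap_t caps = cap_get_proc();

            if (!caps)
                    return 0;
            if (cap_get_flag(caps, CAP_SYS_RAWIO, CAP_EFFECTIVE, &val))
                    val = CAP_CLEAR;
            cap_free(caps);
            return val == CAP_SET;
    }

    int main(void)
    {
            printf("CAP_SYS_RAWIO effective: %s\n",
                   have_cap_sys_rawio() ? "yes" : "no");
            return 0;
    }
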
index ded7a95..6d2f3a1 100644 (file)
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 ifneq ($(O),)
 ifeq ($(origin O), command line)
-       dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
-       ABSOLUTE_O := $(shell cd $(O) ; pwd)
+       dummy := $(if $(shell cd $(PWD); test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
+       ABSOLUTE_O := $(shell cd $(PWD); cd $(O) ; pwd)
        OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/)
        COMMAND_O := O=$(ABSOLUTE_O)
 ifeq ($(objtree),)
index 220d04f..7570e36 100755 (executable)
@@ -30,7 +30,7 @@ my %default = (
     "EMAIL_WHEN_STARTED"       => 0,
     "NUM_TESTS"                        => 1,
     "TEST_TYPE"                        => "build",
-    "BUILD_TYPE"               => "randconfig",
+    "BUILD_TYPE"               => "oldconfig",
     "MAKE_CMD"                 => "make",
     "CLOSE_CONSOLE_SIGNAL"     => "INT",
     "TIMEOUT"                  => 120,
@@ -1030,7 +1030,7 @@ sub __read_config {
            }
 
            if (!$skip && $rest !~ /^\s*$/) {
-               die "$name: $.: Gargbage found after $type\n$_";
+               die "$name: $.: Garbage found after $type\n$_";
            }
 
            if ($skip && $type eq "TEST_START") {
@@ -1063,7 +1063,7 @@ sub __read_config {
            }
 
            if ($rest !~ /^\s*$/) {
-               die "$name: $.: Gargbage found after DEFAULTS\n$_";
+               die "$name: $.: Garbage found after DEFAULTS\n$_";
            }
 
        } elsif (/^\s*INCLUDE\s+(\S+)/) {
@@ -1154,7 +1154,7 @@ sub __read_config {
            # on of these sections that have SKIP defined.
            # The save variable can be
            # defined multiple times and the new one simply overrides
-           # the prevous one.
+           # the previous one.
            set_variable($lvalue, $rvalue);
 
        } else {
@@ -1234,7 +1234,7 @@ sub read_config {
        foreach my $option (keys %not_used) {
            print "$option\n";
        }
-       print "Set IGRNORE_UNUSED = 1 to have ktest ignore unused variables\n";
+       print "Set IGNORE_UNUSED = 1 to have ktest ignore unused variables\n";
        if (!read_yn "Do you want to continue?") {
            exit -1;
        }
@@ -1345,7 +1345,7 @@ sub eval_option {
        # Check for recursive evaluations.
        # 100 deep should be more than enough.
        if ($r++ > 100) {
-           die "Over 100 evaluations accurred with $option\n" .
+           die "Over 100 evaluations occurred with $option\n" .
                "Check for recursive variables\n";
        }
        $prev = $option;
@@ -1383,7 +1383,7 @@ sub reboot {
 
     } else {
        # Make sure everything has been written to disk
-       run_ssh("sync");
+       run_ssh("sync", 10);
 
        if (defined($time)) {
            start_monitor;
@@ -1461,7 +1461,7 @@ sub get_test_name() {
 
 sub dodie {
 
-    # avoid recusion
+    # avoid recursion
     return if ($in_die);
     $in_die = 1;
 
index c3bc933..27666b8 100644 (file)
@@ -10,7 +10,7 @@
 #
 
 # Options set in the beginning of the file are considered to be
-# default options. These options can be overriden by test specific
+# default options. These options can be overridden by test specific
 # options, with the following exceptions:
 #
 #  LOG_FILE
 #
 # This config file can also contain "config variables".
 # These are assigned with ":=" instead of the ktest option
-# assigment "=".
+# assignment "=".
 #
 # The difference between ktest options and config variables
 # is that config variables can be used multiple times,
 #### Using options in other options ####
 #
 # Options that are defined in the config file may also be used
-# by other options. All options are evaulated at time of
+# by other options. All options are evaluated at time of
 # use (except that config variables are evaluated at config
 # processing time).
 #
 #TEST = ssh user@machine /root/run_test
 
 # The build type is any make config type or special command
-#  (default randconfig)
+#  (default oldconfig)
 #   nobuild - skip the clean and build step
 #   useconfig:/path/to/config - use the given config and run
 #              oldconfig on it.
 
 # Line to define a successful boot up in console output.
 # This is what the line contains, not the entire line. If you need
-# the entire line to match, then use regural expression syntax like:
+# the entire line to match, then use regular expression syntax like:
 #  (do not add any quotes around it)
 #
 #  SUCCESS_LINE = ^MyBox Login:$
 # (ignored if POWEROFF_ON_SUCCESS is set)
 #REBOOT_ON_SUCCESS = 1
 
-# In case there are isses with rebooting, you can specify this
+# In case there are issues with rebooting, you can specify this
 # to always powercycle after this amount of time after calling
 # reboot.
 # Note, POWERCYCLE_AFTER_REBOOT = 0 does NOT disable it. It just
 # (default undefined)
 #POWERCYCLE_AFTER_REBOOT = 5
 
-# In case there's isses with halting, you can specify this
+# In case there's issues with halting, you can specify this
 # to always poweroff after this amount of time after calling
 # halt.
 # Note, POWEROFF_AFTER_HALT = 0 does NOT disable it. It just
 #
 #  PATCHCHECK_START is required and is the first patch to
 #   test (the SHA1 of the commit). You may also specify anything
-#   that git checkout allows (branch name, tage, HEAD~3).
+#   that git checkout allows (branch name, tag, HEAD~3).
 #
 #  PATCHCHECK_END is the last patch to check (default HEAD)
 #
 #     IGNORE_WARNINGS is set for the given commit's sha1
 #
 #   IGNORE_WARNINGS can be used to disable the failure of patchcheck
-#     on a particuler commit (SHA1). You can add more than one commit
+#     on a particular commit (SHA1). You can add more than one commit
 #     by adding a list of SHA1s that are space delimited.
 #
 #   If BUILD_NOCLEAN is set, then make mrproper will not be run on
 #   whatever reason. (Can't reboot, want to inspect each iteration)
 #   Doing a BISECT_MANUAL will have the test wait for you to
 #   tell it if the test passed or failed after each iteration.
-#   This is basicall the same as running git bisect yourself
+#   This is basically the same as running git bisect yourself
 #   but ktest will rebuild and install the kernel for you.
 #
 # BISECT_CHECK = 1 (optional, default 0)
 #
 # CONFIG_BISECT_EXEC (optional)
 #  The config bisect is a separate program that comes with ktest.pl.
-#  By befault, it will look for:
+#  By default, it will look for:
 #    `pwd`/config-bisect.pl # the location ktest.pl was executed from.
 #  If it does not find it there, it will look for:
 #    `dirname <ktest.pl>`/config-bisect.pl # The directory that holds ktest.pl
index 6ec5039..b93fa64 100644 (file)
@@ -33,6 +33,7 @@ TARGETS += memory-hotplug
 TARGETS += mount
 TARGETS += mqueue
 TARGETS += net
+TARGETS += net/forwarding
 TARGETS += net/mptcp
 TARGETS += netfilter
 TARGETS += networking/timestamping
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c b/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c
new file mode 100644 (file)
index 0000000..189a34a
--- /dev/null
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "test_send_signal_kern.skel.h"
+
+static void sigusr1_handler(int signum)
+{
+}
+
+#define THREAD_COUNT 100
+
+static void *worker(void *p)
+{
+       int i;
+
+       for ( i = 0; i < 1000; i++)
+               usleep(1);
+
+       return NULL;
+}
+
+void test_send_signal_sched_switch(void)
+{
+       struct test_send_signal_kern *skel;
+       pthread_t threads[THREAD_COUNT];
+       u32 duration = 0;
+       int i, err;
+
+       signal(SIGUSR1, sigusr1_handler);
+
+       skel = test_send_signal_kern__open_and_load();
+       if (CHECK(!skel, "skel_open_and_load", "skeleton open_and_load failed\n"))
+               return;
+
+       skel->bss->pid = getpid();
+       skel->bss->sig = SIGUSR1;
+
+       err = test_send_signal_kern__attach(skel);
+       if (CHECK(err, "skel_attach", "skeleton attach failed\n"))
+               goto destroy_skel;
+
+       for (i = 0; i < THREAD_COUNT; i++) {
+               err = pthread_create(threads + i, NULL, worker, NULL);
+               if (CHECK(err, "pthread_create", "Error creating thread, %s\n",
+                         strerror(errno)))
+                       goto destroy_skel;
+       }
+
+       for (i = 0; i < THREAD_COUNT; i++)
+               pthread_join(threads[i], NULL);
+
+destroy_skel:
+       test_send_signal_kern__destroy(skel);
+}
index 1acc91e..b4233d3 100644 (file)
@@ -31,6 +31,12 @@ int send_signal_tp(void *ctx)
        return bpf_send_signal_test(ctx);
 }
 
+SEC("tracepoint/sched/sched_switch")
+int send_signal_tp_sched(void *ctx)
+{
+       return bpf_send_signal_test(ctx);
+}
+
 SEC("perf_event")
 int send_signal_perf(void *ctx)
 {
index 93040ca..8da77cd 100644 (file)
@@ -1062,6 +1062,48 @@ static struct btf_raw_test raw_tests[] = {
        .err_str = "Member exceeds struct_size",
 },
 
+/* Test member unexceeds the size of struct
+ *
+ * enum E {
+ *     E0,
+ *     E1,
+ * };
+ *
+ * struct A {
+ *     char m;
+ *     enum E __attribute__((packed)) n;
+ * };
+ */
+{
+       .descr = "size check test #5",
+       .raw_types = {
+               /* int */                       /* [1] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, sizeof(int)),
+               /* char */                      /* [2] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1),
+               /* enum E { */                  /* [3] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 1),
+               BTF_ENUM_ENC(NAME_TBD, 0),
+               BTF_ENUM_ENC(NAME_TBD, 1),
+               /* } */
+               /* struct A { */                /* [4] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 2),
+               BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* char m; */
+               BTF_MEMBER_ENC(NAME_TBD, 3, 8),/* enum E __attribute__((packed)) n; */
+               /* } */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0E\0E0\0E1\0A\0m\0n",
+       .str_sec_size = sizeof("\0E\0E0\0E1\0A\0m\0n"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "size_check5_map",
+       .key_size = sizeof(int),
+       .value_size = 2,
+       .key_type_id = 1,
+       .value_type_id = 4,
+       .max_entries = 4,
+},
+
 /* typedef const void * const_void_ptr;
  * struct A {
  *     const_void_ptr m;
index bf0322e..bd5cae4 100644 (file)
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
+       "jset32: ignores upper bits",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_LD_IMM64(BPF_REG_7, 0x8000000000000000),
+       BPF_LD_IMM64(BPF_REG_8, 0x8000000000000000),
+       BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+       BPF_EXIT_INSN(),
+       BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 2,
+},
+{
        "jset32: min/max deduction",
        .insns = {
        BPF_RAND_UEXT_R7,
index 287ae91..4c1bd03 100644 (file)
@@ -11,7 +11,9 @@ TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh
 TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh
 TEST_PROGS += test_vxlan_fdb_changelink.sh so_txtime.sh ipv6_flowlabel.sh
 TEST_PROGS += tcp_fastopen_backup_key.sh fcnal-test.sh l2tp.sh traceroute.sh
-TEST_PROGS += fin_ack_lat.sh
+TEST_PROGS += fin_ack_lat.sh fib_nexthop_multiprefix.sh fib_nexthops.sh
+TEST_PROGS += altnames.sh icmp_redirect.sh ip6_gre_headroom.sh
+TEST_PROGS += route_localnet.sh
 TEST_PROGS_EXTENDED := in_netns.sh
 TEST_GEN_FILES =  socket nettest
 TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
index 60273f1..b761670 100755 (executable)
@@ -1041,6 +1041,27 @@ ipv6_addr_metric_test()
        fi
        log_test $rc 0 "Prefix route with metric on link up"
 
+       # verify peer metric added correctly
+       set -e
+       run_cmd "$IP -6 addr flush dev dummy2"
+       run_cmd "$IP -6 addr add dev dummy2 2001:db8:104::1 peer 2001:db8:104::2 metric 260"
+       set +e
+
+       check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 260"
+       log_test $? 0 "Set metric with peer route on local side"
+       log_test $? 0 "User specified metric on local address"
+       check_route6 "2001:db8:104::2 dev dummy2 proto kernel metric 260"
+       log_test $? 0 "Set metric with peer route on peer side"
+
+       set -e
+       run_cmd "$IP -6 addr change dev dummy2 2001:db8:104::1 peer 2001:db8:104::3 metric 261"
+       set +e
+
+       check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 261"
+       log_test $? 0 "Modify metric and peer address on local side"
+       check_route6 "2001:db8:104::3 dev dummy2 proto kernel metric 261"
+       log_test $? 0 "Modify metric and peer address on peer side"
+
        $IP li del dummy1
        $IP li del dummy2
        cleanup
@@ -1457,13 +1478,20 @@ ipv4_addr_metric_test()
 
        run_cmd "$IP addr flush dev dummy2"
        run_cmd "$IP addr add dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 260"
-       run_cmd "$IP addr change dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 261"
        rc=$?
        if [ $rc -eq 0 ]; then
-               check_route "172.16.104.2 dev dummy2 proto kernel scope link src 172.16.104.1 metric 261"
+               check_route "172.16.104.2 dev dummy2 proto kernel scope link src 172.16.104.1 metric 260"
+               rc=$?
+       fi
+       log_test $rc 0 "Set metric of address with peer route"
+
+       run_cmd "$IP addr change dev dummy2 172.16.104.1/32 peer 172.16.104.3 metric 261"
+       rc=$?
+       if [ $rc -eq 0 ]; then
+               check_route "172.16.104.3 dev dummy2 proto kernel scope link src 172.16.104.1 metric 261"
                rc=$?
        fi
-       log_test $rc 0 "Modify metric of address with peer route"
+       log_test $rc 0 "Modify metric and peer address for peer route"
 
        $IP li del dummy1
        $IP li del dummy2
diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile
new file mode 100644 (file)
index 0000000..250fbb2
--- /dev/null
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0+ OR MIT
+
+TEST_PROGS = bridge_igmp.sh \
+       bridge_port_isolation.sh \
+       bridge_sticky_fdb.sh \
+       bridge_vlan_aware.sh \
+       bridge_vlan_unaware.sh \
+       ethtool.sh \
+       gre_inner_v4_multipath.sh \
+       gre_inner_v6_multipath.sh \
+       gre_multipath.sh \
+       ip6gre_inner_v4_multipath.sh \
+       ip6gre_inner_v6_multipath.sh \
+       ipip_flat_gre_key.sh \
+       ipip_flat_gre_keys.sh \
+       ipip_flat_gre.sh \
+       ipip_hier_gre_key.sh \
+       ipip_hier_gre_keys.sh \
+       ipip_hier_gre.sh \
+       loopback.sh \
+       mirror_gre_bound.sh \
+       mirror_gre_bridge_1d.sh \
+       mirror_gre_bridge_1d_vlan.sh \
+       mirror_gre_bridge_1q_lag.sh \
+       mirror_gre_bridge_1q.sh \
+       mirror_gre_changes.sh \
+       mirror_gre_flower.sh \
+       mirror_gre_lag_lacp.sh \
+       mirror_gre_neigh.sh \
+       mirror_gre_nh.sh \
+       mirror_gre.sh \
+       mirror_gre_vlan_bridge_1q.sh \
+       mirror_gre_vlan.sh \
+       mirror_vlan.sh \
+       router_bridge.sh \
+       router_bridge_vlan.sh \
+       router_broadcast.sh \
+       router_mpath_nh.sh \
+       router_multicast.sh \
+       router_multipath.sh \
+       router.sh \
+       router_vid_1.sh \
+       sch_ets.sh \
+       sch_tbf_ets.sh \
+       sch_tbf_prio.sh \
+       sch_tbf_root.sh \
+       tc_actions.sh \
+       tc_chains.sh \
+       tc_flower_router.sh \
+       tc_flower.sh \
+       tc_shblocks.sh \
+       tc_vlan_modify.sh \
+       vxlan_asymmetric.sh \
+       vxlan_bridge_1d_port_8472.sh \
+       vxlan_bridge_1d.sh \
+       vxlan_bridge_1q_port_8472.sh \
+       vxlan_bridge_1q.sh \
+       vxlan_symmetric.sh
+
+TEST_PROGS_EXTENDED := devlink_lib.sh \
+       ethtool_lib.sh \
+       fib_offload_lib.sh \
+       forwarding.config.sample \
+       ipip_lib.sh \
+       lib.sh \
+       mirror_gre_lib.sh \
+       mirror_gre_topo_lib.sh \
+       mirror_lib.sh \
+       mirror_topo_lib.sh \
+       sch_ets_core.sh \
+       sch_ets_tests.sh \
+       sch_tbf_core.sh \
+       sch_tbf_etsprio.sh \
+       tc_common.sh
+
+include ../../lib.mk
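
The new Makefile only wires the existing forwarding scripts into the kselftest build/run machinery; the scripts still take their interface names from a local config file. A hedged sketch of running one of the listed tests (the copy-the-sample step and the TARGETS invocation are the usual kselftest conventions, not something introduced by this patch):

    cd tools/testing/selftests/net/forwarding
    cp forwarding.config.sample forwarding.config   # then edit NETIFS to match local interfaces
    ./router.sh                                     # run one of the listed tests directly
    make -C tools/testing/selftests TARGETS=net/forwarding run_tests   # or via the harness
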
index c623393..b8475cb 100644 (file)
 #include <sys/socket.h>
 #include <unistd.h>
 
+#ifndef SOL_DCCP
+#define SOL_DCCP 269
+#endif
+
 static const char *IP4_ADDR = "127.0.0.1";
 static const char *IP6_ADDR = "::1";
 static const char *IP4_MAPPED6 = "::ffff:127.0.0.1";
index 08194aa..9c0f758 100644 (file)
@@ -3,6 +3,10 @@
 
 TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \
        conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
-       nft_concat_range.sh
+       nft_concat_range.sh \
+       nft_queue.sh
+
+LDLIBS = -lmnl
+TEST_GEN_FILES = nf-queue
 
 include ../lib.mk
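
With nf-queue in TEST_GEN_FILES and -lmnl in LDLIBS, building the netfilter selftests now requires the libmnl headers and library on the build host (the package name below is an assumption for Debian-style systems):

    # apt-get install libmnl-dev                                   # or the distro equivalent
    make -C tools/testing/selftests TARGETS=netfilter              # builds nf-queue
    make -C tools/testing/selftests TARGETS=netfilter run_tests    # runs nft_queue.sh and the rest
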
index 59caa8f..4faf2ce 100644 (file)
@@ -1,2 +1,8 @@
 CONFIG_NET_NS=y
 CONFIG_NF_TABLES_INET=y
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_REDIR=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_FLOW_OFFLOAD=m
+CONFIG_NF_CT_NETLINK=m
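
The config fragment now lists the modules the queue test (and the other scripts in this directory) depend on. One way to fold it into a test kernel's configuration, assuming the fragment sits at tools/testing/selftests/netfilter/config as in this series:

    ./scripts/kconfig/merge_config.sh -m .config tools/testing/selftests/netfilter/config
    make olddefconfig    # resolve the merged symbols before building
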
diff --git a/tools/testing/selftests/netfilter/nf-queue.c b/tools/testing/selftests/netfilter/nf-queue.c
new file mode 100644 (file)
index 0000000..29c73bc
--- /dev/null
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <time.h>
+#include <arpa/inet.h>
+
+#include <libmnl/libmnl.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_queue.h>
+
+struct options {
+       bool count_packets;
+       int verbose;
+       unsigned int queue_num;
+       unsigned int timeout;
+};
+
+static unsigned int queue_stats[5];
+static struct options opts;
+
+static void help(const char *p)
+{
+       printf("Usage: %s [-c|-v [-vv] ] [-t timeout] [-q queue_num]\n", p);
+}
+
+static int parse_attr_cb(const struct nlattr *attr, void *data)
+{
+       const struct nlattr **tb = data;
+       int type = mnl_attr_get_type(attr);
+
+       /* skip unsupported attribute in user-space */
+       if (mnl_attr_type_valid(attr, NFQA_MAX) < 0)
+               return MNL_CB_OK;
+
+       switch (type) {
+       case NFQA_MARK:
+       case NFQA_IFINDEX_INDEV:
+       case NFQA_IFINDEX_OUTDEV:
+       case NFQA_IFINDEX_PHYSINDEV:
+       case NFQA_IFINDEX_PHYSOUTDEV:
+               if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0) {
+                       perror("mnl_attr_validate");
+                       return MNL_CB_ERROR;
+               }
+               break;
+       case NFQA_TIMESTAMP:
+               if (mnl_attr_validate2(attr, MNL_TYPE_UNSPEC,
+                   sizeof(struct nfqnl_msg_packet_timestamp)) < 0) {
+                       perror("mnl_attr_validate2");
+                       return MNL_CB_ERROR;
+               }
+               break;
+       case NFQA_HWADDR:
+               if (mnl_attr_validate2(attr, MNL_TYPE_UNSPEC,
+                   sizeof(struct nfqnl_msg_packet_hw)) < 0) {
+                       perror("mnl_attr_validate2");
+                       return MNL_CB_ERROR;
+               }
+               break;
+       case NFQA_PAYLOAD:
+               break;
+       }
+       tb[type] = attr;
+       return MNL_CB_OK;
+}
+
+static int queue_cb(const struct nlmsghdr *nlh, void *data)
+{
+       struct nlattr *tb[NFQA_MAX+1] = { 0 };
+       struct nfqnl_msg_packet_hdr *ph = NULL;
+       uint32_t id = 0;
+
+       (void)data;
+
+       mnl_attr_parse(nlh, sizeof(struct nfgenmsg), parse_attr_cb, tb);
+       if (tb[NFQA_PACKET_HDR]) {
+               ph = mnl_attr_get_payload(tb[NFQA_PACKET_HDR]);
+               id = ntohl(ph->packet_id);
+
+               if (opts.verbose > 0)
+                       printf("packet hook=%u, hwproto 0x%x",
+                               ph->hook, ntohs(ph->hw_protocol));
+
+               if (ph->hook >= 5) {
+                       fprintf(stderr, "Unknown hook %d\n", ph->hook);
+                       return MNL_CB_ERROR;
+               }
+
+               if (opts.verbose > 0) {
+                       uint32_t skbinfo = 0;
+
+                       if (tb[NFQA_SKB_INFO])
+                               skbinfo = ntohl(mnl_attr_get_u32(tb[NFQA_SKB_INFO]));
+                       if (skbinfo & NFQA_SKB_CSUMNOTREADY)
+                               printf(" csumnotready");
+                       if (skbinfo & NFQA_SKB_GSO)
+                               printf(" gso");
+                       if (skbinfo & NFQA_SKB_CSUM_NOTVERIFIED)
+                               printf(" csumnotverified");
+                       puts("");
+               }
+
+               if (opts.count_packets)
+                       queue_stats[ph->hook]++;
+       }
+
+       return MNL_CB_OK + id;
+}
+
+static struct nlmsghdr *
+nfq_build_cfg_request(char *buf, uint8_t command, int queue_num)
+{
+       struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
+       struct nfqnl_msg_config_cmd cmd = {
+               .command = command,
+               .pf = htons(AF_INET),
+       };
+       struct nfgenmsg *nfg;
+
+       nlh->nlmsg_type = (NFNL_SUBSYS_QUEUE << 8) | NFQNL_MSG_CONFIG;
+       nlh->nlmsg_flags = NLM_F_REQUEST;
+
+       nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
+
+       nfg->nfgen_family = AF_UNSPEC;
+       nfg->version = NFNETLINK_V0;
+       nfg->res_id = htons(queue_num);
+
+       mnl_attr_put(nlh, NFQA_CFG_CMD, sizeof(cmd), &cmd);
+
+       return nlh;
+}
+
+static struct nlmsghdr *
+nfq_build_cfg_params(char *buf, uint8_t mode, int range, int queue_num)
+{
+       struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
+       struct nfqnl_msg_config_params params = {
+               .copy_range = htonl(range),
+               .copy_mode = mode,
+       };
+       struct nfgenmsg *nfg;
+
+       nlh->nlmsg_type = (NFNL_SUBSYS_QUEUE << 8) | NFQNL_MSG_CONFIG;
+       nlh->nlmsg_flags = NLM_F_REQUEST;
+
+       nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
+       nfg->nfgen_family = AF_UNSPEC;
+       nfg->version = NFNETLINK_V0;
+       nfg->res_id = htons(queue_num);
+
+       mnl_attr_put(nlh, NFQA_CFG_PARAMS, sizeof(params), &params);
+
+       return nlh;
+}
+
+static struct nlmsghdr *
+nfq_build_verdict(char *buf, int id, int queue_num, int verd)
+{
+       struct nfqnl_msg_verdict_hdr vh = {
+               .verdict = htonl(verd),
+               .id = htonl(id),
+       };
+       struct nlmsghdr *nlh;
+       struct nfgenmsg *nfg;
+
+       nlh = mnl_nlmsg_put_header(buf);
+       nlh->nlmsg_type = (NFNL_SUBSYS_QUEUE << 8) | NFQNL_MSG_VERDICT;
+       nlh->nlmsg_flags = NLM_F_REQUEST;
+       nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
+       nfg->nfgen_family = AF_UNSPEC;
+       nfg->version = NFNETLINK_V0;
+       nfg->res_id = htons(queue_num);
+
+       mnl_attr_put(nlh, NFQA_VERDICT_HDR, sizeof(vh), &vh);
+
+       return nlh;
+}
+
+static void print_stats(void)
+{
+       unsigned int last, total;
+       int i;
+
+       if (!opts.count_packets)
+               return;
+
+       total = 0;
+       last = queue_stats[0];
+
+       for (i = 0; i < 5; i++) {
+               printf("hook %d packets %08u\n", i, queue_stats[i]);
+               last = queue_stats[i];
+               total += last;
+       }
+
+       printf("%u packets total\n", total);
+}
+
+static struct mnl_socket *open_queue(void)
+{
+       char buf[MNL_SOCKET_BUFFER_SIZE];
+       unsigned int queue_num;
+       struct mnl_socket *nl;
+       struct nlmsghdr *nlh;
+       struct timeval tv;
+       uint32_t flags;
+
+       nl = mnl_socket_open(NETLINK_NETFILTER);
+       if (nl == NULL) {
+               perror("mnl_socket_open");
+               exit(EXIT_FAILURE);
+       }
+
+       if (mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) {
+               perror("mnl_socket_bind");
+               exit(EXIT_FAILURE);
+       }
+
+       queue_num = opts.queue_num;
+       nlh = nfq_build_cfg_request(buf, NFQNL_CFG_CMD_BIND, queue_num);
+
+       if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
+               perror("mnl_socket_sendto");
+               exit(EXIT_FAILURE);
+       }
+
+       nlh = nfq_build_cfg_params(buf, NFQNL_COPY_PACKET, 0xFFFF, queue_num);
+
+       flags = NFQA_CFG_F_GSO | NFQA_CFG_F_UID_GID;
+       mnl_attr_put_u32(nlh, NFQA_CFG_FLAGS, htonl(flags));
+       mnl_attr_put_u32(nlh, NFQA_CFG_MASK, htonl(flags));
+
+       if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
+               perror("mnl_socket_sendto");
+               exit(EXIT_FAILURE);
+       }
+
+       memset(&tv, 0, sizeof(tv));
+       tv.tv_sec = opts.timeout;
+       if (opts.timeout && setsockopt(mnl_socket_get_fd(nl),
+                                      SOL_SOCKET, SO_RCVTIMEO,
+                                      &tv, sizeof(tv))) {
+               perror("setsockopt(SO_RCVTIMEO)");
+               exit(EXIT_FAILURE);
+       }
+
+       return nl;
+}
+
+static int mainloop(void)
+{
+       unsigned int buflen = 64 * 1024 + MNL_SOCKET_BUFFER_SIZE;
+       struct mnl_socket *nl;
+       struct nlmsghdr *nlh;
+       unsigned int portid;
+       char *buf;
+       int ret;
+
+       buf = malloc(buflen);
+       if (!buf) {
+               perror("malloc");
+               exit(EXIT_FAILURE);
+       }
+
+       nl = open_queue();
+       portid = mnl_socket_get_portid(nl);
+
+       for (;;) {
+               uint32_t id;
+
+               ret = mnl_socket_recvfrom(nl, buf, buflen);
+               if (ret == -1) {
+                       if (errno == ENOBUFS)
+                               continue;
+
+                       if (errno == EAGAIN) {
+                               errno = 0;
+                               ret = 0;
+                               break;
+                       }
+
+                       perror("mnl_socket_recvfrom");
+                       exit(EXIT_FAILURE);
+               }
+
+               ret = mnl_cb_run(buf, ret, 0, portid, queue_cb, NULL);
+               if (ret < 0) {
+                       perror("mnl_cb_run");
+                       exit(EXIT_FAILURE);
+               }
+
+               id = ret - MNL_CB_OK;
+               nlh = nfq_build_verdict(buf, id, opts.queue_num, NF_ACCEPT);
+               if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
+                       perror("mnl_socket_sendto");
+                       exit(EXIT_FAILURE);
+               }
+       }
+
+       mnl_socket_close(nl);
+
+       return ret;
+}
+
+static void parse_opts(int argc, char **argv)
+{
+       int c;
+
+       while ((c = getopt(argc, argv, "chvt:q:")) != -1) {
+               switch (c) {
+               case 'c':
+                       opts.count_packets = true;
+                       break;
+               case 'h':
+                       help(argv[0]);
+                       exit(0);
+                       break;
+               case 'q':
+                       opts.queue_num = atoi(optarg);
+                       if (opts.queue_num > 0xffff)
+                               opts.queue_num = 0;
+                       break;
+               case 't':
+                       opts.timeout = atoi(optarg);
+                       break;
+               case 'v':
+                       opts.verbose++;
+                       break;
+               }
+       }
+}
+
+int main(int argc, char *argv[])
+{
+       int ret;
+
+       parse_opts(argc, argv);
+
+       ret = mainloop();
+       if (opts.count_packets)
+               print_stats();
+
+       return ret;
+}
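
nf-queue is a small libmnl-based NFQUEUE listener: it binds to one queue number, issues an NF_ACCEPT verdict for every packet, and with -c prints a per-hook tally plus a final "N packets total" line that nft_queue.sh compares against. A usage sketch mirroring how the script drives it; the nft rule shown is illustrative only, the selftest installs its own ruleset:

    nft add table inet t
    nft add chain inet t in '{ type filter hook input priority 0; }'
    nft add rule inet t in queue num 0 bypass    # bypass: traffic still passes if no listener is bound
    ./nf-queue -c -q 0 -t 3                      # count packets on queue 0 for 3 seconds, then print stats
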
diff --git a/tools/testing/selftests/netfilter/nft_queue.sh b/tools/testing/selftests/netfilter/nft_queue.sh
new file mode 100755 (executable)
index 0000000..6898448
--- /dev/null
@@ -0,0 +1,332 @@
+#!/bin/bash
+#
+# This tests nf_queue:
+# 1. can process packets from all hooks
+# 2. supports running nfqueue from more than one base chain
+#
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+nsrouter="nsrouter-$sfx"
+
+cleanup()
+{
+       ip netns del ${ns1}
+       ip netns del ${ns2}
+       ip netns del ${nsrouter}
+       rm -f "$TMPFILE0"
+       rm -f "$TMPFILE1"
+}
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without nft tool"
+       exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ip tool"
+       exit $ksft_skip
+fi
+
+ip netns add ${nsrouter}
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not create net namespace"
+       exit $ksft_skip
+fi
+
+TMPFILE0=$(mktemp)
+TMPFILE1=$(mktemp)
+trap cleanup EXIT
+
+ip netns add ${ns1}
+ip netns add ${ns2}
+
+ip link add veth0 netns ${nsrouter} type veth peer name eth0 netns ${ns1} > /dev/null 2>&1
+if [ $? -ne 0 ];then
+    echo "SKIP: No virtual ethernet pair device support in kernel"
+    exit $ksft_skip
+fi
+ip link add veth1 netns ${nsrouter} type veth peer name eth0 netns ${ns2}
+
+ip -net ${nsrouter} link set lo up
+ip -net ${nsrouter} link set veth0 up
+ip -net ${nsrouter} addr add 10.0.1.1/24 dev veth0
+ip -net ${nsrouter} addr add dead:1::1/64 dev veth0
+
+ip -net ${nsrouter} link set veth1 up
+ip -net ${nsrouter} addr add 10.0.2.1/24 dev veth1
+ip -net ${nsrouter} addr add dead:2::1/64 dev veth1
+
+ip -net ${ns1} link set lo up
+ip -net ${ns1} link set eth0 up
+
+ip -net ${ns2} link set lo up
+ip -net ${ns2} link set eth0 up
+
+ip -net ${ns1} addr add 10.0.1.99/24 dev eth0
+ip -net ${ns1} addr add dead:1::99/64 dev eth0
+ip -net ${ns1} route add default via 10.0.1.1
+ip -net ${ns1} route add default via dead:1::1
+
+ip -net ${ns2} addr add 10.0.2.99/24 dev eth0
+ip -net ${ns2} addr add dead:2::99/64 dev eth0
+ip -net ${ns2} route add default via 10.0.2.1
+ip -net ${ns2} route add default via dead:2::1
+
+load_ruleset() {
+       local name=$1
+       local prio=$2
+
+ip netns exec ${nsrouter} nft -f - <<EOF
+table inet $name {
+       chain nfq {
+               ip protocol icmp queue bypass
+               icmpv6 type { "echo-request", "echo-reply" } queue num 1 bypass
+       }
+       chain pre {
+               type filter hook prerouting priority $prio; policy accept;
+               jump nfq
+       }
+       chain input {
+               type filter hook input priority $prio; policy accept;
+               jump nfq
+       }
+       chain forward {
+               type filter hook forward priority $prio; policy accept;
+               tcp dport 12345 queue num 2
+               jump nfq
+       }
+       chain output {
+               type filter hook output priority $prio; policy accept;
+               tcp dport 12345 queue num 3
+               jump nfq
+       }
+       chain post {
+               type filter hook postrouting priority $prio; policy accept;
+               jump nfq
+       }
+}
+EOF
+}
+
+load_counter_ruleset() {
+       local prio=$1
+
+ip netns exec ${nsrouter} nft -f - <<EOF
+table inet countrules {
+       chain pre {
+               type filter hook prerouting priority $prio; policy accept;
+               counter
+       }
+       chain input {
+               type filter hook input priority $prio; policy accept;
+               counter
+       }
+       chain forward {
+               type filter hook forward priority $prio; policy accept;
+               counter
+       }
+       chain output {
+               type filter hook output priority $prio; policy accept;
+               counter
+       }
+       chain post {
+               type filter hook postrouting priority $prio; policy accept;
+               counter
+       }
+}
+EOF
+}
+
+test_ping() {
+  ip netns exec ${ns1} ping -c 1 -q 10.0.2.99 > /dev/null
+  if [ $? -ne 0 ];then
+       return 1
+  fi
+
+  ip netns exec ${ns1} ping -c 1 -q dead:2::99 > /dev/null
+  if [ $? -ne 0 ];then
+       return 1
+  fi
+
+  return 0
+}
+
+test_ping_router() {
+  ip netns exec ${ns1} ping -c 1 -q 10.0.2.1 > /dev/null
+  if [ $? -ne 0 ];then
+       return 1
+  fi
+
+  ip netns exec ${ns1} ping -c 1 -q dead:2::1 > /dev/null
+  if [ $? -ne 0 ];then
+       return 1
+  fi
+
+  return 0
+}
+
+test_queue_blackhole() {
+       local proto=$1
+
+ip netns exec ${nsrouter} nft -f - <<EOF
+table $proto blackh {
+       chain forward {
+       type filter hook forward priority 0; policy accept;
+               queue num 600
+       }
+}
+EOF
+       if [ $proto = "ip" ] ;then
+               ip netns exec ${ns1} ping -c 1 -q 10.0.2.99 > /dev/null
+               lret=$?
+       elif [ $proto = "ip6" ]; then
+               ip netns exec ${ns1} ping -c 1 -q dead:2::99 > /dev/null
+               lret=$?
+       else
+               lret=111
+       fi
+
+       # queue without bypass keyword should drop traffic if no listener exists.
+       if [ $lret -eq 0 ];then
+               echo "FAIL: $proto expected failure, got $lret" 1>&2
+               exit 1
+       fi
+
+       ip netns exec ${nsrouter} nft delete table $proto blackh
+       if [ $? -ne 0 ] ;then
+               echo "FAIL: $proto: Could not delete blackh table"
+               exit 1
+       fi
+
+       echo "PASS: $proto: queue statement with no listener results in packet drop"
+}
+
+test_queue()
+{
+       local expected=$1
+       local last=""
+
+       # spawn nf-queue listeners
+       ip netns exec ${nsrouter} ./nf-queue -c -q 0 -t 3 > "$TMPFILE0" &
+       ip netns exec ${nsrouter} ./nf-queue -c -q 1 -t 3 > "$TMPFILE1" &
+       sleep 1
+       test_ping
+       ret=$?
+       if [ $ret -ne 0 ];then
+               echo "FAIL: netns routing/connectivity with active nf-queue listeners: $ret" 1>&2
+               exit $ret
+       fi
+
+       test_ping_router
+       ret=$?
+       if [ $ret -ne 0 ];then
+               echo "FAIL: netns router unreachable with active nf-queue listeners: $ret" 1>&2
+               exit $ret
+       fi
+
+       wait
+       ret=$?
+
+       for file in $TMPFILE0 $TMPFILE1; do
+               last=$(tail -n1 "$file")
+               if [ x"$last" != x"$expected packets total" ]; then
+                       echo "FAIL: Expected $expected packets total, but got $last" 1>&2
+                       cat "$file" 1>&2
+
+                       ip netns exec ${nsrouter} nft list ruleset
+                       exit 1
+               fi
+       done
+
+       echo "PASS: Expected and received $last"
+}
+
+test_tcp_forward()
+{
+       ip netns exec ${nsrouter} ./nf-queue -q 2 -t 10 &
+       local nfqpid=$!
+
+       tmpfile=$(mktemp) || exit 1
+       dd conv=sparse status=none if=/dev/zero bs=1M count=100 of=$tmpfile
+       ip netns exec ${ns2} nc -w 5 -l -p 12345 <"$tmpfile" >/dev/null &
+       local rpid=$!
+
+       sleep 1
+       ip netns exec ${ns1} nc -w 5 10.0.2.99 12345 <"$tmpfile" >/dev/null &
+       local lpid=$!
+       rm -f "$tmpfile"
+
+       wait $rpid
+       wait $lpid
+       [ $? -eq 0 ] && echo "PASS: tcp and nfqueue in forward chain"
+}
+
+test_tcp_localhost()
+{
+       tc -net "${nsrouter}" qdisc add dev lo root netem loss random 1%
+
+       tmpfile=$(mktemp) || exit 1
+
+       dd conv=sparse status=none if=/dev/zero bs=1M count=900 of=$tmpfile
+       ip netns exec ${nsrouter} nc -w 5 -l -p 12345 <"$tmpfile" >/dev/null &
+       local rpid=$!
+
+       ip netns exec ${nsrouter} ./nf-queue -q 3 -t 30 &
+       local nfqpid=$!
+
+       sleep 1
+       ip netns exec ${nsrouter} nc -w 5 127.0.0.1 12345 <"$tmpfile" > /dev/null
+       rm -f "$tmpfile"
+
+       wait $rpid
+       [ $? -eq 0 ] && echo "PASS: tcp via loopback"
+}
+
+ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+load_ruleset "filter" 0
+
+sleep 3
+
+test_ping
+ret=$?
+if [ $ret -eq 0 ];then
+       # queue bypass works (rules were skipped, no listener)
+       echo "PASS: ${ns1} can reach ${ns2}"
+else
+       echo "FAIL: ${ns1} cannot reach ${ns2}: $ret" 1>&2
+       exit $ret
+fi
+
+test_queue_blackhole ip
+test_queue_blackhole ip6
+
+# dummy ruleset to add base chains between the
+# queueing rules.  We don't want the second reinject
+# to re-execute the old hooks.
+load_counter_ruleset 10
+
+# we are hooking all of prerouting/input/forward/output/postrouting.
+# test_queue pings ${ns2} and ${nsrouter} from ${ns1}, over ipv4 and ipv6, so per family:
+# 1x icmp forwarded to ${ns2}: prerouting,forward,postrouting -> 3 queue events, 6 incl. reply.
+# 1x icmp to ${nsrouter}: prerouting,input (request) + output,postrouting (reply) -> 4 queue events.
+# so each userspace listener (one per family/queue) is expected to receive 10 packets.
+test_queue 10
+
+# same, but with a second nfq ruleset ("filter2") queueing every packet again, doubling the counts.
+load_ruleset "filter2" 20
+test_queue 20
+
+test_tcp_forward
+test_tcp_localhost
+
+exit $ret
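
The script expects the nf-queue helper to sit next to it and relies on a netcat that accepts -l -p for the TCP cases; a hedged sketch of running it on its own:

    cd tools/testing/selftests/netfilter
    make               # builds nf-queue (needs libmnl)
    ./nft_queue.sh     # needs nft, ip netns support and a traditional 'nc -l -p' netcat
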
index 477bc61..c03af46 100644 (file)
@@ -57,3 +57,4 @@ CONFIG_NET_IFE_SKBMARK=m
 CONFIG_NET_IFE_SKBPRIO=m
 CONFIG_NET_IFE_SKBTCINDEX=m
 CONFIG_NET_SCH_FIFO=y
+CONFIG_NET_SCH_ETS=m
index 138d46b..936e1ca 100755 (executable)
@@ -527,11 +527,16 @@ n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0
 n0 wg set wg0 peer "$pub2" allowed-ips ::/0,1700::/111,5000::/4,e000::/37,9000::/75
 n0 wg set wg0 peer "$pub2" allowed-ips ::/0
 n0 wg set wg0 peer "$pub2" remove
-low_order_points=( AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38= )
-n0 wg set wg0 private-key /dev/null ${low_order_points[@]/#/peer }
-[[ -z $(n0 wg show wg0 peers) ]]
-n0 wg set wg0 private-key <(echo "$key1") ${low_order_points[@]/#/peer }
-[[ -z $(n0 wg show wg0 peers) ]]
+for low_order_point in AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38=; do
+       n0 wg set wg0 peer "$low_order_point" persistent-keepalive 1 endpoint 127.0.0.1:1111
+done
+[[ -n $(n0 wg show wg0 peers) ]]
+exec 4< <(n0 ncat -l -u -p 1111)
+ncat_pid=$!
+waitncatudp $netns0 $ncat_pid
+ip0 link set wg0 up
+! read -r -n 1 -t 2 <&4 || false
+kill $ncat_pid
 ip0 link del wg0
 
 declare -A objects
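
The replacement above no longer expects the kernel to reject low-order-point peers outright; it configures them with a keepalive and a local UDP endpoint, then checks that bringing wg0 up produces no handshake traffic within two seconds. The "expect silence" idiom from the hunk, in isolation (port, timeout and listener command taken from the lines above; the n0/netns wrappers are omitted, and under the script's set -e the trailing '|| false' turns any received byte into a hard failure):

    exec 4< <(ncat -l -u -p 1111)      # fd 4 reads whatever the UDP listener receives
    ncat_pid=$!                        # bash sets $! to the process substitution's pid
    ! read -r -n 1 -t 2 <&4 || false   # the read must time out; a byte means a handshake leaked
    kill $ncat_pid
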
index 28d4776..90598a4 100644 (file)
@@ -41,7 +41,7 @@ $(DISTFILES_PATH)/$(1):
        flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -O $$@.tmp $(2)$(1) || rm -f $$@.tmp; [ -f $$@.tmp ] || exit 1; if echo "$(3)  $$@.tmp" | sha256sum -c -; then mv $$@.tmp $$@; else rm -f $$@.tmp; exit 71; fi'
 endef
 
-$(eval $(call tar_download,MUSL,musl,1.1.24,.tar.gz,https://www.musl-libc.org/releases/,1370c9a812b2cf2a7d92802510cca0058cc37e66a7bedd70051f0a34015022a3))
+$(eval $(call tar_download,MUSL,musl,1.2.0,.tar.gz,https://musl.libc.org/releases/,c6de7b191139142d3f9a7b5b702c9cae1b5ee6e7f57e582da9328629408fd4e8))
 $(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/,d846040224317caf2f75c843d309a950a7db23f9b44b94688ccbe557d6d1710c))
 $(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d))
 $(eval $(call tar_download,IPROUTE2,iproute2,5.4.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,fe97aa60a0d4c5ac830be18937e18dc3400ca713a33a89ad896ff1e3d46086ae))
index 90bc981..c969812 100644 (file)
@@ -13,7 +13,6 @@
 #include <fcntl.h>
 #include <sys/wait.h>
 #include <sys/mount.h>
-#include <sys/types.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/io.h>
index af9323a..d531de1 100644 (file)
@@ -56,7 +56,6 @@ CONFIG_NO_HZ_IDLE=y
 CONFIG_NO_HZ_FULL=n
 CONFIG_HZ_PERIODIC=n
 CONFIG_HIGH_RES_TIMERS=y
-CONFIG_COMPAT_32BIT_TIME=y
 CONFIG_ARCH_RANDOM=y
 CONFIG_FILE_LOCKING=y
 CONFIG_POSIX_TIMERS=y
index bdf5bbd..96afb03 100644 (file)
@@ -124,17 +124,6 @@ choice
 
          If in doubt, select 'None'
 
-config INITRAMFS_COMPRESSION_NONE
-       bool "None"
-       help
-         Do not compress the built-in initramfs at all. This may sound wasteful
-         in space, but, you should be aware that the built-in initramfs will be
-         compressed at a later stage anyways along with the rest of the kernel,
-         on those architectures that support this. However, not compressing the
-         initramfs may lead to slightly higher memory consumption during a
-         short time at boot, while both the cpio image and the unpacked
-         filesystem image will be present in memory simultaneously
-
 config INITRAMFS_COMPRESSION_GZIP
        bool "Gzip"
        depends on RD_GZIP
@@ -207,4 +196,15 @@ config INITRAMFS_COMPRESSION_LZ4
          If you choose this, keep in mind that most distros don't provide lz4
          by default which could cause a build failure.
 
+config INITRAMFS_COMPRESSION_NONE
+       bool "None"
+       help
+         Do not compress the built-in initramfs at all. This may sound wasteful
+         in space, but, you should be aware that the built-in initramfs will be
+         compressed at a later stage anyways along with the rest of the kernel,
+         on those architectures that support this. However, not compressing the
+         initramfs may lead to slightly higher memory consumption during a
+         short time at boot, while both the cpio image and the unpacked
+         filesystem image will be present in memory simultaneously
+
 endchoice